diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f9c69fbf5d6..8775e1464d0 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -117,7 +117,7 @@ For Eclipse, go to `Preferences->Java->Installed JREs` and add `-ea` to Please follow these formatting guidelines: * Java indent is 4 spaces -* Line width is 100 characters +* Line width is 140 characters * The rest is left to Java coding standards * Disable “auto-format on save”; it generates spurious formatting changes that make reviews much harder. If your IDE supports formatting only modified chunks, that is fine to do. * Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail. Please attempt to tame your IDE so it doesn't create them, and please send a PR against this document with instructions for your IDE if they aren't already included. diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index dd9d1781ccd..b3c2f4faef8 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -311,16 +311,9 @@ class BuildPlugin implements Plugin<Project> { /** * Returns a closure which can be used with a MavenPom for fixing problems with gradle generated poms. * - * + * The current fixup is to set compile time deps back to compile from runtime (known issue with maven-publish plugin). */ private static Closure fixupDependencies(Project project) { - // TODO: revisit this when upgrading to Gradle 2.14+, see Javadoc comment above return { XmlProvider xml -> // first find if we have dependencies at all, and grab the node NodeList depsNodes = xml.asNode().get('dependencies') diff --git a/buildSrc/src/main/resources/checkstyle.xml b/buildSrc/src/main/resources/checkstyle.xml index 85b55a71cf8..891a85d50a9 100644 --- a/buildSrc/src/main/resources/checkstyle.xml +++ b/buildSrc/src/main/resources/checkstyle.xml @@ -22,7 +22,7 @@ suppress the check there but enforce it everywhere else. This prevents the list from getting longer even if it is unfair.
--> <module name="LineLength"> -    <property name="max" value="100"/> +    <property name="max" value="140"/> </module> diff --git a/buildSrc/src/main/resources/eclipse.settings/org.eclipse.jdt.core.prefs b/buildSrc/src/main/resources/eclipse.settings/org.eclipse.jdt.core.prefs index e30b8df6cc4..48c93f444ba 100644 --- a/buildSrc/src/main/resources/eclipse.settings/org.eclipse.jdt.core.prefs +++ b/buildSrc/src/main/resources/eclipse.settings/org.eclipse.jdt.core.prefs @@ -16,6 +16,6 @@ eclipse.preferences.version=1 # org.eclipse.jdt.core.compiler.problem.potentialNullReference=warning org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning -org.eclipse.jdt.core.formatter.lineSplit=100 +org.eclipse.jdt.core.formatter.lineSplit=140 org.eclipse.jdt.core.formatter.tabulation.char=space org.eclipse.jdt.core.formatter.tabulation.size=4 diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index f2308fd93fe..7ec5888674d 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -35,48 +35,6 @@ public class Version implements Comparable<Version> { * values below 25 are for alpha builds (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99 * indicating a release. The (internal) format of the id is there so we can easily do after/before checks on the id */ - public static final int V_2_0_0_ID = 2000099; - public static final Version V_2_0_0 = new Version(V_2_0_0_ID, org.apache.lucene.util.Version.LUCENE_5_2_1); - public static final int V_2_0_1_ID = 2000199; - public static final Version V_2_0_1 = new Version(V_2_0_1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1); - public static final int V_2_0_2_ID = 2000299; - public static final Version V_2_0_2 = new Version(V_2_0_2_ID, org.apache.lucene.util.Version.LUCENE_5_2_1); - public static final int V_2_1_0_ID = 2010099; - public static final Version V_2_1_0 = new Version(V_2_1_0_ID, org.apache.lucene.util.Version.LUCENE_5_3_1); - public static final int V_2_1_1_ID = 2010199; - public static final Version V_2_1_1 = new Version(V_2_1_1_ID, org.apache.lucene.util.Version.LUCENE_5_3_1); - public static final int V_2_1_2_ID = 2010299; - public static final Version V_2_1_2 = new Version(V_2_1_2_ID, org.apache.lucene.util.Version.LUCENE_5_3_1); - public static final int V_2_2_0_ID = 2020099; - public static final Version V_2_2_0 = new Version(V_2_2_0_ID, org.apache.lucene.util.Version.LUCENE_5_4_1); - public static final int V_2_2_1_ID = 2020199; - public static final Version V_2_2_1 = new Version(V_2_2_1_ID, org.apache.lucene.util.Version.LUCENE_5_4_1); - public static final int V_2_2_2_ID = 2020299; - public static final Version V_2_2_2 = new Version(V_2_2_2_ID, org.apache.lucene.util.Version.LUCENE_5_4_1); - public static final int V_2_3_0_ID = 2030099; - public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); - public static final int V_2_3_1_ID = 2030199; - public static final Version V_2_3_1 = new Version(V_2_3_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); - public static final int V_2_3_2_ID = 2030299; - public static final Version V_2_3_2 = new Version(V_2_3_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); - public static final int V_2_3_3_ID = 2030399; - public static final Version V_2_3_3 = new Version(V_2_3_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); - public static final int V_2_3_4_ID = 2030499; - public static final Version V_2_3_4 = new Version(V_2_3_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); - public static final int
V_2_3_5_ID = 2030599; - public static final Version V_2_3_5 = new Version(V_2_3_5_ID, org.apache.lucene.util.Version.LUCENE_5_5_0); - public static final int V_2_4_0_ID = 2040099; - public static final Version V_2_4_0 = new Version(V_2_4_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_2); - public static final int V_2_4_1_ID = 2040199; - public static final Version V_2_4_1 = new Version(V_2_4_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_2); - public static final int V_2_4_2_ID = 2040299; - public static final Version V_2_4_2 = new Version(V_2_4_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_2); - public static final int V_2_4_3_ID = 2040399; - public static final Version V_2_4_3 = new Version(V_2_4_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_2); - public static final int V_2_4_4_ID = 2040499; - public static final Version V_2_4_4 = new Version(V_2_4_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_2); - public static final int V_2_4_5_ID = 2040599; - public static final Version V_2_4_5 = new Version(V_2_4_5_ID, org.apache.lucene.util.Version.LUCENE_5_5_2); public static final int V_5_0_0_alpha1_ID = 5000001; public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0); public static final int V_5_0_0_alpha2_ID = 5000002; @@ -182,48 +140,6 @@ public class Version implements Comparable { return V_5_0_0_alpha2; case V_5_0_0_alpha1_ID: return V_5_0_0_alpha1; - case V_2_4_5_ID: - return V_2_4_5; - case V_2_4_4_ID: - return V_2_4_4; - case V_2_4_3_ID: - return V_2_4_3; - case V_2_4_2_ID: - return V_2_4_2; - case V_2_4_1_ID: - return V_2_4_1; - case V_2_4_0_ID: - return V_2_4_0; - case V_2_3_5_ID: - return V_2_3_5; - case V_2_3_4_ID: - return V_2_3_4; - case V_2_3_3_ID: - return V_2_3_3; - case V_2_3_2_ID: - return V_2_3_2; - case V_2_3_1_ID: - return V_2_3_1; - case V_2_3_0_ID: - return V_2_3_0; - case V_2_2_2_ID: - return V_2_2_2; - case V_2_2_1_ID: - return V_2_2_1; - case V_2_2_0_ID: - return V_2_2_0; - case V_2_1_2_ID: - return V_2_1_2; - case V_2_1_1_ID: - return V_2_1_1; - case V_2_1_0_ID: - return V_2_1_0; - case V_2_0_2_ID: - return V_2_0_2; - case V_2_0_1_ID: - return V_2_0_1; - case V_2_0_0_ID: - return V_2_0_0; default: return new Version(id, org.apache.lucene.util.Version.LATEST); } diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java index c1d0541d4ce..d52175c9eb4 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java @@ -37,6 +37,8 @@ import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction; import org.elasticsearch.action.admin.cluster.node.tasks.get.TransportGetTaskAction; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; +import org.elasticsearch.action.admin.cluster.remote.RemoteInfoAction; +import org.elasticsearch.action.admin.cluster.remote.TransportRemoteInfoAction; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.delete.TransportDeleteRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction; @@ -235,6 +237,7 @@ import org.elasticsearch.rest.action.admin.cluster.RestNodesStatsAction; import 
org.elasticsearch.rest.action.admin.cluster.RestPendingClusterTasksAction; import org.elasticsearch.rest.action.admin.cluster.RestPutRepositoryAction; import org.elasticsearch.rest.action.admin.cluster.RestPutStoredScriptAction; +import org.elasticsearch.rest.action.admin.cluster.RestRemoteClusterInfoAction; import org.elasticsearch.rest.action.admin.cluster.RestRestoreSnapshotAction; import org.elasticsearch.rest.action.admin.cluster.RestSnapshotsStatusAction; import org.elasticsearch.rest.action.admin.cluster.RestVerifyRepositoryAction; @@ -400,6 +403,7 @@ public class ActionModule extends AbstractModule { actions.register(MainAction.INSTANCE, TransportMainAction.class); actions.register(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class); + actions.register(RemoteInfoAction.INSTANCE, TransportRemoteInfoAction.class); actions.register(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class); actions.register(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class); actions.register(ListTasksAction.INSTANCE, TransportListTasksAction.class); @@ -509,6 +513,7 @@ public class ActionModule extends AbstractModule { }; registerHandler.accept(new RestMainAction(settings, restController)); registerHandler.accept(new RestNodesInfoAction(settings, restController, settingsFilter)); + registerHandler.accept(new RestRemoteClusterInfoAction(settings, restController)); registerHandler.accept(new RestNodesStatsAction(settings, restController)); registerHandler.accept(new RestNodesHotThreadsAction(settings, restController)); registerHandler.accept(new RestClusterAllocationExplainAction(settings, restController)); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoAction.java new file mode 100644 index 00000000000..aa546c7dffd --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoAction.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.remote; + +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public final class RemoteInfoAction extends Action<RemoteInfoRequest, RemoteInfoResponse, RemoteInfoRequestBuilder> { + + public static final String NAME = "cluster:monitor/remote/info"; + public static final RemoteInfoAction INSTANCE = new RemoteInfoAction(); + + public RemoteInfoAction() { + super(NAME); + } + + @Override + public RemoteInfoRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new RemoteInfoRequestBuilder(client, INSTANCE); + } + + @Override + public RemoteInfoResponse newResponse() { + return new RemoteInfoResponse(); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoRequest.java new file mode 100644 index 00000000000..6e41f145b65 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoRequest.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.remote; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; + +public final class RemoteInfoRequest extends ActionRequest { + + @Override + public ActionRequestValidationException validate() { + return null; + } + +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoRequestBuilder.java new file mode 100644 index 00000000000..f46f5ecd2d3 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoRequestBuilder.java @@ -0,0 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.remote; + +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.client.ElasticsearchClient; + +public final class RemoteInfoRequestBuilder extends ActionRequestBuilder<RemoteInfoRequest, RemoteInfoResponse, RemoteInfoRequestBuilder> { + + public RemoteInfoRequestBuilder(ElasticsearchClient client, RemoteInfoAction action) { + super(client, action, new RemoteInfoRequest()); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java new file mode 100644 index 00000000000..6d79e230922 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.remote; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.search.RemoteConnectionInfo; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +public final class RemoteInfoResponse extends ActionResponse implements ToXContentObject { + + private List<RemoteConnectionInfo> infos; + + RemoteInfoResponse() { + } + + RemoteInfoResponse(Collection<RemoteConnectionInfo> infos) { + this.infos = Collections.unmodifiableList(new ArrayList<>(infos)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(infos); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + infos = in.readList(RemoteConnectionInfo::new); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + for (RemoteConnectionInfo info : infos) { + info.toXContent(builder, params); + } + builder.endObject(); + return builder; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java new file mode 100644 index 00000000000..cdb79a82583 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/remote/TransportRemoteInfoAction.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.remote; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.RemoteClusterService; +import org.elasticsearch.action.search.SearchTransportService; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.ArrayList; + +public final class TransportRemoteInfoAction extends HandledTransportAction<RemoteInfoRequest, RemoteInfoResponse> { + + private final RemoteClusterService remoteClusterService; + + @Inject + public TransportRemoteInfoAction(Settings settings, ThreadPool threadPool, TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + SearchTransportService searchTransportService) { + super(settings, RemoteInfoAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, + RemoteInfoRequest::new); + this.remoteClusterService = searchTransportService.getRemoteClusterService(); + } + + @Override + protected void doExecute(RemoteInfoRequest remoteInfoRequest, ActionListener<RemoteInfoResponse> listener) { + remoteClusterService.getRemoteConnectionInfos(ActionListener.wrap(remoteConnectionInfos + -> listener.onResponse(new RemoteInfoResponse(remoteConnectionInfos)), listener::onFailure)); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java index e4c6b34d9c7..b92839638d8 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java @@ -126,9 +126,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeResponse.AnalyzeToken> { - if (in.getVersion().onOrAfter(Version.V_2_2_0)) { - attributes = (Map<String, Object>) in.readGenericValue(); - } + attributes = (Map<String, Object>) in.readGenericValue(); } @Override @@ -141,9 +139,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeResponse.AnalyzeToken> { out.writeOptionalVInt(positionLength > 1 ?
positionLength : null); } out.writeOptionalString(type); - if (out.getVersion().onOrAfter(Version.V_2_2_0)) { - out.writeGenericValue(attributes); - } + out.writeGenericValue(attributes); } } @@ -200,9 +196,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable { onScrollResponse(lastBatchStartTime, lastBatchSize, response); }); diff --git a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTask.java b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTask.java index 1b458caa3d5..ef8043b9c5f 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTask.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/byscroll/WorkingBulkByScrollTask.java @@ -178,14 +178,14 @@ public class WorkingBulkByScrollTask extends BulkByScrollTask implements Success AbstractRunnable prepareBulkRequestRunnable) { // Synchronize so we are less likely to schedule the same request twice. synchronized (delayedPrepareBulkRequestReference) { - TimeValue delay = throttleWaitTime(lastBatchStartTime, lastBatchSize); + TimeValue delay = throttleWaitTime(lastBatchStartTime, timeValueNanos(System.nanoTime()), lastBatchSize); delayedPrepareBulkRequestReference.set(new DelayedPrepareBulkRequest(threadPool, getRequestsPerSecond(), delay, new RunOnce(prepareBulkRequestRunnable))); } } - TimeValue throttleWaitTime(TimeValue lastBatchStartTime, int lastBatchSize) { - long earliestNextBatchStartTime = lastBatchStartTime.nanos() + (long) perfectlyThrottledBatchTime(lastBatchSize); + TimeValue throttleWaitTime(TimeValue lastBatchStartTime, TimeValue now, int lastBatchSize) { + long earliestNextBatchStartTime = now.nanos() + (long) perfectlyThrottledBatchTime(lastBatchSize); return timeValueNanos(max(0, earliestNextBatchStartTime - System.nanoTime())); } diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java index 7dfcdcfa108..6453e4dff35 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.fieldstats; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.broadcast.BroadcastRequest; @@ -200,9 +199,7 @@ public class FieldStatsRequest extends BroadcastRequest { out.writeByte(indexConstraint.getProperty().getId()); out.writeByte(indexConstraint.getComparison().getId()); out.writeString(indexConstraint.getValue()); - if (out.getVersion().onOrAfter(Version.V_2_0_1)) { - out.writeOptionalString(indexConstraint.getOptionalFormat()); - } + out.writeOptionalString(indexConstraint.getOptionalFormat()); } out.writeString(level); out.writeBoolean(useCache); diff --git a/core/src/main/java/org/elasticsearch/action/fieldstats/IndexConstraint.java b/core/src/main/java/org/elasticsearch/action/fieldstats/IndexConstraint.java index 62eaf207e31..fe39ba6e377 100644 --- a/core/src/main/java/org/elasticsearch/action/fieldstats/IndexConstraint.java +++ b/core/src/main/java/org/elasticsearch/action/fieldstats/IndexConstraint.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.fieldstats; -import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import java.io.IOException; @@ -39,11 +38,7 @@ 
public class IndexConstraint { this.property = Property.read(input.readByte()); this.comparison = Comparison.read(input.readByte()); this.value = input.readString(); - if (input.getVersion().onOrAfter(Version.V_2_0_1)) { - this.optionalFormat = input.readOptionalString(); - } else { - this.optionalFormat = null; - } + this.optionalFormat = input.readOptionalString(); } public IndexConstraint(String field, Property property, Comparison comparison, String value) { diff --git a/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index ee835fa06be..884af4a3af9 100644 --- a/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ b/core/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -68,13 +68,6 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, GetResponse> { diff --git a/core/src/main/java/org/elasticsearch/action/search/RemoteClusterConnection.java b/core/src/main/java/org/elasticsearch/action/search/RemoteClusterConnection.java + void getConnectionInfo(ActionListener<RemoteConnectionInfo> listener) { + final Optional<DiscoveryNode> anyNode = connectedNodes.stream().findAny(); + if (anyNode.isPresent() == false) { + // not connected, return immediately + RemoteConnectionInfo remoteConnectionStats = new RemoteConnectionInfo(clusterAlias, + Collections.emptyList(), Collections.emptyList(), maxNumRemoteConnections, 0, + RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings)); + listener.onResponse(remoteConnectionStats); + } else { + NodesInfoRequest request = new NodesInfoRequest(); + request.clear(); + request.http(true); + + transportService.sendRequest(anyNode.get(), NodesInfoAction.NAME, request, new TransportResponseHandler<NodesInfoResponse>() { + @Override + public NodesInfoResponse newInstance() { + return new NodesInfoResponse(); + } + + @Override + public void handleResponse(NodesInfoResponse response) { + Collection<TransportAddress> httpAddresses = new HashSet<>(); + for (NodeInfo info : response.getNodes()) { + if (connectedNodes.contains(info.getNode()) && info.getHttp() != null) { + httpAddresses.add(info.getHttp().getAddress().publishAddress()); + } + } + + if (httpAddresses.size() < maxNumRemoteConnections) { + // just in case none of the connected nodes have http enabled we get other http enabled nodes instead. + for (NodeInfo info : response.getNodes()) { + if (nodePredicate.test(info.getNode()) && info.getHttp() != null) { + httpAddresses.add(info.getHttp().getAddress().publishAddress()); + } + if (httpAddresses.size() == maxNumRemoteConnections) { + break; // once we have enough return...
+ } + } + } + RemoteConnectionInfo remoteConnectionInfo = new RemoteConnectionInfo(clusterAlias, + seedNodes.stream().map(n -> n.getAddress()).collect(Collectors.toList()), new ArrayList<>(httpAddresses), + maxNumRemoteConnections, connectedNodes.size(), + RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings)); + listener.onResponse(remoteConnectionInfo); + } + + @Override + public void handleException(TransportException exp) { + listener.onFailure(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + }); + } + + } + + int getNumNodesConnected() { + return connectedNodes.size(); + } } diff --git a/core/src/main/java/org/elasticsearch/action/search/RemoteClusterService.java b/core/src/main/java/org/elasticsearch/action/search/RemoteClusterService.java index 089ce57a114..34cb5a84da7 100644 --- a/core/src/main/java/org/elasticsearch/action/search/RemoteClusterService.java +++ b/core/src/main/java/org/elasticsearch/action/search/RemoteClusterService.java @@ -24,9 +24,10 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.metadata.ClusterNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.PlainShardIterator; import org.elasticsearch.cluster.routing.ShardIterator; import org.elasticsearch.common.Booleans; @@ -51,10 +52,12 @@ import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -111,11 +114,13 @@ public final class RemoteClusterService extends AbstractComponent implements Clo private final TransportService transportService; private final int numRemoteConnections; + private final ClusterNameExpressionResolver clusterNameResolver; private volatile Map remoteClusters = Collections.emptyMap(); RemoteClusterService(Settings settings, TransportService transportService) { super(settings); this.transportService = transportService; + this.clusterNameResolver = new ClusterNameExpressionResolver(settings); numRemoteConnections = REMOTE_CONNECTIONS_PER_CLUSTER.get(settings); } @@ -203,25 +208,30 @@ public final class RemoteClusterService extends AbstractComponent implements Clo */ Map> groupClusterIndices(String[] requestIndices, Predicate indexExists) { Map> perClusterIndices = new HashMap<>(); + Set remoteClusterNames = this.remoteClusters.keySet(); for (String index : requestIndices) { int i = index.indexOf(REMOTE_CLUSTER_INDEX_SEPARATOR); - String indexName = index; - String clusterName = LOCAL_CLUSTER_GROUP_KEY; if (i >= 0) { String remoteClusterName = index.substring(0, i); - if (isRemoteClusterRegistered(remoteClusterName)) { + List clusters = clusterNameResolver.resolveClusterNames(remoteClusterNames, remoteClusterName); + if (clusters.isEmpty() == false) { if (indexExists.test(index)) { // we use : as a separator for remote 
clusters. might conflict if there is an index that is actually named // remote_cluster_alias:index_name - for this case we fail the request. the user can easily change the cluster alias // if that happens throw new IllegalArgumentException("Can not filter indices; index " + index + " exists but there is also a remote cluster named: " + remoteClusterName); + } + String indexName = index.substring(i + 1); + for (String clusterName : clusters) { + perClusterIndices.computeIfAbsent(clusterName, k -> new ArrayList<>()).add(indexName); } - indexName = index.substring(i + 1); - clusterName = remoteClusterName; + } else { + perClusterIndices.computeIfAbsent(LOCAL_CLUSTER_GROUP_KEY, k -> new ArrayList<>()).add(index); } + } else { + perClusterIndices.computeIfAbsent(LOCAL_CLUSTER_GROUP_KEY, k -> new ArrayList<>()).add(index); } - perClusterIndices.computeIfAbsent(clusterName, k -> new ArrayList()).add(indexName); } return perClusterIndices; } @@ -413,4 +423,17 @@ public final class RemoteClusterService extends AbstractComponent implements Clo public void close() throws IOException { IOUtils.close(remoteClusters.values()); } + + public void getRemoteConnectionInfos(ActionListener> listener) { + final Map remoteClusters = this.remoteClusters; + if (remoteClusters.isEmpty()) { + listener.onResponse(Collections.emptyList()); + } else { + final GroupedActionListener actionListener = new GroupedActionListener<>(listener, + remoteClusters.size(), Collections.emptyList()); + for (RemoteClusterConnection connection : remoteClusters.values()) { + connection.getConnectionInfo(actionListener); + } + } + } } diff --git a/core/src/main/java/org/elasticsearch/action/search/RemoteConnectionInfo.java b/core/src/main/java/org/elasticsearch/action/search/RemoteConnectionInfo.java new file mode 100644 index 00000000000..ff3548d215b --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/search/RemoteConnectionInfo.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.search; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * This class encapsulates all remote cluster information to be rendered on + * _remote/info requests. 
+ */ +public final class RemoteConnectionInfo implements ToXContent, Writeable { + final List<TransportAddress> seedNodes; + final List<TransportAddress> httpAddresses; + final int connectionsPerCluster; + final TimeValue initialConnectionTimeout; + final int numNodesConnected; + final String clusterAlias; + + RemoteConnectionInfo(String clusterAlias, List<TransportAddress> seedNodes, + List<TransportAddress> httpAddresses, + int connectionsPerCluster, int numNodesConnected, + TimeValue initialConnectionTimeout) { + this.clusterAlias = clusterAlias; + this.seedNodes = seedNodes; + this.httpAddresses = httpAddresses; + this.connectionsPerCluster = connectionsPerCluster; + this.numNodesConnected = numNodesConnected; + this.initialConnectionTimeout = initialConnectionTimeout; + } + + public RemoteConnectionInfo(StreamInput input) throws IOException { + seedNodes = input.readList(TransportAddress::new); + httpAddresses = input.readList(TransportAddress::new); + connectionsPerCluster = input.readVInt(); + initialConnectionTimeout = new TimeValue(input); + numNodesConnected = input.readVInt(); + clusterAlias = input.readString(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(clusterAlias); + { + builder.startArray("seeds"); + for (TransportAddress addr : seedNodes) { + builder.value(addr.toString()); + } + builder.endArray(); + builder.startArray("http_addresses"); + for (TransportAddress addr : httpAddresses) { + builder.value(addr.toString()); + } + builder.endArray(); + builder.field("connected", numNodesConnected > 0); + builder.field("num_nodes_connected", numNodesConnected); + builder.field("max_connections_per_cluster", connectionsPerCluster); + builder.field("initial_connect_timeout", initialConnectionTimeout); + } + builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeList(seedNodes); + out.writeList(httpAddresses); + out.writeVInt(connectionsPerCluster); + initialConnectionTimeout.writeTo(out); + out.writeVInt(numNodesConnected); + out.writeString(clusterAlias); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + RemoteConnectionInfo that = (RemoteConnectionInfo) o; + return connectionsPerCluster == that.connectionsPerCluster && + numNodesConnected == that.numNodesConnected && + Objects.equals(seedNodes, that.seedNodes) && + Objects.equals(httpAddresses, that.httpAddresses) && + Objects.equals(initialConnectionTimeout, that.initialConnectionTimeout) && + Objects.equals(clusterAlias, that.clusterAlias); + } + + @Override + public int hashCode() { + return Objects.hash(seedNodes, httpAddresses, connectionsPerCluster, initialConnectionTimeout, numNodesConnected, clusterAlias); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 008d022a655..63a3ad0b62d 100644 --- a/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -60,7 +60,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> { public static final Setting<Long> SHARD_COUNT_LIMIT_SETTING = Setting.longSetting( - "action.search.shard_count.limit", 1000L, 1L, Property.Dynamic, Property.NodeScope); + "action.search.shard_count.limit", Long.MAX_VALUE, 1L, Property.Dynamic, Property.NodeScope); private final ClusterService
clusterService; private final SearchTransportService searchTransportService; diff --git a/core/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java b/core/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java new file mode 100644 index 00000000000..85b418e046c --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/GroupedActionListener.java @@ -0,0 +1,81 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.common.util.concurrent.CountDown; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +/** + * An action listener that delegates its results to another listener once + * it has received one or more failures or N results. This allows synchronous + * tasks to be forked off in a loop with the same listener and respond to a + * higher level listener once all tasks have responded. + */ +public final class GroupedActionListener<T> implements ActionListener<T> { + private final CountDown countDown; + private final AtomicInteger pos = new AtomicInteger(); + private final AtomicArray<T> roles; + private final ActionListener<Collection<T>> delegate; + private final Collection<T> defaults; + private final AtomicReference<Exception> failure = new AtomicReference<>(); + + /** + * Creates a new listener + * @param delegate the delegate listener + * @param groupSize the group size + */ + public GroupedActionListener(ActionListener<Collection<T>> delegate, int groupSize, + Collection<T> defaults) { + roles = new AtomicArray<>(groupSize); + countDown = new CountDown(groupSize); + this.delegate = delegate; + this.defaults = defaults; + } + + @Override + public void onResponse(T element) { + roles.set(pos.incrementAndGet() - 1, element); + if (countDown.countDown()) { + if (failure.get() != null) { + delegate.onFailure(failure.get()); + } else { + List<T> collect = this.roles.asList(); + collect.addAll(defaults); + delegate.onResponse(Collections.unmodifiableList(collect)); + } + } + } + + @Override + public void onFailure(Exception e) { + if (failure.compareAndSet(null, e) == false) { + failure.get().addSuppressed(e); + } + if (countDown.countDown()) { + delegate.onFailure(failure.get()); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 7d627d45318..7f63faac49c 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -74,7 +74,6 @@ public class ReplicationOperation< */ private final AtomicInteger pendingActions = new AtomicInteger(); private final AtomicInteger successfulShards = new AtomicInteger(); - private final boolean executeOnReplicas; private final Primary<Request, ReplicaRequest, PrimaryResultT> primary; private final Replicas<ReplicaRequest> replicasProxy; private final AtomicBoolean finished = new AtomicBoolean(); @@ -86,9 +85,8 @@ public class ReplicationOperation< public ReplicationOperation(Request request, Primary<Request, ReplicaRequest, PrimaryResultT> primary, ActionListener<PrimaryResultT> listener, - boolean executeOnReplicas, Replicas<ReplicaRequest> replicas, + Replicas<ReplicaRequest> replicas, Supplier<ClusterState> clusterStateSupplier, Logger logger, String opType) { - this.executeOnReplicas = executeOnReplicas; this.replicasProxy = replicas; this.primary = primary; this.resultListener = listener; @@ -160,7 +158,7 @@ public class ReplicationOperation< final String localNodeId = primary.routingEntry().currentNodeId(); // If the index gets deleted after primary operation, we skip replication for (final ShardRouting shard : shards) { - if (executeOnReplicas == false || shard.unassigned()) { + if (shard.unassigned()) { if (shard.primary() == false) { totalShards.incrementAndGet(); } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 71908799768..e9a26778e70 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -319,11 +319,10 @@ public abstract class TransportReplicationAction< } else { setPhase(replicationTask, "primary"); final IndexMetaData indexMetaData = clusterService.state().getMetaData().index(request.shardId().getIndex()); - final boolean
executeOnReplicas = (indexMetaData == null) || shouldExecuteReplication(indexMetaData); final ActionListener listener = createResponseListener(primaryShardReference); createReplicatedOperation(request, ActionListener.wrap(result -> result.respond(listener), listener::onFailure), - primaryShardReference, executeOnReplicas) + primaryShardReference) .execute(); } } catch (Exception e) { @@ -371,9 +370,9 @@ public abstract class TransportReplicationAction< protected ReplicationOperation> createReplicatedOperation( Request request, ActionListener> listener, - PrimaryShardReference primaryShardReference, boolean executeOnReplicas) { + PrimaryShardReference primaryShardReference) { return new ReplicationOperation<>(request, primaryShardReference, listener, - executeOnReplicas, replicasProxy, clusterService::state, logger, actionName); + replicasProxy, clusterService::state, logger, actionName); } } @@ -909,14 +908,6 @@ public abstract class TransportReplicationAction< indexShard.acquirePrimaryOperationLock(onAcquired, executor); } - /** - * Indicated whether this operation should be replicated to shadow replicas or not. If this method returns true the replication phase - * will be skipped. For example writes such as index and delete don't need to be replicated on shadow replicas but refresh and flush do. - */ - protected boolean shouldExecuteReplication(IndexMetaData indexMetaData) { - return indexMetaData.isIndexUsingShadowReplicas() == false; - } - class ShardReference implements Releasable { protected final IndexShard indexShard; diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index d3766cc958c..2b47908c352 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -338,13 +338,12 @@ final class Bootstrap { INSTANCE.setup(true, environment); - /* TODO: close this once s3 repository doesn't try to read during repository construction try { // any secure settings must be read during node construction IOUtils.close(keystore); } catch (IOException e) { throw new BootstrapException(e); - }*/ + } INSTANCE.start(); diff --git a/core/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java b/core/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java index 8372a6b8ab8..79a4fd7329f 100644 --- a/core/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java +++ b/core/src/main/java/org/elasticsearch/cli/EnvironmentAwareCommand.java @@ -45,7 +45,16 @@ public abstract class EnvironmentAwareCommand extends Command { final Map settings = new HashMap<>(); for (final KeyValuePair kvp : settingOption.values(options)) { if (kvp.value.isEmpty()) { - throw new UserException(ExitCodes.USAGE, "Setting [" + kvp.key + "] must not be empty"); + throw new UserException(ExitCodes.USAGE, "setting [" + kvp.key + "] must not be empty"); + } + if (settings.containsKey(kvp.key)) { + final String message = String.format( + Locale.ROOT, + "setting [%s] already set, saw [%s] and [%s]", + kvp.key, + settings.get(kvp.key), + kvp.value); + throw new UserException(ExitCodes.USAGE, message); } settings.put(kvp.key, kvp.value); } diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index a9392d3c017..b0baac6bd90 100644 --- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ 
b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -383,13 +383,6 @@ public class InternalClusterInfoService extends AbstractComponent if (logger.isTraceEnabled()) { logger.trace("shard: {} size: {}", sid, size); } - if (indexMeta != null && indexMeta.isIndexUsingShadowReplicas()) { - // Shards on a shared filesystem should be considered of size 0 - if (logger.isTraceEnabled()) { - logger.trace("shard: {} is using shadow replicas and will be treated as size 0", sid); - } - size = 0; - } newShardSizes.put(sid, size); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolver.java new file mode 100644 index 00000000000..2032c2f4ef3 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolver.java @@ -0,0 +1,100 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.Settings; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * Resolves cluster names from an expression. The expression must be the exact match of a cluster + * name or must be a wildcard expression. + */ +public final class ClusterNameExpressionResolver extends AbstractComponent { + + private final WildcardExpressionResolver wildcardResolver = new WildcardExpressionResolver(); + + public ClusterNameExpressionResolver(Settings settings) { + super(settings); + } + + /** + * Resolves the provided cluster expression to matching cluster names. This method only + * supports exact or wildcard matches. + * + * @param remoteClusters the aliases for remote clusters + * @param clusterExpression the expressions that can be resolved to cluster names. + * @return the resolved cluster aliases. 
*/ + public List<String> resolveClusterNames(Set<String> remoteClusters, String clusterExpression) { + if (remoteClusters.contains(clusterExpression)) { + return Collections.singletonList(clusterExpression); + } else if (Regex.isSimpleMatchPattern(clusterExpression)) { + return wildcardResolver.resolve(remoteClusters, clusterExpression); + } else { + return Collections.emptyList(); + } + } + + private static class WildcardExpressionResolver { + + private List<String> resolve(Set<String> remoteClusters, String clusterExpression) { + if (isTrivialWildcard(clusterExpression)) { + return resolveTrivialWildcard(remoteClusters); + } + + Set<String> matches = matches(remoteClusters, clusterExpression); + if (matches.isEmpty()) { + return Collections.emptyList(); + } else { + return new ArrayList<>(matches); + } + } + + private boolean isTrivialWildcard(String clusterExpression) { + return Regex.isMatchAllPattern(clusterExpression); + } + + private List<String> resolveTrivialWildcard(Set<String> remoteClusters) { + return new ArrayList<>(remoteClusters); + } + + private static Set<String> matches(Set<String> remoteClusters, String expression) { + // trailing-wildcard and general patterns both reduce to a simple match, so no branching is needed here + return otherWildcard(remoteClusters, expression); + } + + private static Set<String> otherWildcard(Set<String> remoteClusters, String expression) { + final String pattern = expression; + return remoteClusters.stream() + .filter(n -> Regex.simpleMatch(pattern, n)) + .collect(Collectors.toSet()); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 713fce2848f..67f4d71bd4e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -192,18 +192,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent { public static final String SETTING_NUMBER_OF_REPLICAS = "index.number_of_replicas"; public static final Setting<Integer> INDEX_NUMBER_OF_REPLICAS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, Property.Dynamic, Property.IndexScope); - public static final String SETTING_SHADOW_REPLICAS = "index.shadow_replicas"; - public static final Setting<Boolean> INDEX_SHADOW_REPLICAS_SETTING = - Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, Property.IndexScope, Property.Deprecated); public static final String SETTING_ROUTING_PARTITION_SIZE = "index.routing_partition_size"; public static final Setting<Integer> INDEX_ROUTING_PARTITION_SIZE_SETTING = Setting.intSetting(SETTING_ROUTING_PARTITION_SIZE, 1, 1, Property.IndexScope); - public static final String SETTING_SHARED_FILESYSTEM = "index.shared_filesystem"; - public static final Setting<Boolean> INDEX_SHARED_FILESYSTEM_SETTING = - Setting.boolSetting(SETTING_SHARED_FILESYSTEM, INDEX_SHADOW_REPLICAS_SETTING, Property.IndexScope, Property.Deprecated); - public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas"; public static final Setting<AutoExpandReplicas> INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING; public static final String SETTING_READ_ONLY = "index.blocks.read_only"; @@ -240,10 +233,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent { public static final String SETTING_DATA_PATH = "index.data_path"; public static final Setting<String> INDEX_DATA_PATH_SETTING = new Setting<>(SETTING_DATA_PATH, "", Function.identity(), Property.IndexScope); - public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE =
"index.shared_filesystem.recover_on_any_node"; - public static final Setting INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING = - Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, - Property.Dynamic, Property.IndexScope, Property.Deprecated); public static final String INDEX_UUID_NA_VALUE = "_na_"; public static final String INDEX_ROUTING_REQUIRE_GROUP_PREFIX = "index.routing.allocation.require"; @@ -1237,35 +1226,6 @@ public class IndexMetaData implements Diffable, ToXContent { } } - private static final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger(IndexMetaData.class)); - - /** - * Returns true iff the given settings indicate that the index - * associated with these settings allocates it's shards on a shared - * filesystem. Otherwise false. The default setting for this - * is the returned value from - * {@link #isIndexUsingShadowReplicas(org.elasticsearch.common.settings.Settings)}. - */ - public boolean isOnSharedFilesystem(Settings settings) { - // don't use the setting directly, not to trigger verbose deprecation logging - return settings.getAsBooleanLenientForPreEs6Indices( - this.indexCreatedVersion, SETTING_SHARED_FILESYSTEM, isIndexUsingShadowReplicas(settings), deprecationLogger); - } - - /** - * Returns true iff the given settings indicate that the index associated - * with these settings uses shadow replicas. Otherwise false. The default - * setting for this is false. - */ - public boolean isIndexUsingShadowReplicas() { - return isIndexUsingShadowReplicas(this.settings); - } - - public boolean isIndexUsingShadowReplicas(Settings settings) { - // don't use the setting directly, not to trigger verbose deprecation logging - return settings.getAsBooleanLenientForPreEs6Indices(this.indexCreatedVersion, SETTING_SHADOW_REPLICAS, false, deprecationLogger); - } - /** * Adds human readable version and creation date settings. * This method is used to display the settings in a human readable format in REST API diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 1a878919749..2cb93373700 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -433,10 +433,9 @@ public class MetaDataCreateIndexService extends AbstractComponent { .put(indexMetaData, false) .build(); - String maybeShadowIndicator = indexMetaData.isIndexUsingShadowReplicas() ? 
"s" : ""; - logger.info("[{}] creating index, cause [{}], templates {}, shards [{}]/[{}{}], mappings {}", + logger.info("[{}] creating index, cause [{}], templates {}, shards [{}]/[{}], mappings {}", request.index(), request.cause(), templateNames, indexMetaData.getNumberOfShards(), - indexMetaData.getNumberOfReplicas(), maybeShadowIndicator, mappings.keySet()); + indexMetaData.getNumberOfReplicas(), mappings.keySet()); ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); if (!request.blocks().isEmpty()) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index 7619b0cc95e..d80a1c326cf 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -139,8 +139,7 @@ public class IndexRoutingTable extends AbstractDiffable imple "allocation set " + inSyncAllocationIds); } - if (indexMetaData.isIndexUsingShadowReplicas() == false && // see #20650 - shardRouting.primary() && shardRouting.initializing() && shardRouting.relocating() == false && + if (shardRouting.primary() && shardRouting.initializing() && shardRouting.relocating() == false && RecoverySource.isInitialRecovery(shardRouting.recoverySource().getType()) == false && inSyncAllocationIds.contains(shardRouting.allocationId().getId()) == false) throw new IllegalStateException("a primary shard routing " + shardRouting + " is a primary that is recovering from " + diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java index 0f3a8c6f214..883b4c22f7f 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingChangesObserver.java @@ -69,6 +69,12 @@ public interface RoutingChangesObserver { */ void replicaPromoted(ShardRouting replicaShard); + /** + * Called when an initializing replica is reinitialized. This happens when a primary relocation completes, which + * reinitializes all currently initializing replicas as their recovery source node changes + */ + void initializedReplicaReinitialized(ShardRouting oldReplica, ShardRouting reinitializedReplica); + /** * Abstract implementation of {@link RoutingChangesObserver} that does not take any action. 
Useful for subclasses that only override @@ -120,6 +126,11 @@ public interface RoutingChangesObserver { public void replicaPromoted(ShardRouting replicaShard) { } + + @Override + public void initializedReplicaReinitialized(ShardRouting oldReplica, ShardRouting reinitializedReplica) { + + } } class DelegatingRoutingChangesObserver implements RoutingChangesObserver { @@ -192,5 +203,12 @@ public interface RoutingChangesObserver { routingChangesObserver.replicaPromoted(replicaShard); } } + + @Override + public void initializedReplicaReinitialized(ShardRouting oldReplica, ShardRouting reinitializedReplica) { + for (RoutingChangesObserver routingChangesObserver : routingChangesObservers) { + routingChangesObserver.initializedReplicaReinitialized(oldReplica, reinitializedReplica); + } + } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index 45d567b657e..3e9303d3d42 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -451,6 +451,9 @@ public class RoutingNodes implements Iterable { * * Moves the initializing shard to started. If the shard is a relocation target, also removes the relocation source. * + * If the started shard is a primary relocation target, this also reinitializes currently initializing replicas as their + * recovery source changes + * * @return the started shard */ public ShardRouting startShard(Logger logger, ShardRouting initializingShard, RoutingChangesObserver routingChangesObserver) { @@ -468,6 +471,30 @@ public class RoutingNodes implements Iterable { + initializingShard + " but was: " + relocationSourceShard.getTargetRelocatingShard(); remove(relocationSourceShard); routingChangesObserver.relocationCompleted(relocationSourceShard); + + // if this is a primary shard with ongoing replica recoveries, reinitialize them as their recovery source changed + if (startedShard.primary()) { + List assignedShards = assignedShards(startedShard.shardId()); + // copy list to prevent ConcurrentModificationException + for (ShardRouting routing : new ArrayList<>(assignedShards)) { + if (routing.initializing() && routing.primary() == false) { + if (routing.isRelocationTarget()) { + // find the relocation source + ShardRouting sourceShard = getByAllocationId(routing.shardId(), routing.allocationId().getRelocationId()); + // cancel relocation and start relocation to same node again + ShardRouting startedReplica = cancelRelocation(sourceShard); + remove(routing); + routingChangesObserver.shardFailed(routing, + new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, "primary changed")); + relocateShard(startedReplica, sourceShard.relocatingNodeId(), + sourceShard.getExpectedShardSize(), routingChangesObserver); + } else { + ShardRouting reinitializedReplica = reinitReplica(routing); + routingChangesObserver.initializedReplicaReinitialized(routing, reinitializedReplica); + } + } + } + } } return startedShard; } @@ -540,9 +567,6 @@ public class RoutingNodes implements Iterable { if (failedShard.primary()) { // promote active replica to primary if active replica exists (only the case for shadow replicas) ShardRouting activeReplica = activeReplica(failedShard.shardId()); - assert activeReplica == null || indexMetaData.isIndexUsingShadowReplicas() : - "initializing primary [" + failedShard + "] with active replicas [" + activeReplica + "] only expected when " + - "using shadow 
replicas"; if (activeReplica == null) { moveToUnassigned(failedShard, unassignedInfo); } else { @@ -599,10 +623,6 @@ public class RoutingNodes implements Iterable { assert activeReplica.started() : "replica relocation should have been cancelled: " + activeReplica; ShardRouting primarySwappedCandidate = promoteActiveReplicaShardToPrimary(activeReplica); routingChangesObserver.replicaPromoted(activeReplica); - if (indexMetaData.isIndexUsingShadowReplicas()) { - ShardRouting initializedShard = reinitShadowPrimary(primarySwappedCandidate); - routingChangesObserver.startedPrimaryReinitialized(primarySwappedCandidate, initializedShard); - } } /** @@ -730,6 +750,15 @@ public class RoutingNodes implements Iterable { return reinitializedShard; } + private ShardRouting reinitReplica(ShardRouting shard) { + assert shard.primary() == false : "shard must be a replica: " + shard; + assert shard.initializing() : "can only reinitialize an initializing replica: " + shard; + assert shard.isRelocationTarget() == false : "replication target cannot be reinitialized: " + shard; + ShardRouting reinitializedShard = shard.reinitializeReplicaShard(); + updateAssigned(shard, reinitializedShard); + return reinitializedShard; + } + private void updateAssigned(ShardRouting oldShard, ShardRouting newShard) { assert oldShard.shardId().equals(newShard.shardId()) : "can only update " + oldShard + " by shard with same shard id but was " + newShard; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index 4db922d5aeb..3a60e5338d7 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -393,6 +393,17 @@ public final class ShardRouting implements Writeable, ToXContent { allocationId, UNAVAILABLE_EXPECTED_SHARD_SIZE); } + /** + * Reinitializes a replica shard, giving it a fresh allocation id + */ + public ShardRouting reinitializeReplicaShard() { + assert state == ShardRoutingState.INITIALIZING : this; + assert primary == false : this; + assert isRelocationTarget() == false : this; + return new ShardRouting(shardId, currentNodeId, null, primary, ShardRoutingState.INITIALIZING, + recoverySource, unassignedInfo, AllocationId.newInitializing(), expectedShardSize); + } + /** * Set the shards state to STARTED. The shards state must be * INITIALIZING or RELOCATING. 
Any relocation will be canceled.
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index ff5b8e63d5e..8974c8a4a9a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
@@ -41,7 +41,9 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayAllocator; +import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; import java.util.Iterator; import java.util.List; import java.util.function.Function;
@@ -88,6 +90,9 @@ public class AllocationService extends AbstractComponent { routingNodes.unassigned().shuffle(); RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, clusterInfoService.getClusterInfo(), currentNanoTime(), false); + // as starting a primary relocation target can reinitialize replica shards, start replicas first + startedShards = new ArrayList<>(startedShards); + Collections.sort(startedShards, Comparator.comparing(ShardRouting::primary)); applyStartedShards(allocation, startedShards); gatewayAllocator.applyStartedShards(allocation, startedShards); reroute(allocation);
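
The sort above puts replicas first because Boolean natural ordering places false before true, so entries whose primary() returns false sort ahead of primaries. A minimal standalone sketch of that ordering (class and variable names are illustrative, not from this PR):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;

public class ReplicasFirstSortSketch {
    public static void main(String[] args) {
        // stand-ins for the ShardRouting::primary values of four started shards
        List<Boolean> primaryFlags = new ArrayList<>(Arrays.asList(true, false, true, false));
        // the same ordering Comparator.comparing(ShardRouting::primary) produces:
        // Boolean's natural order sorts false (replica) before true (primary)
        primaryFlags.sort(Comparator.naturalOrder());
        System.out.println(primaryFlags); // [false, false, true, true]
    }
}
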
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesChangedObserver.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesChangedObserver.java index 42e80689eec..3e465e42b44 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesChangedObserver.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesChangedObserver.java
@@ -96,6 +96,17 @@ public class RoutingNodesChangedObserver implements RoutingChangesObserver { setChanged(); } + @Override + public void initializedReplicaReinitialized(ShardRouting oldReplica, ShardRouting reinitializedReplica) { + assert oldReplica.initializing() && oldReplica.primary() == false : + "expected initializing replica shard " + oldReplica; + assert reinitializedReplica.initializing() && reinitializedReplica.primary() == false : + "expected reinitialized replica shard " + reinitializedReplica; + assert oldReplica.allocationId().getId().equals(reinitializedReplica.allocationId().getId()) == false : + "expected allocation id to change for reinitialized replica shard (old: " + oldReplica + " new: " + reinitializedReplica + ")"; + setChanged(); + } + /** * Marks the allocation as changed. */
diff --git a/core/src/main/java/org/elasticsearch/common/settings/AddFileKeyStoreCommand.java b/core/src/main/java/org/elasticsearch/common/settings/AddFileKeyStoreCommand.java new file mode 100644 index 00000000000..5ccac9a2ac3 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/AddFileKeyStoreCommand.java
@@ -0,0 +1,94 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.List; + +import joptsimple.OptionSet; +import joptsimple.OptionSpec; +import org.elasticsearch.cli.EnvironmentAwareCommand; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.env.Environment; + +/** + * A subcommand for the keystore cli which adds a file setting. + */ +class AddFileKeyStoreCommand extends EnvironmentAwareCommand { + + private final OptionSpec<Void> forceOption; + private final OptionSpec<String> arguments; + + AddFileKeyStoreCommand() { + super("Add a file setting to the keystore"); + this.forceOption = parser.acceptsAll(Arrays.asList("f", "force"), "Overwrite existing setting without prompting"); + // jopt-simple has an issue with multiple non-option arguments, so we just get one set of them here + // and convert to Path when necessary + // see https://github.com/jopt-simple/jopt-simple/issues/103 + this.arguments = parser.nonOptions("setting [filepath]"); + } + + @Override + protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception { + KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configFile()); + if (keystore == null) { + throw new UserException(ExitCodes.DATA_ERROR, "Elasticsearch keystore not found. Use 'create' command to create one."); + } + + keystore.decrypt(new char[0] /* TODO: prompt for password when they are supported */); + + List<String> argumentValues = arguments.values(options); + if (argumentValues.size() == 0) { + throw new UserException(ExitCodes.USAGE, "Missing setting name"); + } + String setting = argumentValues.get(0); + if (keystore.getSettingNames().contains(setting) && options.has(forceOption) == false) { + if (terminal.promptYesNo("Setting " + setting + " already exists.
Overwrite?", false) == false) { + terminal.println("Exiting without modifying keystore."); + return; + } + } + + if (argumentValues.size() == 1) { + throw new UserException(ExitCodes.USAGE, "Missing file name"); + } + Path file = getPath(argumentValues.get(1)); + if (Files.exists(file) == false) { + throw new UserException(ExitCodes.IO_ERROR, "File [" + file.toString() + "] does not exist"); + } + if (argumentValues.size() > 2) { + throw new UserException(ExitCodes.USAGE, "Unrecognized extra arguments [" + + String.join(", ", argumentValues.subList(2, argumentValues.size())) + "] after filepath"); + } + keystore.setFile(setting, Files.readAllBytes(file)); + keystore.save(env.configFile()); + } + + @SuppressForbidden(reason="file arg for cli") + private Path getPath(String file) { + return PathUtils.get(file); + } +}
diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index bb7aa223f72..8478a790689 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java
@@ -368,7 +368,6 @@ public final class ClusterSettings extends AbstractScopedSettings { TribeService.TRIBE_NAME_SETTING, NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING, NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING, - NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH, OsService.REFRESH_INTERVAL_SETTING, ProcessService.REFRESH_INTERVAL_SETTING, JvmService.REFRESH_INTERVAL_SETTING,
diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index a072b68b277..efbe7acf5e1 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
@@ -70,13 +70,10 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING, IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING, IndexMetaData.INDEX_ROUTING_PARTITION_SIZE_SETTING, - IndexMetaData.INDEX_SHADOW_REPLICAS_SETTING, - IndexMetaData.INDEX_SHARED_FILESYSTEM_SETTING, IndexMetaData.INDEX_READ_ONLY_SETTING, IndexMetaData.INDEX_BLOCKS_READ_SETTING, IndexMetaData.INDEX_BLOCKS_WRITE_SETTING, IndexMetaData.INDEX_BLOCKS_METADATA_SETTING, - IndexMetaData.INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING, IndexMetaData.INDEX_PRIORITY_SETTING, IndexMetaData.INDEX_DATA_PATH_SETTING, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING,
diff --git a/core/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java b/core/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java index 5bded392fdb..c2345f2ddd8 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java +++ b/core/src/main/java/org/elasticsearch/common/settings/KeyStoreCli.java
@@ -32,6 +32,7 @@ public class KeyStoreCli extends MultiCommand { subcommands.put("create", new CreateKeyStoreCommand()); subcommands.put("list", new ListKeyStoreCommand()); subcommands.put("add", new AddStringKeyStoreCommand()); + subcommands.put("add-file", new AddFileKeyStoreCommand()); subcommands.put("remove", new RemoveSettingKeyStoreCommand()); }
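
Taken together with AddFileKeyStoreCommand above, the end-to-end flow is: read the file bytes, store them under a setting name, persist the keystore, and stream them back later. A rough same-package sketch (create, setFile, save, load, and getFile are package-private); the setting name and paths are invented for illustration:

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class FileSettingRoundTripSketch {
    public static void main(String[] args) throws Exception {
        Path configDir = Paths.get("/tmp/es-config");                        // hypothetical config directory
        byte[] fileBytes = Files.readAllBytes(Paths.get("/tmp/creds.json")); // hypothetical input file

        // roughly what "elasticsearch-keystore add-file my.secret.file /tmp/creds.json" does
        KeyStoreWrapper keystore = KeyStoreWrapper.create(new char[0]);
        keystore.setFile("my.secret.file", fileBytes);
        keystore.save(configDir);

        // roughly what a consumer does after loading the keystore
        KeyStoreWrapper loaded = KeyStoreWrapper.load(configDir);
        loaded.decrypt(new char[0]); // passwords are not yet supported
        try (InputStream in = loaded.getFile("my.secret.file")) {
            System.out.println("first byte: " + in.read());
        }
    }
}
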
diff --git a/core/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java b/core/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java index e4dd982512d..338987cc714 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java +++ b/core/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java
@@ -25,7 +25,6 @@ import javax.crypto.spec.PBEKeySpec; import javax.security.auth.DestroyFailedException; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; -import java.io.Closeable; import java.io.IOException; import java.io.InputStream; import java.nio.CharBuffer;
@@ -41,10 +40,14 @@ import java.security.KeyStore; import java.security.KeyStoreException; import java.security.NoSuchAlgorithmException; import java.util.Arrays; +import java.util.Base64; import java.util.Enumeration; +import java.util.HashMap; import java.util.HashSet; import java.util.Locale; +import java.util.Map; import java.util.Set; +import java.util.stream.Collectors; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.store.BufferedChecksumIndexInput;
@@ -54,7 +57,6 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.ElasticsearchException; /** * A wrapper around a Java KeyStore which supplements the keystore with extra metadata.
@@ -67,29 +69,52 @@ import org.elasticsearch.ElasticsearchException; */ public class KeyStoreWrapper implements SecureSettings { + /** An identifier for the type of data that may be stored in a keystore entry. */ + private enum KeyType { + STRING, + FILE + } + /** The name of the keystore file to read and write. */ private static final String KEYSTORE_FILENAME = "elasticsearch.keystore"; /** The version of the metadata written before the keystore data. */ - private static final int FORMAT_VERSION = 1; + private static final int FORMAT_VERSION = 2; + + /** The oldest metadata format version that can be read. */ + private static final int MIN_FORMAT_VERSION = 1; /** The keystore type for a newly created keystore. */ private static final String NEW_KEYSTORE_TYPE = "PKCS12"; - /** The algorithm used to store password for a newly created keystore. */ - private static final String NEW_KEYSTORE_SECRET_KEY_ALGO = "PBE";//"PBEWithHmacSHA256AndAES_128"; + /** The algorithm used to store string setting contents. */ + private static final String NEW_KEYSTORE_STRING_KEY_ALGO = "PBE"; + + /** The algorithm used to store file setting contents. */ + private static final String NEW_KEYSTORE_FILE_KEY_ALGO = "PBE"; /** An encoder to check whether string values are ascii. */ private static final CharsetEncoder ASCII_ENCODER = StandardCharsets.US_ASCII.newEncoder(); + /** The metadata format version used to read the current keystore wrapper. */ + private final int formatVersion; + /** True iff the keystore has a password needed to read. */ private final boolean hasPassword; /** The type of the keystore, as passed to {@link java.security.KeyStore#getInstance(String)} */ private final String type; - /** A factory necessary for constructing instances of secrets in a {@link KeyStore}. */ - private final SecretKeyFactory secretFactory; + /** A factory necessary for constructing instances of string secrets in a {@link KeyStore}. */ + private final SecretKeyFactory stringFactory; + + /** A factory necessary for constructing instances of file secrets in a {@link KeyStore}. */ + private final SecretKeyFactory fileFactory; + + /** + * The settings that exist in the keystore, mapped to their type of data.
+ */ + private final Map settingTypes; /** The raw bytes of the encrypted keystore. */ private final byte[] keystoreBytes; @@ -100,17 +125,19 @@ public class KeyStoreWrapper implements SecureSettings { /** The password for the keystore. See {@link #decrypt(char[])}. */ private final SetOnce keystorePassword = new SetOnce<>(); - /** The setting names contained in the loaded keystore. */ - private final Set settingNames = new HashSet<>(); - - private KeyStoreWrapper(boolean hasPassword, String type, String secretKeyAlgo, byte[] keystoreBytes) { + private KeyStoreWrapper(int formatVersion, boolean hasPassword, String type, + String stringKeyAlgo, String fileKeyAlgo, + Map settingTypes, byte[] keystoreBytes) { + this.formatVersion = formatVersion; this.hasPassword = hasPassword; this.type = type; try { - secretFactory = SecretKeyFactory.getInstance(secretKeyAlgo); + stringFactory = SecretKeyFactory.getInstance(stringKeyAlgo); + fileFactory = SecretKeyFactory.getInstance(fileKeyAlgo); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } + this.settingTypes = settingTypes; this.keystoreBytes = keystoreBytes; } @@ -121,7 +148,8 @@ public class KeyStoreWrapper implements SecureSettings { /** Constructs a new keystore with the given password. */ static KeyStoreWrapper create(char[] password) throws Exception { - KeyStoreWrapper wrapper = new KeyStoreWrapper(password.length != 0, NEW_KEYSTORE_TYPE, NEW_KEYSTORE_SECRET_KEY_ALGO, null); + KeyStoreWrapper wrapper = new KeyStoreWrapper(FORMAT_VERSION, password.length != 0, NEW_KEYSTORE_TYPE, + NEW_KEYSTORE_STRING_KEY_ALGO, NEW_KEYSTORE_FILE_KEY_ALGO, new HashMap<>(), null); KeyStore keyStore = KeyStore.getInstance(NEW_KEYSTORE_TYPE); keyStore.load(null, null); wrapper.keystore.set(keyStore); @@ -144,7 +172,7 @@ public class KeyStoreWrapper implements SecureSettings { SimpleFSDirectory directory = new SimpleFSDirectory(configDir); try (IndexInput indexInput = directory.openInput(KEYSTORE_FILENAME, IOContext.READONCE)) { ChecksumIndexInput input = new BufferedChecksumIndexInput(indexInput); - CodecUtil.checkHeader(input, KEYSTORE_FILENAME, FORMAT_VERSION, FORMAT_VERSION); + int formatVersion = CodecUtil.checkHeader(input, KEYSTORE_FILENAME, MIN_FORMAT_VERSION, FORMAT_VERSION); byte hasPasswordByte = input.readByte(); boolean hasPassword = hasPasswordByte == 1; if (hasPassword == false && hasPasswordByte != 0) { @@ -152,11 +180,25 @@ public class KeyStoreWrapper implements SecureSettings { + String.format(Locale.ROOT, "%02x", hasPasswordByte)); } String type = input.readString(); - String secretKeyAlgo = input.readString(); + String stringKeyAlgo = input.readString(); + final String fileKeyAlgo; + if (formatVersion >= 2) { + fileKeyAlgo = input.readString(); + } else { + fileKeyAlgo = NEW_KEYSTORE_FILE_KEY_ALGO; + } + final Map settingTypes; + if (formatVersion >= 2) { + settingTypes = input.readMapOfStrings().entrySet().stream().collect(Collectors.toMap( + Map.Entry::getKey, + e -> KeyType.valueOf(e.getValue()))); + } else { + settingTypes = new HashMap<>(); + } byte[] keystoreBytes = new byte[input.readInt()]; input.readBytes(keystoreBytes, 0, keystoreBytes.length); CodecUtil.checkFooter(input); - return new KeyStoreWrapper(hasPassword, type, secretKeyAlgo, keystoreBytes); + return new KeyStoreWrapper(formatVersion, hasPassword, type, stringKeyAlgo, fileKeyAlgo, settingTypes, keystoreBytes); } } @@ -189,10 +231,24 @@ public class KeyStoreWrapper implements SecureSettings { keystorePassword.set(new 
KeyStore.PasswordProtection(password)); Arrays.fill(password, '\0'); - // convert keystore aliases enum into a set for easy lookup + Enumeration aliases = keystore.get().aliases(); - while (aliases.hasMoreElements()) { - settingNames.add(aliases.nextElement()); + if (formatVersion == 1) { + while (aliases.hasMoreElements()) { + settingTypes.put(aliases.nextElement(), KeyType.STRING); + } + } else { + // verify integrity: keys in keystore match what the metadata thinks exist + Set expectedSettings = new HashSet<>(settingTypes.keySet()); + while (aliases.hasMoreElements()) { + String settingName = aliases.nextElement(); + if (expectedSettings.remove(settingName) == false) { + throw new SecurityException("Keystore has been corrupted or tampered with"); + } + } + if (expectedSettings.isEmpty() == false) { + throw new SecurityException("Keystore has been corrupted or tampered with"); + } } } @@ -206,8 +262,19 @@ public class KeyStoreWrapper implements SecureSettings { try (IndexOutput output = directory.createOutput(tmpFile, IOContext.DEFAULT)) { CodecUtil.writeHeader(output, KEYSTORE_FILENAME, FORMAT_VERSION); output.writeByte(password.length == 0 ? (byte)0 : (byte)1); - output.writeString(type); - output.writeString(secretFactory.getAlgorithm()); + output.writeString(NEW_KEYSTORE_TYPE); + output.writeString(NEW_KEYSTORE_STRING_KEY_ALGO); + output.writeString(NEW_KEYSTORE_FILE_KEY_ALGO); + output.writeMapOfStrings(settingTypes.entrySet().stream().collect(Collectors.toMap( + Map.Entry::getKey, + e -> e.getValue().name()))); + + // TODO: in the future if we ever change any algorithms used above, we need + // to create a new KeyStore here instead of using the existing one, so that + // the encoded material inside the keystore is updated + assert type.equals(NEW_KEYSTORE_TYPE) : "keystore type changed"; + assert stringFactory.getAlgorithm().equals(NEW_KEYSTORE_STRING_KEY_ALGO) : "string pbe algo changed"; + assert fileFactory.getAlgorithm().equals(NEW_KEYSTORE_FILE_KEY_ALGO) : "file pbe algo changed"; ByteArrayOutputStream keystoreBytesStream = new ByteArrayOutputStream(); keystore.get().store(keystoreBytesStream, password); @@ -228,25 +295,51 @@ public class KeyStoreWrapper implements SecureSettings { @Override public Set getSettingNames() { - return settingNames; + return settingTypes.keySet(); } // TODO: make settings accessible only to code that registered the setting - /** Retrieve a string setting. The {@link SecureString} should be closed once it is used. */ @Override public SecureString getString(String setting) throws GeneralSecurityException { KeyStore.Entry entry = keystore.get().getEntry(setting, keystorePassword.get()); - if (entry instanceof KeyStore.SecretKeyEntry == false) { + if (settingTypes.get(setting) != KeyType.STRING || + entry instanceof KeyStore.SecretKeyEntry == false) { throw new IllegalStateException("Secret setting " + setting + " is not a string"); } // TODO: only allow getting a setting once? 
KeyStore.SecretKeyEntry secretKeyEntry = (KeyStore.SecretKeyEntry) entry; - PBEKeySpec keySpec = (PBEKeySpec) secretFactory.getKeySpec(secretKeyEntry.getSecretKey(), PBEKeySpec.class); + PBEKeySpec keySpec = (PBEKeySpec) stringFactory.getKeySpec(secretKeyEntry.getSecretKey(), PBEKeySpec.class); SecureString value = new SecureString(keySpec.getPassword()); keySpec.clearPassword(); return value; } + @Override + public InputStream getFile(String setting) throws GeneralSecurityException { + KeyStore.Entry entry = keystore.get().getEntry(setting, keystorePassword.get()); + if (settingTypes.get(setting) != KeyType.FILE || + entry instanceof KeyStore.SecretKeyEntry == false) { + throw new IllegalStateException("Secret setting " + setting + " is not a file"); + } + KeyStore.SecretKeyEntry secretKeyEntry = (KeyStore.SecretKeyEntry) entry; + PBEKeySpec keySpec = (PBEKeySpec) fileFactory.getKeySpec(secretKeyEntry.getSecretKey(), PBEKeySpec.class); + // The PBE keyspec gives us chars, we first convert to bytes, then decode base64 inline. + char[] chars = keySpec.getPassword(); + byte[] bytes = new byte[chars.length]; + for (int i = 0; i < bytes.length; ++i) { + bytes[i] = (byte)chars[i]; // PBE only stores the lower 8 bits, so this narrowing is ok + } + keySpec.clearPassword(); // wipe the original copy + InputStream bytesStream = new ByteArrayInputStream(bytes) { + @Override + public void close() throws IOException { + super.close(); + Arrays.fill(bytes, (byte)0); // wipe our second copy when the stream is exhausted + } + }; + return Base64.getDecoder().wrap(bytesStream); + } + /** * Set a string setting. *
@@ -256,15 +349,27 @@ public class KeyStoreWrapper implements SecureSettings { if (ASCII_ENCODER.canEncode(CharBuffer.wrap(value)) == false) { throw new IllegalArgumentException("Value must be ascii"); } - SecretKey secretKey = secretFactory.generateSecret(new PBEKeySpec(value)); + SecretKey secretKey = stringFactory.generateSecret(new PBEKeySpec(value)); keystore.get().setEntry(setting, new KeyStore.SecretKeyEntry(secretKey), keystorePassword.get()); - settingNames.add(setting); + settingTypes.put(setting, KeyType.STRING); + } + + /** Set a file setting. */ + void setFile(String setting, byte[] bytes) throws GeneralSecurityException { + bytes = Base64.getEncoder().encode(bytes); + char[] chars = new char[bytes.length]; + for (int i = 0; i < chars.length; ++i) { + chars[i] = (char)bytes[i]; // base64 output is ascii, so this widening cast is lossless + } + SecretKey secretKey = fileFactory.generateSecret(new PBEKeySpec(chars)); + keystore.get().setEntry(setting, new KeyStore.SecretKeyEntry(secretKey), keystorePassword.get()); + settingTypes.put(setting, KeyType.FILE); }
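
Both conversion loops above rely on the same invariant: Base64 output uses only ASCII code points below 128, so the byte-to-char cast is lossless on the way in and keeping the lower 8 bits is sufficient on the way out. A small self-contained demonstration:

import java.util.Arrays;
import java.util.Base64;

public class Base64NarrowingSketch {
    public static void main(String[] args) {
        byte[] original = {(byte) 0xFF, 0x00, 0x7B, (byte) 0x80}; // arbitrary binary content
        byte[] encoded = Base64.getEncoder().encode(original);    // every encoded byte is < 128

        // widen bytes to chars, as setFile does before building the PBEKeySpec
        char[] chars = new char[encoded.length];
        for (int i = 0; i < chars.length; ++i) {
            chars[i] = (char) encoded[i];
        }
        // narrow chars back to bytes, as getFile does; the lower 8 bits carry everything
        byte[] roundTripped = new byte[chars.length];
        for (int i = 0; i < roundTripped.length; ++i) {
            roundTripped[i] = (byte) chars[i];
        }
        System.out.println(Arrays.equals(original, Base64.getDecoder().decode(roundTripped))); // true
    }
}
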
/** Remove the given setting from the keystore. */ void remove(String setting) throws KeyStoreException { keystore.get().deleteEntry(setting); - settingNames.remove(setting); + settingTypes.remove(setting); } @Override
diff --git a/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java b/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java index a9e4effb0d9..2efb36696c5 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SecureSetting.java
@@ -19,6 +19,7 @@ package org.elasticsearch.common.settings; +import java.io.InputStream; import java.security.GeneralSecurityException; import java.util.Arrays; import java.util.HashSet;
@@ -137,5 +138,26 @@ public abstract class SecureSetting<T> extends Setting<T> { }; } + /** + * A setting which contains a file. Reading the setting opens an input stream to the file. + * + * This may be any sensitive file, e.g. a set of credentials normally in plaintext. + */ + public static Setting<InputStream> secureFile(String name, Setting<InputStream> fallback, + Property... properties) { + return new SecureSetting<InputStream>(name, properties) { + @Override + protected InputStream getSecret(SecureSettings secureSettings) throws GeneralSecurityException { + return secureSettings.getFile(getKey()); + } + @Override + InputStream getFallback(Settings settings) { + if (fallback != null) { + return fallback.get(settings); + } + return null; + } + }; + } }
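
A hypothetical consumer of the new secureFile helper; the setting key is invented and no fallback is registered, so this is only a sketch of the intended call pattern:

import java.io.InputStream;

import org.elasticsearch.common.settings.SecureSetting;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

public class SecureFileSettingSketch {
    // hypothetical setting key, registered by a plugin; null means no fallback setting
    static final Setting<InputStream> CREDENTIALS_FILE =
            SecureSetting.secureFile("example.credentials_file", null);

    static void readCredentials(Settings settings) throws Exception {
        // opens a stream over the bytes stored via the add-file keystore command
        try (InputStream in = CREDENTIALS_FILE.get(settings)) {
            // parse the credential bytes as needed; close the stream once used
        }
    }
}
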
diff --git a/core/src/main/java/org/elasticsearch/common/settings/SecureSettings.java b/core/src/main/java/org/elasticsearch/common/settings/SecureSettings.java index f7098686466..c5a364f5473 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SecureSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SecureSettings.java
@@ -20,6 +20,7 @@ package org.elasticsearch.common.settings; import java.io.Closeable; +import java.io.InputStream; import java.security.GeneralSecurityException; import java.util.Set;
@@ -36,4 +37,7 @@ public interface SecureSettings extends Closeable { /** Return a string setting. The {@link SecureString} should be closed once it is used. */ SecureString getString(String setting) throws GeneralSecurityException; + + /** Return a file setting. The {@link InputStream} should be closed once it is used. */ + InputStream getFile(String setting) throws GeneralSecurityException; }
diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java b/core/src/main/java/org/elasticsearch/common/settings/Settings.java index db1cf44db22..374d923d30a 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java
@@ -1294,6 +1294,11 @@ public final class Settings implements ToXContent { return delegate.getString(keyTransform.apply(setting)); } + @Override + public InputStream getFile(String setting) throws GeneralSecurityException { + return delegate.getFile(keyTransform.apply(setting)); + } + @Override public void close() throws IOException { delegate.close();
diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index e531408b57a..ab969b17d49 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java
@@ -157,13 +157,6 @@ public final class NodeEnvironment implements Closeable { public static final Setting MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 1, 1, Property.NodeScope); - /** - * If true automatically append node lock id to custom data paths. - */ - public static final Setting ADD_NODE_LOCK_ID_TO_CUSTOM_PATH = - Setting.boolSetting("node.add_lock_id_to_custom_path", true, Property.NodeScope); - - /** * Seed for determining a persisted unique uuid of this node. If the node has already a persisted uuid on disk, * this seed will be ignored and the uuid from disk will be reused.
@@ -922,11 +915,7 @@ public final class NodeEnvironment implements Closeable { if (customDataDir != null) { // This assert is because this should be caught by MetaDataCreateIndexService assert sharedDataPath != null; - if (ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.get(indexSettings.getNodeSettings())) { - return sharedDataPath.resolve(customDataDir).resolve(Integer.toString(this.nodeLockId)); - } else { - return sharedDataPath.resolve(customDataDir); - } + return sharedDataPath.resolve(customDataDir).resolve(Integer.toString(this.nodeLockId)); } else { throw new IllegalArgumentException("no custom " + IndexMetaData.SETTING_DATA_PATH + " setting available"); }
diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index 717453d2026..c66c00728a7 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java
@@ -106,11 +106,10 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(unassignedShard.index()); final Set inSyncAllocationIds = indexMetaData.inSyncAllocationIds(unassignedShard.id()); final boolean snapshotRestore = unassignedShard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT; - final boolean recoverOnAnyNode = recoverOnAnyNode(indexMetaData); assert inSyncAllocationIds.isEmpty() == false; // use in-sync allocation ids to select nodes - final NodeShardsResult nodeShardsResult = buildNodeShardsResult(unassignedShard, snapshotRestore || recoverOnAnyNode, + final NodeShardsResult nodeShardsResult = buildNodeShardsResult(unassignedShard, snapshotRestore, allocation.getIgnoreNodes(unassignedShard.shardId()),
inSyncAllocationIds, shardState, logger); final boolean enoughAllocationsFound = nodeShardsResult.orderedAllocationCandidates.size() > 0; logger.debug("[{}][{}]: found {} allocation candidates of {} based on allocation ids: [{}]", unassignedShard.index(),
@@ -122,10 +121,6 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { logger.debug("[{}][{}]: missing local data, will restore from [{}]", unassignedShard.index(), unassignedShard.id(), unassignedShard.recoverySource()); return AllocateUnassignedDecision.NOT_TAKEN; - } else if (recoverOnAnyNode) { - // let BalancedShardsAllocator take care of allocating this shard - logger.debug("[{}][{}]: missing local data, recover from any node", unassignedShard.index(), unassignedShard.id()); - return AllocateUnassignedDecision.NOT_TAKEN; } else { // We have a shard that was previously allocated, but we could not find a valid shard copy to allocate the primary. // We could just be waiting for the node that holds the primary to start back up, in which case the allocation for
@@ -331,19 +326,6 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator { Collections.unmodifiableList(noNodeShards)); } - /** - * Return {@code true} if the index is configured to allow shards to be - * recovered on any node - */ - private boolean recoverOnAnyNode(IndexMetaData metaData) { - // don't use the setting directly, not to trigger verbose deprecation logging - return (metaData.isOnSharedFilesystem(metaData.getSettings()) || metaData.isOnSharedFilesystem(this.settings)) - && (metaData.getSettings().getAsBooleanLenientForPreEs6Indices( - metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, deprecationLogger) || - this.settings.getAsBooleanLenientForPreEs6Indices - (metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, deprecationLogger)); - } - protected abstract FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation); private static class NodeShardsResult {
diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index ee35993c01e..e528dde7179 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java
@@ -55,7 +55,6 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.shard.IndexingOperationListener; import org.elasticsearch.index.shard.SearchOperationListener; -import org.elasticsearch.index.shard.ShadowIndexShard; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.index.shard.ShardPath;
@@ -343,8 +342,5 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust logger.debug("creating shard_id {}", shardId); - // if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary.
- final boolean canDeleteShardContent = this.indexSettings.isOnSharedFilesystem() == false || - (primary && this.indexSettings.isOnSharedFilesystem()); final Engine.Warmer engineWarmer = (searcher) -> { IndexShard shard = getShardOrNull(shardId.getId()); if (shard != null) { @@ -352,18 +349,11 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust } }; store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, - new StoreCloseListener(shardId, canDeleteShardContent, () -> eventListener.onStoreClosed(shardId))); - if (useShadowEngine(primary, this.indexSettings)) { - indexShard = new ShadowIndexShard(routing, this.indexSettings, path, store, indexCache, mapperService, similarityService, - indexFieldData, engineFactory, eventListener, searcherWrapper, threadPool, bigArrays, engineWarmer, - searchOperationListeners); - // no indexing listeners - shadow engines don't index - } else { - indexShard = new IndexShard(routing, this.indexSettings, path, store, indexCache, mapperService, similarityService, + new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId))); + indexShard = new IndexShard(routing, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, threadPool, bigArrays, engineWarmer, () -> globalCheckpointSyncer.accept(shardId), searchOperationListeners, indexingOperationListeners); - } eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); shards = newMapBuilder(shards).put(shardId.id(), indexShard).immutableMap(); @@ -381,10 +371,6 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust } } - static boolean useShadowEngine(boolean primary, IndexSettings indexSettings) { - return primary == false && indexSettings.isShadowReplicaIndex(); - } - @Override public synchronized void removeShard(int shardId, String reason) { final ShardId sId = new ShardId(index(), shardId); @@ -438,16 +424,14 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust } - private void onShardClose(ShardLock lock, boolean ownsShard) { + private void onShardClose(ShardLock lock) { if (deleted.get()) { // we remove that shards content if this index has been deleted try { - if (ownsShard) { - try { - eventListener.beforeIndexShardDeleted(lock.getShardId(), indexSettings.getSettings()); - } finally { - shardStoreDeleter.deleteShardStore("delete index", lock, indexSettings); - eventListener.afterIndexShardDeleted(lock.getShardId(), indexSettings.getSettings()); - } + try { + eventListener.beforeIndexShardDeleted(lock.getShardId(), indexSettings.getSettings()); + } finally { + shardStoreDeleter.deleteShardStore("delete index", lock, indexSettings); + eventListener.afterIndexShardDeleted(lock.getShardId(), indexSettings.getSettings()); } } catch (IOException e) { shardStoreDeleter.addPendingDelete(lock.getShardId(), indexSettings); @@ -514,12 +498,10 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private class StoreCloseListener implements Store.OnClose { private final ShardId shardId; - private final boolean ownsShard; private final Closeable[] toClose; - StoreCloseListener(ShardId shardId, boolean ownsShard, Closeable... toClose) { + StoreCloseListener(ShardId shardId, Closeable... 
toClose) { this.shardId = shardId; - this.ownsShard = ownsShard; this.toClose = toClose; } @@ -527,7 +509,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust public void handle(ShardLock lock) { try { assert lock.getShardId().equals(shardId) : "shard id mismatch, expected: " + shardId + " but got: " + lock.getShardId(); - onShardClose(lock, ownsShard); + onShardClose(lock); } finally { try { IOUtils.close(toClose); diff --git a/core/src/main/java/org/elasticsearch/index/IndexSettings.java b/core/src/main/java/org/elasticsearch/index/IndexSettings.java index 4ae16255d5e..011229256af 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/core/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -160,7 +160,6 @@ public final class IndexSettings { private final String nodeName; private final Settings nodeSettings; private final int numberOfShards; - private final boolean isShadowReplicaIndex; // volatile fields are updated via #updateIndexMetaData(IndexMetaData) under lock private volatile Settings settings; private volatile IndexMetaData indexMetaData; @@ -257,7 +256,6 @@ public final class IndexSettings { nodeName = Node.NODE_NAME_SETTING.get(settings); this.indexMetaData = indexMetaData; numberOfShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null); - isShadowReplicaIndex = indexMetaData.isIndexUsingShadowReplicas(settings); this.defaultField = DEFAULT_FIELD_SETTING.get(settings); this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings); @@ -359,15 +357,6 @@ public final class IndexSettings { return settings.get(IndexMetaData.SETTING_DATA_PATH); } - /** - * Returns true iff the given settings indicate that the index - * associated with these settings allocates it's shards on a shared - * filesystem. - */ - public boolean isOnSharedFilesystem() { - return indexMetaData.isOnSharedFilesystem(getSettings()); - } - /** * Returns the version the index was created on. * @see Version#indexCreated(Settings) @@ -400,12 +389,6 @@ public final class IndexSettings { */ public int getNumberOfReplicas() { return settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null); } - /** - * Returns true iff this index uses shadow replicas. - * @see IndexMetaData#isIndexUsingShadowReplicas(Settings) - */ - public boolean isShadowReplicaIndex() { return isShadowReplicaIndex; } - /** * Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the * index settings and the node settings where node settings are overwritten by index settings. 
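
The precedence described in this javadoc (index settings overwrite node settings) follows from last-write-wins semantics in the Settings builder. A minimal sketch with an illustrative key:

import org.elasticsearch.common.settings.Settings;

public class SettingsPrecedenceSketch {
    public static void main(String[] args) {
        Settings nodeSettings = Settings.builder()
                .put("index.refresh_interval", "1s")  // node-level value (illustrative key)
                .build();
        Settings indexSettings = Settings.builder()
                .put("index.refresh_interval", "5s")  // index-level value
                .build();
        Settings merged = Settings.builder()
                .put(nodeSettings)   // lower precedence first
                .put(indexSettings)  // later puts overwrite earlier keys
                .build();
        System.out.println(merged.get("index.refresh_interval")); // prints 5s
    }
}
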
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java index 63861e80849..282edaeaf73 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.analysis; -import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.TextFieldMapper; @@ -78,19 +77,6 @@ public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider searcherFacotry) throws EngineException { - // There is no translog, so we can get it directly from the searcher - return getFromSearcher(get, searcherFacotry); - } - - @Override - public Translog getTranslog() { - throw new UnsupportedOperationException("shadow engines don't have translogs"); - } - - @Override - public List segments(boolean verbose) { - try (ReleasableLock lock = readLock.acquire()) { - Segment[] segmentsArr = getSegmentInfo(lastCommittedSegmentInfos, verbose); - for (int i = 0; i < segmentsArr.length; i++) { - // hard code all segments as committed, because they are in - // order for the shadow replica to see them - segmentsArr[i].committed = true; - } - return Arrays.asList(segmentsArr); - } - } - - @Override - public void refresh(String source) throws EngineException { - // we obtain a read lock here, since we don't want a flush to happen while we are refreshing - // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it) - try (ReleasableLock lock = readLock.acquire()) { - ensureOpen(); - searcherManager.maybeRefreshBlocking(); - } catch (AlreadyClosedException e) { - throw e; - } catch (Exception e) { - try { - failEngine("refresh failed", e); - } catch (Exception inner) { - e.addSuppressed(inner); - } - throw new RefreshFailedEngineException(shardId, e); - } - } - - @Override - public IndexCommit acquireIndexCommit(boolean flushFirst) throws EngineException { - throw new UnsupportedOperationException("Can not take snapshot from a shadow engine"); - } - - @Override - protected SearcherManager getSearcherManager() { - return searcherManager; - } - - @Override - protected void closeNoLock(String reason) { - if (isClosed.compareAndSet(false, true)) { - try { - logger.debug("shadow replica close searcher manager refCount: {}", store.refCount()); - IOUtils.close(searcherManager); - } catch (Exception e) { - logger.warn("shadow replica failed to close searcher manager", e); - } finally { - store.decRef(); - } - } - } - - @Override - protected SegmentInfos getLastCommittedSegmentInfos() { - return lastCommittedSegmentInfos; - } - - @Override - public long getIndexBufferRAMBytesUsed() { - // No IndexWriter nor version map - throw new UnsupportedOperationException("ShadowEngine has no IndexWriter"); - } - - @Override - public void writeIndexingBuffer() { - // No indexing buffer - throw new UnsupportedOperationException("ShadowEngine has no IndexWriter"); - } - - @Override - public void activateThrottling() { - throw new UnsupportedOperationException("ShadowEngine has no IndexWriter"); - } - - @Override - public void deactivateThrottling() { - throw new UnsupportedOperationException("ShadowEngine has no IndexWriter"); - } - - @Override - public SequenceNumbersService seqNoService() { - throw new 
UnsupportedOperationException("ShadowEngine doesn't track sequence numbers"); - } - - @Override - public boolean isThrottled() { - return false; - } - - @Override - public long getIndexThrottleTimeInMillis() { - return 0L; - } - - @Override - public Engine recoverFromTranslog() throws IOException { - throw new UnsupportedOperationException("can't recover on a shadow engine"); - } - -} diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index 4045d968c5c..f006f056f93 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.IndexOrDocValuesQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; @@ -286,9 +285,8 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder[{}], reason [{}]", state, newState, reason); IndexShardState previousState = state; state = newState; @@ -1921,9 +1923,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } /** - * Build {@linkplain RefreshListeners} for this shard. Protected so {@linkplain ShadowIndexShard} can override it to return null. + * Build {@linkplain RefreshListeners} for this shard. */ - protected RefreshListeners buildRefreshListeners() { + private RefreshListeners buildRefreshListeners() { return new RefreshListeners( indexSettings::getMaxRefreshListeners, () -> refresh("too_many_listeners"), diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java deleted file mode 100644 index 638c2fe2783..00000000000 --- a/core/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.index.shard; - -import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.cache.IndexCache; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.engine.EngineConfig; -import org.elasticsearch.index.engine.EngineFactory; -import org.elasticsearch.index.fielddata.IndexFieldDataService; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.merge.MergeStats; -import org.elasticsearch.index.seqno.SeqNoStats; -import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.index.store.Store; -import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.TranslogStats; -import org.elasticsearch.threadpool.ThreadPool; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.function.Consumer; - -/** - * ShadowIndexShard extends {@link IndexShard} to add file synchronization - * from the primary when a flush happens. It also ensures that a replica being - * promoted to a primary causes the shard to fail, kicking off a re-allocation - * of the primary shard. - */ -public final class ShadowIndexShard extends IndexShard { - - public ShadowIndexShard(ShardRouting shardRouting, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, - MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService, - @Nullable EngineFactory engineFactory, IndexEventListener indexEventListener, IndexSearcherWrapper wrapper, - ThreadPool threadPool, BigArrays bigArrays, Engine.Warmer engineWarmer, - List searchOperationListeners) throws IOException { - super(shardRouting, indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldDataService, engineFactory, - indexEventListener, wrapper, threadPool, bigArrays, engineWarmer, () -> { - }, searchOperationListeners, Collections.emptyList()); - } - - /** - * In addition to the regular accounting done in - * {@link IndexShard#updateRoutingEntry(ShardRouting)}, - * if this shadow replica needs to be promoted to a primary, the shard is - * failed in order to allow a new primary to be re-allocated. - */ - @Override - public void updateRoutingEntry(ShardRouting newRouting) throws IOException { - if (newRouting.primary()) {// becoming a primary - throw new IllegalStateException("can't promote shard to primary"); - } - super.updateRoutingEntry(newRouting); - } - - @Override - public MergeStats mergeStats() { - return new MergeStats(); - } - - @Override - public SeqNoStats seqNoStats() { - return null; - } - - @Override - public boolean canIndex() { - return false; - } - - @Override - protected Engine newEngine(EngineConfig config) { - assert this.shardRouting.primary() == false; - assert config.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG; - return engineFactory.newReadOnlyEngine(config); - } - - @Override - protected RefreshListeners buildRefreshListeners() { - // ShadowEngine doesn't have a translog so it shouldn't try to support RefreshListeners. 
- return null; - } - - @Override - public boolean shouldFlush() { - // we don't need to flush since we don't write - all dominated by the primary - return false; - } - - @Override - public TranslogStats translogStats() { - return null; // shadow engine has no translog - } - - @Override - public void updateGlobalCheckpointOnReplica(long checkpoint) { - } - - @Override - public long getLocalCheckpoint() { - return -1; - } - - @Override - public long getGlobalCheckpoint() { - return -1; - } - - @Override - public void addRefreshListener(Translog.Location location, Consumer listener) { - throw new UnsupportedOperationException("Can't listen for a refresh on a shadow engine because it doesn't have a translog"); - } - - @Override - public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException { - throw new UnsupportedOperationException("can't snapshot the directory as the primary may change it underneath us"); - } - - @Override - protected void onNewEngine(Engine newEngine) { - // nothing to do here - the superclass sets the translog on some listeners but we don't have such a thing - } - -} diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java index 37b728d43d6..f3597d7e5c9 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java @@ -331,14 +331,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent { } else if (writtenBy == null) { throw new ElasticsearchParseException("missing or invalid written_by [" + writtenByStr + "]"); } else if (checksum == null) { - if (physicalName.startsWith("segments_") - && writtenBy.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION) == false) { - // its possible the checksum is null for segments_N files that belong to a shard with no data, - // so we will assign it _na_ for now and try to get the checksum from the file itself later - checksum = UNKNOWN_CHECKSUM; - } else { - throw new ElasticsearchParseException("missing checksum for name [" + name + "]"); - } + throw new ElasticsearchParseException("missing checksum for name [" + name + "]"); } return new FileInfo(name, new StoreFileMetaData(physicalName, length, checksum, writtenBy, metaHash), partSize); } diff --git a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java index bf8e8466dae..fc605430066 100644 --- a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java +++ b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java @@ -28,7 +28,6 @@ import org.apache.lucene.store.NIOFSDirectory; import org.apache.lucene.store.NativeFSLockFactory; import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.store.SimpleFSLockFactory; -import org.apache.lucene.store.SleepingLockWrapper; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -74,9 +73,6 @@ public class FsDirectoryService extends DirectoryService { Set preLoadExtensions = new HashSet<>( indexSettings.getValue(IndexModule.INDEX_STORE_PRE_LOAD_SETTING)); wrapped = setPreload(wrapped, location, lockFactory, preLoadExtensions); - if (indexSettings.isOnSharedFilesystem()) { - 
wrapped = new SleepingLockWrapper(wrapped, 5000); - } return wrapped; } diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java index 51516c3dded..e6e46e00ac0 100644 --- a/core/src/main/java/org/elasticsearch/index/store/Store.java +++ b/core/src/main/java/org/elasticsearch/index/store/Store.java @@ -449,7 +449,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref boolean success = false; try { assert metadata.writtenBy() != null; - assert metadata.writtenBy().onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION); output = new LuceneVerifyingIndexOutput(metadata, output); success = true; } finally { @@ -468,7 +467,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref public IndexInput openVerifyingInput(String filename, IOContext context, StoreFileMetaData metadata) throws IOException { assert metadata.writtenBy() != null; - assert metadata.writtenBy().onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION); return new VerifyingIndexInput(directory().openInput(filename, context)); } @@ -813,22 +811,14 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref maxVersion = version; } for (String file : info.files()) { - if (version.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION)) { - checksumFromLuceneFile(directory, file, builder, logger, version, SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file))); - } else { - throw new IllegalStateException("version must be onOrAfter: " + StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION + " but was: " + version); - } + checksumFromLuceneFile(directory, file, builder, logger, version, SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file))); } } if (maxVersion == null) { - maxVersion = StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION; + maxVersion = org.elasticsearch.Version.CURRENT.minimumIndexCompatibilityVersion().luceneVersion; } final String segmentsFile = segmentCommitInfos.getSegmentsFileName(); - if (maxVersion.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION)) { - checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true); - } else { - throw new IllegalStateException("version must be onOrAfter: " + StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION + " but was: " + maxVersion); - } + checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true); } catch (CorruptIndexException | IndexNotFoundException | IndexFormatTooOldException | IndexFormatTooNewException ex) { // we either know the index is corrupted or it's just not there throw ex; diff --git a/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java b/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java index c284ad8313c..908063173c2 100644 --- a/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java +++ b/core/src/main/java/org/elasticsearch/index/store/StoreFileMetaData.java @@ -27,12 +27,11 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.lucene.Lucene; import java.io.IOException; +import java.text.ParseException; import java.util.Objects; public class StoreFileMetaData implements Writeable { - public static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_5_0_0; - private final String name; // the actual file size on "disk", if compressed, the compressed size @@ -44,20 +43,11 @@ public class StoreFileMetaData implements Writeable { 
private final BytesRef hash; - public StoreFileMetaData(String name, long length, String checksum) { - this(name, length, checksum, FIRST_LUCENE_CHECKSUM_VERSION); - } - public StoreFileMetaData(String name, long length, String checksum, Version writtenBy) { this(name, length, checksum, writtenBy, null); } public StoreFileMetaData(String name, long length, String checksum, Version writtenBy, BytesRef hash) { - // its possible here to have a _na_ checksum or an unsupported writtenBy version, if the - // file is a segments_N file, but that is fine in the case of a segments_N file because - // we handle that case upstream - assert name.startsWith("segments_") || (writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) : - "index version less that " + FIRST_LUCENE_CHECKSUM_VERSION + " are not supported but got: " + writtenBy; this.name = Objects.requireNonNull(name, "name must not be null"); this.length = length; this.checksum = Objects.requireNonNull(checksum, "checksum must not be null"); @@ -72,8 +62,11 @@ public class StoreFileMetaData implements Writeable { name = in.readString(); length = in.readVLong(); checksum = in.readString(); - // TODO Why not Version.parse? - writtenBy = Lucene.parseVersionLenient(in.readString(), FIRST_LUCENE_CHECKSUM_VERSION); + try { + writtenBy = Version.parse(in.readString()); + } catch (ParseException e) { + throw new AssertionError(e); + } hash = in.readBytesRef(); } diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index 7bf80cc1986..bde4438158c 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -424,11 +424,11 @@ public class IndicesService extends AbstractLifecycleComponent IndexingOperationListener... indexingOperationListeners) throws IOException { final Index index = indexMetaData.getIndex(); final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexScopeSetting); - logger.debug("creating Index [{}], shards [{}]/[{}{}] - reason [{}]", + logger.debug("creating Index [{}], shards [{}]/[{}] - reason [{}]", indexMetaData.getIndex(), idxSettings.getNumberOfShards(), idxSettings.getNumberOfReplicas(), - idxSettings.isShadowReplicaIndex() ? 
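Returning to the StoreFileMetaData change above: the removed TODO asked why Lucene.parseVersionLenient was used at all, and the answer was pre-5.0 wire compatibility, which is now gone. With only real Lucene version strings left on the wire, Version.parse is called directly and a ParseException is treated as a sender-side bug rather than a recoverable condition. A small sketch of that pattern (the method name is illustrative):

    import org.apache.lucene.util.Version;

    import java.text.ParseException;

    public class ParseWrittenBySketch {
        // A malformed version string can only come from a bug on the sending
        // side, so it is surfaced as an AssertionError, not a checked exception.
        static Version parseWrittenBy(String raw) {
            try {
                return Version.parse(raw);
            } catch (ParseException e) {
                throw new AssertionError("unexpected lucene version: " + raw, e);
            }
        }

        public static void main(String[] args) {
            System.out.println(parseWrittenBy("6.4.0")); // prints 6.4.0
        }
    }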
"s" : "", reason); + reason); final IndexModule indexModule = new IndexModule(idxSettings, analysisRegistry); for (IndexingOperationListener operationListener : indexingOperationListeners) { @@ -732,16 +732,11 @@ public class IndicesService extends AbstractLifecycleComponent * @return true if the index can be deleted on this node */ public boolean canDeleteIndexContents(Index index, IndexSettings indexSettings) { - // index contents can be deleted if the index is not on a shared file system, - // or if its on a shared file system but its an already closed index (so all - // its resources have already been relinquished) - if (indexSettings.isOnSharedFilesystem() == false || indexSettings.getIndexMetaData().getState() == IndexMetaData.State.CLOSE) { - final IndexService indexService = indexService(index); - if (indexService == null && nodeEnv.hasNodeFile()) { - return true; - } - } else { - logger.trace("{} skipping index directory deletion due to shadow replicas", index); + // index contents can be deleted if its an already closed index (so all its resources have + // already been relinquished) + final IndexService indexService = indexService(index); + if (indexService == null && nodeEnv.hasNodeFile()) { + return true; } return false; } @@ -789,7 +784,6 @@ public class IndicesService extends AbstractLifecycleComponent FOLDER_FOUND_CAN_DELETE, // shard data exists and can be deleted STILL_ALLOCATED, // the shard is still allocated / active on this node NO_FOLDER_FOUND, // the shards data locations do not exist - SHARED_FILE_SYSTEM, // the shard is located on shared and should not be deleted NO_LOCAL_STORAGE // node does not have local storage (see DiscoveryNode.nodeRequiresLocalStorage) } @@ -802,30 +796,25 @@ public class IndicesService extends AbstractLifecycleComponent public ShardDeletionCheckResult canDeleteShardContent(ShardId shardId, IndexSettings indexSettings) { assert shardId.getIndex().equals(indexSettings.getIndex()); final IndexService indexService = indexService(shardId.getIndex()); - if (indexSettings.isOnSharedFilesystem() == false) { - if (nodeEnv.hasNodeFile()) { - final boolean isAllocated = indexService != null && indexService.hasShard(shardId.id()); - if (isAllocated) { - return ShardDeletionCheckResult.STILL_ALLOCATED; // we are allocated - can't delete the shard - } else if (indexSettings.hasCustomDataPath()) { - // lets see if it's on a custom path (return false if the shared doesn't exist) - // we don't need to delete anything that is not there - return Files.exists(nodeEnv.resolveCustomLocation(indexSettings, shardId)) ? + if (nodeEnv.hasNodeFile()) { + final boolean isAllocated = indexService != null && indexService.hasShard(shardId.id()); + if (isAllocated) { + return ShardDeletionCheckResult.STILL_ALLOCATED; // we are allocated - can't delete the shard + } else if (indexSettings.hasCustomDataPath()) { + // lets see if it's on a custom path (return false if the shared doesn't exist) + // we don't need to delete anything that is not there + return Files.exists(nodeEnv.resolveCustomLocation(indexSettings, shardId)) ? ShardDeletionCheckResult.FOLDER_FOUND_CAN_DELETE : ShardDeletionCheckResult.NO_FOLDER_FOUND; - } else { - // lets see if it's path is available (return false if the shared doesn't exist) - // we don't need to delete anything that is not there - return FileSystemUtils.exists(nodeEnv.availableShardPaths(shardId)) ? 
- ShardDeletionCheckResult.FOLDER_FOUND_CAN_DELETE : - ShardDeletionCheckResult.NO_FOLDER_FOUND; - } } else { - return ShardDeletionCheckResult.NO_LOCAL_STORAGE; - } + // let's see if its path is available (return false if the shard path doesn't exist) + // we don't need to delete anything that is not there + return FileSystemUtils.exists(nodeEnv.availableShardPaths(shardId)) ? + ShardDeletionCheckResult.FOLDER_FOUND_CAN_DELETE : + ShardDeletionCheckResult.NO_FOLDER_FOUND; + } } else { - logger.trace("{} skipping shard directory deletion due to shadow replicas", shardId); + return ShardDeletionCheckResult.NO_LOCAL_STORAGE; } } diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 2307a711714..663cdece6ac 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -403,20 +403,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple // state may result in a new shard being initialized while having the same allocation id as the currently started shard. logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting); indexService.removeShard(shardId.id(), "removing shard (stale copy)"); - } else { - // remove shards where recovery source has changed. This re-initializes shards later in createOrUpdateShards - if (newShardRouting.recoverySource() != null && newShardRouting.recoverySource().getType() == Type.PEER) { - RecoveryState recoveryState = shard.recoveryState(); - final DiscoveryNode sourceNode = findSourceNodeForPeerRecovery(logger, routingTable, nodes, newShardRouting); - if (recoveryState.getSourceNode().equals(sourceNode) == false) { - if (recoveryTargetService.cancelRecoveriesForShard(shardId, "recovery source node changed")) { - // getting here means that the shard was still recovering - logger.debug("{} removing shard (recovery source changed), current [{}], global [{}], shard [{}])", - shardId, recoveryState.getSourceNode(), sourceNode, newShardRouting); - indexService.removeShard(shardId.id(), "removing shard (recovery source node changed)"); - } - } - } } } } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java index e2113957690..93de86193b5 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java @@ -197,13 +197,8 @@ public class PeerRecoverySourceService extends AbstractComponent implements Inde new RemoteRecoveryTargetHandler(request.recoveryId(), request.shardId(), targetAllocationId, transportService, request.targetNode(), recoverySettings, throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime)); Supplier currentClusterStateVersionSupplier = () -> clusterService.state().getVersion(); - if (shard.indexSettings().isOnSharedFilesystem()) { - handler = new SharedFSRecoverySourceHandler(shard, recoveryTarget, request, currentClusterStateVersionSupplier, - this::delayNewRecoveries, settings); - } else { - handler = new RecoverySourceHandler(shard, recoveryTarget, request, currentClusterStateVersionSupplier, + handler
= new RecoverySourceHandler(shard, recoveryTarget, request, currentClusterStateVersionSupplier, this::delayNewRecoveries, recoverySettings.getChunkSize().bytesAsInt(), settings); - } return handler; } diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 631c18de97f..a93cdd51e38 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -126,17 +126,6 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde } } - /** - * Cancel all ongoing recoveries for the given shard. - * - * @param reason reason for cancellation - * @param shardId shard ID for which to cancel recoveries - * @return {@code true} if a recovery was cancelled - */ - public boolean cancelRecoveriesForShard(ShardId shardId, String reason) { - return onGoingRecoveries.cancelRecoveriesForShard(shardId, reason); - } - public void startRecovery(final IndexShard indexShard, final DiscoveryNode sourceNode, final RecoveryListener listener) { // create a new recovery status, and process... final long recoveryId = onGoingRecoveries.startRecovery(indexShard, sourceNode, listener, recoverySettings.activityTimeout()); @@ -297,13 +286,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde */ private Store.MetadataSnapshot getStoreMetadataSnapshot(final RecoveryTarget recoveryTarget) { try { - if (recoveryTarget.indexShard().indexSettings().isOnSharedFilesystem()) { - // we are not going to copy any files, so don't bother listing files, potentially running into concurrency issues with the - // primary changing files underneath us - return Store.MetadataSnapshot.EMPTY; - } else { - return recoveryTarget.indexShard().snapshotStoreMetadata(); - } + return recoveryTarget.indexShard().snapshotStoreMetadata(); } catch (final org.apache.lucene.index.IndexNotFoundException e) { // happens on an empty folder. no need to log logger.trace("{} shard folder empty, recovering all files", recoveryTarget); diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java b/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java deleted file mode 100644 index fdf0de32f2f..00000000000 --- a/core/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.indices.recovery; - -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.translog.Translog; - -import java.io.IOException; -import java.util.function.Function; -import java.util.function.Supplier; - -/** - * A recovery handler that skips phase one as well as sending the translog snapshot. - */ -public class SharedFSRecoverySourceHandler extends RecoverySourceHandler { - - private final IndexShard shard; - private final StartRecoveryRequest request; - - SharedFSRecoverySourceHandler(IndexShard shard, RecoveryTargetHandler recoveryTarget, StartRecoveryRequest request, - Supplier currentClusterStateVersionSupplier, - Function delayNewRecoveries, Settings nodeSettings) { - super(shard, recoveryTarget, request, currentClusterStateVersionSupplier, delayNewRecoveries, -1, nodeSettings); - this.shard = shard; - this.request = request; - } - - @Override - public RecoveryResponse recoverToTarget() throws IOException { - boolean engineClosed = false; - try { - logger.trace("recovery [phase1]: skipping phase1 for shared filesystem"); - final long maxUnsafeAutoIdTimestamp = shard.segmentStats(false).getMaxUnsafeAutoIdTimestamp(); - if (request.isPrimaryRelocation()) { - logger.debug("[phase1] closing engine on primary for shared filesystem recovery"); - try { - // if we relocate we need to close the engine in order to open a new - // IndexWriter on the other end of the relocation - engineClosed = true; - shard.flushAndCloseEngine(); - } catch (IOException e) { - logger.warn("close engine failed", e); - shard.failShard("failed to close engine (phase1)", e); - } - } - prepareTargetForTranslog(0, maxUnsafeAutoIdTimestamp); - finalizeRecovery(); - return response; - } catch (Exception e) { - if (engineClosed) { - // If the relocation fails then the primary is closed and can't be - // used anymore... 
(because it's closed) that's a problem, so in - // that case, fail the shard to reallocate a new IndexShard and - // create a new IndexWriter - logger.info("recovery failed for primary shadow shard, failing shard"); - // pass the failure as null, as we want to ensure the store is not marked as corrupted - shard.failShard("primary relocation failed on shared filesystem", e); - } else { - logger.info("recovery failed on shared filesystem", e); - } - throw e; - } - } - - @Override - protected int sendSnapshot(final long startingSeqNo, final Translog.Snapshot snapshot) { - logger.trace("skipping recovery of translog snapshot on shared filesystem"); - return 0; - } - -} diff --git a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 4f0ee0e11e4..9c9731bc155 100644 --- a/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/core/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -173,9 +173,6 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe case STILL_ALLOCATED: // nothing to do break; - case SHARED_FILE_SYSTEM: - // nothing to do - break; default: assert false : "unknown shard deletion check result: " + shardDeletionCheckResult; } diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index 00e00b745a0..bf65f5b9441 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -406,6 +406,8 @@ public class Node implements Closeable { final Transport transport = networkModule.getTransportSupplier().get(); final TransportService transportService = newTransportService(settings, transport, threadPool, networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings()); + final SearchTransportService searchTransportService = new SearchTransportService(settings, + settingsModule.getClusterSettings(), transportService); final Consumer httpBind; final HttpServerTransport httpServerTransport; if (networkModule.isHttpEnabled()) { @@ -447,8 +449,7 @@ public class Node implements Closeable { b.bind(IndicesService.class).toInstance(indicesService); b.bind(SearchService.class).toInstance(newSearchService(clusterService, indicesService, threadPool, scriptModule.getScriptService(), bigArrays, searchModule.getFetchPhase())); - b.bind(SearchTransportService.class).toInstance(new SearchTransportService(settings, - settingsModule.getClusterSettings(), transportService)); + b.bind(SearchTransportService.class).toInstance(searchTransportService); b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(settings, bigArrays, scriptModule.getScriptService())); b.bind(Transport.class).toInstance(transport); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java new file mode 100644 index 00000000000..c15b2553e5d --- /dev/null +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestRemoteClusterInfoAction.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.admin.cluster; + +import org.elasticsearch.action.admin.cluster.remote.RemoteInfoAction; +import org.elasticsearch.action.admin.cluster.remote.RemoteInfoRequest; +import org.elasticsearch.action.admin.cluster.remote.RemoteInfoResponse; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.RestBuilderListener; + +import java.io.IOException; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +public final class RestRemoteClusterInfoAction extends BaseRestHandler { + + public RestRemoteClusterInfoAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(GET, "_remote/info", this); + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) + throws IOException { + return channel -> client.execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest(), + new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(RemoteInfoResponse response, XContentBuilder builder) throws Exception { + response.toXContent(builder, request); + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + } + @Override + public boolean canTripCircuitBreaker() { + return false; + } +} diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java index 8ce4ec0f8dc..e7451001238 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java @@ -190,18 +190,10 @@ public class RestShardsAction extends AbstractCatAction { table.addCell(shard.id()); IndexMetaData indexMeta = state.getState().getMetaData().getIndexSafe(shard.index()); - boolean usesShadowReplicas = false; - if (indexMeta != null) { - usesShadowReplicas = indexMeta.isIndexUsingShadowReplicas(); - } if (shard.primary()) { table.addCell("p"); } else { - if (usesShadowReplicas) { - table.addCell("s"); - } else { - table.addCell("r"); - } + table.addCell("r"); } table.addCell(shard.state()); table.addCell(commonStats == null ? 
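Back to the new RestRemoteClusterInfoAction above: it is a thin REST shim over the transport-level RemoteInfoAction, so the same data is reachable programmatically. A hedged sketch of such a caller, assuming access to a NodeClient (for example from a plugin); the class and method names here are illustrative:

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.admin.cluster.remote.RemoteInfoAction;
    import org.elasticsearch.action.admin.cluster.remote.RemoteInfoRequest;
    import org.elasticsearch.action.admin.cluster.remote.RemoteInfoResponse;
    import org.elasticsearch.client.node.NodeClient;

    public class RemoteInfoSketch {
        // Mirrors the execute() call made by the REST handler above.
        static void fetchRemoteInfo(NodeClient client) {
            client.execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest(),
                new ActionListener<RemoteInfoResponse>() {
                    @Override
                    public void onResponse(RemoteInfoResponse response) {
                        // one RemoteConnectionInfo per configured cluster alias
                        System.out.println("remote info: " + response);
                    }

                    @Override
                    public void onFailure(Exception e) {
                        e.printStackTrace();
                    }
                });
        }
    }

Over HTTP the equivalent is a plain GET _remote/info request, rendered with one entry per configured cluster alias (see the RemoteConnectionInfo XContent test later in this patch).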
null : commonStats.getDocs().getCount()); diff --git a/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java b/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java index 5c76328610d..4edde7f9bc6 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/subphase/InnerHitsContext.java @@ -19,26 +19,18 @@ package org.elasticsearch.search.fetch.subphase; -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.ConstantScoreScorer; -import org.apache.lucene.search.ConstantScoreWeight; -import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.DocValuesTermsQuery; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; -import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.TopScoreDocCollector; -import org.apache.lucene.search.Weight; import org.apache.lucene.search.join.BitSetProducer; -import org.apache.lucene.util.BitSet; +import org.apache.lucene.search.join.ParentChildrenBlockJoinQuery; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.Queries; @@ -48,9 +40,9 @@ import org.elasticsearch.index.mapper.ObjectMapper; import org.elasticsearch.index.mapper.ParentFieldMapper; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.UidFieldMapper; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.fetch.FetchSubPhase; -import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SubSearchContext; @@ -131,7 +123,8 @@ public final class InnerHitsContext { } BitSetProducer parentFilter = context.bitsetFilterCache().getBitSetProducer(rawParentFilter); Query childFilter = childObjectMapper.nestedTypeFilter(); - Query q = Queries.filtered(query(), new NestedChildrenQuery(parentFilter, childFilter, hitContext)); + int parentDocId = hitContext.readerContext().docBase + hitContext.docId(); + Query q = Queries.filtered(query(), new ParentChildrenBlockJoinQuery(parentFilter, childFilter, parentDocId)); if (size() == 0) { return new TopDocs(context.searcher().count(q), Lucene.EMPTY_SCORE_DOCS, 0); @@ -156,120 +149,6 @@ public final class InnerHitsContext { } } - // A filter that only emits the nested children docs of a specific nested parent doc - static class NestedChildrenQuery extends Query { - - private final BitSetProducer parentFilter; - private final Query childFilter; - private final int docId; - private final LeafReader leafReader; - - NestedChildrenQuery(BitSetProducer parentFilter, Query childFilter, FetchSubPhase.HitContext hitContext) { - this.parentFilter = parentFilter; - this.childFilter = childFilter; - this.docId = hitContext.docId(); - this.leafReader = hitContext.readerContext().reader(); - } - - @Override - public boolean equals(Object obj) { - if (sameClassAs(obj) == false) { - return false; - } - NestedChildrenQuery other = 
(NestedChildrenQuery) obj; - return parentFilter.equals(other.parentFilter) - && childFilter.equals(other.childFilter) - && docId == other.docId - && leafReader.getCoreCacheKey() == other.leafReader.getCoreCacheKey(); - } - - @Override - public int hashCode() { - int hash = classHash(); - hash = 31 * hash + parentFilter.hashCode(); - hash = 31 * hash + childFilter.hashCode(); - hash = 31 * hash + docId; - hash = 31 * hash + leafReader.getCoreCacheKey().hashCode(); - return hash; - } - - @Override - public String toString(String field) { - return "NestedChildren(parent=" + parentFilter + ",child=" + childFilter + ")"; - } - - @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { - final Weight childWeight = childFilter.createWeight(searcher, false); - return new ConstantScoreWeight(this) { - @Override - public Scorer scorer(LeafReaderContext context) throws IOException { - // Nested docs only reside in a single segment, so no need to evaluate all segments - if (!context.reader().getCoreCacheKey().equals(leafReader.getCoreCacheKey())) { - return null; - } - - // If docId == 0 then we a parent doc doesn't have child docs, because child docs are stored - // before the parent doc and because parent doc is 0 we can safely assume that there are no child docs. - if (docId == 0) { - return null; - } - - final BitSet parents = parentFilter.getBitSet(context); - final int firstChildDocId = parents.prevSetBit(docId - 1) + 1; - // A parent doc doesn't have child docs, so we can early exit here: - if (firstChildDocId == docId) { - return null; - } - - final Scorer childrenScorer = childWeight.scorer(context); - if (childrenScorer == null) { - return null; - } - DocIdSetIterator childrenIterator = childrenScorer.iterator(); - final DocIdSetIterator it = new DocIdSetIterator() { - - int doc = -1; - - @Override - public int docID() { - return doc; - } - - @Override - public int nextDoc() throws IOException { - return advance(doc + 1); - } - - @Override - public int advance(int target) throws IOException { - target = Math.max(firstChildDocId, target); - if (target >= docId) { - // We're outside the child nested scope, so it is done - return doc = NO_MORE_DOCS; - } else { - int advanced = childrenIterator.advance(target); - if (advanced >= docId) { - // We're outside the child nested scope, so it is done - return doc = NO_MORE_DOCS; - } else { - return doc = advanced; - } - } - } - - @Override - public long cost() { - return Math.min(childrenIterator.cost(), docId - firstChildDocId); - } - - }; - return new ConstantScoreScorer(this, score(), it); - } - }; - } - } - } public static final class ParentChildInnerHits extends BaseInnerHits { diff --git a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java index a896039f7a3..ddb4edec990 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortBuilder.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.SortField; import org.apache.lucene.util.BitSet; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.geo.GeoDistance; @@ -491,12 +490,11 @@ public class GeoDistanceSortBuilder extends SortBuilder 
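Stepping back to the InnerHitsContext change above: the hand-rolled NestedChildrenQuery that is deleted here is replaced by Lucene's ParentChildrenBlockJoinQuery, which performs the same walk (all child documents in the block belonging to one parent hit) natively. The one subtlety is the doc id space: scorers address documents per segment, but the block-join query expects the parent's id in the top-level reader's id space, hence the docBase + docId() computation. A minimal sketch of the construction (names are illustrative; searcher setup omitted):

    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.join.BitSetProducer;
    import org.apache.lucene.search.join.ParentChildrenBlockJoinQuery;

    public class ChildHitsSketch {
        // Builds a query matching only the nested children of one concrete parent hit.
        static Query childrenOfParent(BitSetProducer parentsFilter, Query childFilter,
                                      LeafReaderContext segment, int parentDocInSegment) {
            // Shift the segment-local parent doc id into the global (top-level)
            // doc id space that the block-join query expects.
            int globalParentDocId = segment.docBase + parentDocInSegment;
            return new ParentChildrenBlockJoinQuery(parentsFilter, childFilter, globalParentDocId);
        }
    }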
@Override public SortFieldAndFormat build(QueryShardContext context) throws IOException { - final boolean indexCreatedBeforeV2_0 = context.indexVersionCreated().before(Version.V_2_0_0); // validation was not available prior to 2.x, so to support bwc percolation queries we only ignore_malformed // on 2.x created indexes GeoPoint[] localPoints = points.toArray(new GeoPoint[points.size()]); - if (!indexCreatedBeforeV2_0 && !GeoValidationMethod.isIgnoreMalformed(validation)) { + if (GeoValidationMethod.isIgnoreMalformed(validation) == false) { for (GeoPoint point : localPoints) { if (GeoUtils.isValidLatitude(point.lat()) == false) { throw new ElasticsearchParseException( diff --git a/core/src/test/java/org/elasticsearch/VersionTests.java b/core/src/test/java/org/elasticsearch/VersionTests.java index 35c36bee643..c84e069f80a 100644 --- a/core/src/test/java/org/elasticsearch/VersionTests.java +++ b/core/src/test/java/org/elasticsearch/VersionTests.java @@ -33,44 +33,43 @@ import java.util.Locale; import java.util.Map; import java.util.Set; -import static org.elasticsearch.Version.V_2_2_0; -import static org.elasticsearch.Version.V_5_0_0_alpha1; +import static org.elasticsearch.Version.V_5_3_0_UNRELEASED; +import static org.elasticsearch.Version.V_6_0_0_alpha1_UNRELEASED; import static org.elasticsearch.test.VersionUtils.randomVersion; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.sameInstance; public class VersionTests extends ESTestCase { public void testVersionComparison() throws Exception { - assertThat(V_2_2_0.before(V_5_0_0_alpha1), is(true)); - assertThat(V_2_2_0.before(V_2_2_0), is(false)); - assertThat(V_5_0_0_alpha1.before(V_2_2_0), is(false)); + assertThat(V_5_3_0_UNRELEASED.before(V_6_0_0_alpha1_UNRELEASED), is(true)); + assertThat(V_5_3_0_UNRELEASED.before(V_5_3_0_UNRELEASED), is(false)); + assertThat(V_6_0_0_alpha1_UNRELEASED.before(V_5_3_0_UNRELEASED), is(false)); - assertThat(V_2_2_0.onOrBefore(V_5_0_0_alpha1), is(true)); - assertThat(V_2_2_0.onOrBefore(V_2_2_0), is(true)); - assertThat(V_5_0_0_alpha1.onOrBefore(V_2_2_0), is(false)); + assertThat(V_5_3_0_UNRELEASED.onOrBefore(V_6_0_0_alpha1_UNRELEASED), is(true)); + assertThat(V_5_3_0_UNRELEASED.onOrBefore(V_5_3_0_UNRELEASED), is(true)); + assertThat(V_6_0_0_alpha1_UNRELEASED.onOrBefore(V_5_3_0_UNRELEASED), is(false)); - assertThat(V_2_2_0.after(V_5_0_0_alpha1), is(false)); - assertThat(V_2_2_0.after(V_2_2_0), is(false)); - assertThat(V_5_0_0_alpha1.after(V_2_2_0), is(true)); + assertThat(V_5_3_0_UNRELEASED.after(V_6_0_0_alpha1_UNRELEASED), is(false)); + assertThat(V_5_3_0_UNRELEASED.after(V_5_3_0_UNRELEASED), is(false)); + assertThat(V_6_0_0_alpha1_UNRELEASED.after(V_5_3_0_UNRELEASED), is(true)); - assertThat(V_2_2_0.onOrAfter(V_5_0_0_alpha1), is(false)); - assertThat(V_2_2_0.onOrAfter(V_2_2_0), is(true)); - assertThat(V_5_0_0_alpha1.onOrAfter(V_2_2_0), is(true)); + assertThat(V_5_3_0_UNRELEASED.onOrAfter(V_6_0_0_alpha1_UNRELEASED), is(false)); + assertThat(V_5_3_0_UNRELEASED.onOrAfter(V_5_3_0_UNRELEASED), is(true)); + assertThat(V_6_0_0_alpha1_UNRELEASED.onOrAfter(V_5_3_0_UNRELEASED), is(true)); assertTrue(Version.fromString("5.0.0-alpha2").onOrAfter(Version.fromString("5.0.0-alpha1"))); 
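On the GeoDistanceSortBuilder change above: with 2.x indices no longer readable there is no index old enough to warrant the age-based leniency, so validation is now driven solely by the ignore_malformed validation method. A standalone sketch of the remaining check; the exception message is illustrative, and the longitude branch mirrors the latitude branch shown in the hunk:

    import org.elasticsearch.ElasticsearchParseException;
    import org.elasticsearch.common.geo.GeoPoint;
    import org.elasticsearch.common.geo.GeoUtils;

    public class GeoValidationSketch {
        // Rejects out-of-range coordinates unless the caller opted into
        // ignore_malformed semantics.
        static void validate(GeoPoint[] points, boolean ignoreMalformed) {
            if (ignoreMalformed == false) {
                for (GeoPoint point : points) {
                    if (GeoUtils.isValidLatitude(point.lat()) == false) {
                        throw new ElasticsearchParseException("illegal latitude value [{}]", point.lat());
                    }
                    if (GeoUtils.isValidLongitude(point.lon()) == false) {
                        throw new ElasticsearchParseException("illegal longitude value [{}]", point.lon());
                    }
                }
            }
        }
    }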
assertTrue(Version.fromString("5.0.0").onOrAfter(Version.fromString("5.0.0-beta2"))); assertTrue(Version.fromString("5.0.0-rc1").onOrAfter(Version.fromString("5.0.0-beta24"))); assertTrue(Version.fromString("5.0.0-alpha24").before(Version.fromString("5.0.0-beta0"))); - assertThat(V_2_2_0, is(lessThan(V_5_0_0_alpha1))); - assertThat(V_2_2_0.compareTo(V_2_2_0), is(0)); - assertThat(V_5_0_0_alpha1, is(greaterThan(V_2_2_0))); + assertThat(V_5_3_0_UNRELEASED, is(lessThan(V_6_0_0_alpha1_UNRELEASED))); + assertThat(V_5_3_0_UNRELEASED.compareTo(V_5_3_0_UNRELEASED), is(0)); + assertThat(V_6_0_0_alpha1_UNRELEASED, is(greaterThan(V_5_3_0_UNRELEASED))); } public void testMin() { @@ -99,9 +98,11 @@ public class VersionTests extends ESTestCase { public void testMinimumIndexCompatibilityVersion() { assertEquals(Version.V_5_0_0, Version.V_6_0_0_alpha1_UNRELEASED.minimumIndexCompatibilityVersion()); - assertEquals(Version.V_2_0_0, Version.V_5_0_0.minimumIndexCompatibilityVersion()); - assertEquals(Version.V_2_0_0, Version.V_5_1_1_UNRELEASED.minimumIndexCompatibilityVersion()); - assertEquals(Version.V_2_0_0, Version.V_5_0_0_alpha1.minimumIndexCompatibilityVersion()); + assertEquals(Version.fromId(2000099), Version.V_5_0_0.minimumIndexCompatibilityVersion()); + assertEquals(Version.fromId(2000099), + Version.V_5_1_1_UNRELEASED.minimumIndexCompatibilityVersion()); + assertEquals(Version.fromId(2000099), + Version.V_5_0_0_alpha1.minimumIndexCompatibilityVersion()); } public void testVersionConstantPresent() { @@ -155,7 +156,8 @@ public class VersionTests extends ESTestCase { public void testIndexCreatedVersion() { // an actual index has a IndexMetaData.SETTING_INDEX_UUID - final Version version = randomFrom(Version.V_2_0_0, Version.V_2_3_0, Version.V_5_0_0_alpha1); + final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_2, + Version.V_5_2_0_UNRELEASED, Version.V_6_0_0_alpha1_UNRELEASED); assertEquals(version, Version.indexCreated(Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "foo").put(IndexMetaData.SETTING_VERSION_CREATED, version).build())); } @@ -230,7 +232,7 @@ public class VersionTests extends ESTestCase { }); assertSame(Version.CURRENT, Version.fromString(Version.CURRENT.toString())); - assertSame(Version.fromString("2.0.0-SNAPSHOT"), Version.fromString("2.0.0")); + assertEquals(Version.fromString("2.0.0-SNAPSHOT"), Version.fromId(2000099)); expectThrows(IllegalArgumentException.class, () -> { Version.fromString("5.0.0-SNAPSHOT"); @@ -325,8 +327,8 @@ public class VersionTests extends ESTestCase { public void testIsCompatible() { assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion())); assertTrue(isCompatible(Version.V_5_0_0, Version.V_6_0_0_alpha1_UNRELEASED)); - assertFalse(isCompatible(Version.V_2_0_0, Version.V_6_0_0_alpha1_UNRELEASED)); - assertFalse(isCompatible(Version.V_2_0_0, Version.V_5_0_0)); + assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_0_0_alpha1_UNRELEASED)); + assertFalse(isCompatible(Version.fromId(2000099), Version.V_5_0_0)); } public boolean isCompatible(Version left, Version right) { diff --git a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java b/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java index 24b9601ad4f..5786482e79e 100644 --- a/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/bulk/byscroll/AsyncBulkByScrollActionTests.java @@ 
-34,10 +34,10 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkItemResponse.Failure; -import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.Hit; -import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.SearchFailure; import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.Hit; +import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.SearchFailure; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.index.IndexRequest; @@ -199,7 +199,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { client.scrollsToReject = randomIntBetween(0, testRequest.getMaxRetries() - 1); DummyAsyncBulkByScrollAction action = new DummyActionWithoutBackoff(); action.setScroll(scrollId()); - action.startNextScroll(timeValueNanos(System.nanoTime()), 0); + TimeValue now = timeValueNanos(System.nanoTime()); + action.startNextScroll(now, now, 0); assertBusy(() -> assertEquals(client.scrollsToReject + 1, client.scrollAttempts.get())); if (listener.isDone()) { Object result = listener.get(); @@ -213,7 +214,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { client.scrollsToReject = testRequest.getMaxRetries() + randomIntBetween(1, 100); DummyAsyncBulkByScrollAction action = new DummyActionWithoutBackoff(); action.setScroll(scrollId()); - action.startNextScroll(timeValueNanos(System.nanoTime()), 0); + TimeValue now = timeValueNanos(System.nanoTime()); + action.startNextScroll(now, now, 0); assertBusy(() -> assertEquals(testRequest.getMaxRetries() + 1, client.scrollAttempts.get())); assertBusy(() -> assertTrue(listener.isDone())); ExecutionException e = expectThrows(ExecutionException.class, () -> listener.get()); @@ -438,7 +440,9 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { // Set throttle to 1 request per second to make the math simpler testTask.rethrottle(1f); // Make the last batch look nearly instant but have 100 documents - action.startNextScroll(timeValueNanos(System.nanoTime()), 100); + TimeValue lastBatchStartTime = timeValueNanos(System.nanoTime()); + TimeValue now = timeValueNanos(lastBatchStartTime.nanos() + 1); + action.startNextScroll(lastBatchStartTime, now, 100); // So the next request is going to have to wait an extra 100 seconds or so (base was 10 seconds, so 110ish) assertThat(client.lastScroll.get().request.scroll().keepAlive().seconds(), either(equalTo(110L)).or(equalTo(109L))); @@ -451,14 +455,13 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { if (randomBoolean()) { client.lastScroll.get().listener.onResponse(searchResponse); - // The delay is still 100ish seconds because there hasn't been much time between when we requested the bulk and when we got it. 
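A note on the arithmetic these throttling tests pin down: the wait before the next scroll request is the time the last batch should have taken at the current rate (batch size divided by requests per second) minus the time already elapsed, which is why passing an explicit now makes the assertions deterministic (99 rather than "99 or 100"). A sketch of that computation under those assumptions; this illustrates the formula, it is not the actual AbstractAsyncBulkByScrollAction code:

    import org.elasticsearch.common.unit.TimeValue;

    import java.util.concurrent.TimeUnit;

    public class ThrottleDelaySketch {
        // Delay before the next scroll: lastBatchSize / requestsPerSecond, minus
        // whatever time has already passed since the last batch started.
        static TimeValue throttleWaitTime(TimeValue lastBatchStartTime, TimeValue now,
                                          int lastBatchSize, float requestsPerSecond) {
            long targetNanos = (long) (lastBatchSize / requestsPerSecond * TimeUnit.SECONDS.toNanos(1));
            long earliestNextStart = lastBatchStartTime.nanos() + targetNanos;
            return TimeValue.timeValueNanos(Math.max(0, earliestNextStart - now.nanos()));
        }

        public static void main(String[] args) {
            TimeValue start = TimeValue.timeValueNanos(0);
            TimeValue now = TimeValue.timeValueNanos(1); // a "nearly instant" batch
            // 100 docs at 1 req/s -> just under 100 seconds, matching the
            // 99-second capturedDelay assertion in the test.
            System.out.println(throttleWaitTime(start, now, 100, 1f).seconds() + "s"); // 99s
        }
    }

With 100 documents at 1 request/second and a one-nanosecond-old batch, the delay is just under 100 seconds, and the scroll keepAlive asserted above is that delay plus the roughly 10 second base timeout, hence "110ish".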
- assertThat(capturedDelay.get().seconds(), either(equalTo(100L)).or(equalTo(99L))); + assertEquals(99, capturedDelay.get().seconds()); } else { // Let's rethrottle between the starting the scroll and getting the response testTask.rethrottle(10f); client.lastScroll.get().listener.onResponse(searchResponse); // The delay uses the new throttle - assertThat(capturedDelay.get().seconds(), either(equalTo(10L)).or(equalTo(9L))); + assertEquals(9, capturedDelay.get().seconds()); } // Running the command ought to increment the delay counter on the task. @@ -483,7 +486,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { CountDownLatch successLatch = new CountDownLatch(1); DummyAsyncBulkByScrollAction action = new DummyActionWithoutBackoff() { @Override - void startNextScroll(TimeValue lastBatchStartTime, int lastBatchSize) { + void startNextScroll(TimeValue lastBatchStartTime, TimeValue now, int lastBatchSize) { successLatch.countDown(); } }; @@ -574,7 +577,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { } public void testCancelBeforeStartNextScroll() throws Exception { - cancelTaskCase((DummyAsyncBulkByScrollAction action) -> action.startNextScroll(timeValueNanos(System.nanoTime()), 0)); + TimeValue now = timeValueNanos(System.nanoTime()); + cancelTaskCase((DummyAsyncBulkByScrollAction action) -> action.startNextScroll(now, now, 0)); } public void testCancelBeforeRefreshAndFinish() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/action/main/MainResponseTests.java b/core/src/test/java/org/elasticsearch/action/main/MainResponseTests.java index c30e14c3020..4a602c11003 100644 --- a/core/src/test/java/org/elasticsearch/action/main/MainResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/main/MainResponseTests.java @@ -72,7 +72,7 @@ public class MainResponseTests extends ESTestCase { public void testToXContent() throws IOException { Build build = new Build("buildHash", "2016-11-15".toString(), true); - Version version = Version.V_2_4_5; + Version version = Version.CURRENT; MainResponse response = new MainResponse("nodeName", version, new ClusterName("clusterName"), "clusterUuid", build, true); XContentBuilder builder = XContentFactory.jsonBuilder(); response.toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -81,11 +81,11 @@ public class MainResponseTests extends ESTestCase { + "\"cluster_name\":\"clusterName\"," + "\"cluster_uuid\":\"clusterUuid\"," + "\"version\":{" - + "\"number\":\"2.4.5\"," + + "\"number\":\"" + version.toString() + "\"," + "\"build_hash\":\"buildHash\"," + "\"build_date\":\"2016-11-15\"," + "\"build_snapshot\":true," - + "\"lucene_version\":\"5.5.2\"}," + + "\"lucene_version\":\"" + version.luceneVersion.toString() + "\"}," + "\"tagline\":\"You Know, for Search\"" + "}", builder.string()); } diff --git a/core/src/test/java/org/elasticsearch/action/search/RemoteClusterConnectionTests.java b/core/src/test/java/org/elasticsearch/action/search/RemoteClusterConnectionTests.java index 15c735cafa6..d73b6709121 100644 --- a/core/src/test/java/org/elasticsearch/action/search/RemoteClusterConnectionTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/RemoteClusterConnectionTests.java @@ -19,8 +19,13 @@ package org.elasticsearch.action.search; import org.apache.lucene.store.AlreadyClosedException; +import org.elasticsearch.Build; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import 
org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; +import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; @@ -33,25 +38,31 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CancellableThreads; -import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.http.HttpInfo; import org.elasticsearch.mocksocket.MockServerSocket; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.TransportConnectionListener; +import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.io.UncheckedIOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; -import java.net.UnknownHostException; -import java.nio.channels.AlreadyConnectedException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -519,4 +530,187 @@ public class RemoteClusterConnectionTests extends ESTestCase { } } } + + private static void installNodeStatsHandler(TransportService service, DiscoveryNode...nodes) { + service.registerRequestHandler(NodesInfoAction.NAME, NodesInfoRequest::new, ThreadPool.Names.SAME, false, false, + (request, channel) -> { + List nodeInfos = new ArrayList<>(); + int port = 80; + for (DiscoveryNode node : nodes) { + HttpInfo http = new HttpInfo(new BoundTransportAddress(new TransportAddress[]{node.getAddress()}, + new TransportAddress(node.getAddress().address().getAddress(), port++)), 100); + nodeInfos.add(new NodeInfo(node.getVersion(), Build.CURRENT, node, null, null, null, null, null, null, http, null, + null, null)); + } + channel.sendResponse(new NodesInfoResponse(ClusterName.DEFAULT, nodeInfos, Collections.emptyList())); + }); + + } + + public void testGetConnectionInfo() throws Exception { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService transport1 = startTransport("seed_node", knownNodes, Version.CURRENT); + MockTransportService transport2 = startTransport("seed_node_1", knownNodes, Version.CURRENT); + MockTransportService transport3 = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { + DiscoveryNode node1 = transport1.getLocalDiscoNode(); + DiscoveryNode node2 = transport3.getLocalDiscoNode(); + DiscoveryNode node3 = transport2.getLocalDiscoNode(); + 
knownNodes.add(transport1.getLocalDiscoNode()); + knownNodes.add(transport3.getLocalDiscoNode()); + knownNodes.add(transport2.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + List seedNodes = Arrays.asList(node3, node1, node2); + Collections.shuffle(seedNodes, random()); + + try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + int maxNumConnections = randomIntBetween(1, 5); + try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", + seedNodes, service, maxNumConnections, n -> true)) { + // test no nodes connected + RemoteConnectionInfo remoteConnectionInfo = assertSerialization(getRemoteConnectionInfo(connection)); + assertNotNull(remoteConnectionInfo); + assertEquals(0, remoteConnectionInfo.numNodesConnected); + assertEquals(0, remoteConnectionInfo.seedNodes.size()); + assertEquals(0, remoteConnectionInfo.httpAddresses.size()); + assertEquals(maxNumConnections, remoteConnectionInfo.connectionsPerCluster); + assertEquals("test-cluster", remoteConnectionInfo.clusterAlias); + updateSeedNodes(connection, seedNodes); + expectThrows(RemoteTransportException.class, () -> getRemoteConnectionInfo(connection)); + + for (MockTransportService s : Arrays.asList(transport1, transport2, transport3)) { + installNodeStatsHandler(s, node1, node2, node3); + } + + remoteConnectionInfo = getRemoteConnectionInfo(connection); + remoteConnectionInfo = assertSerialization(remoteConnectionInfo); + assertNotNull(remoteConnectionInfo); + assertEquals(connection.getNumNodesConnected(), remoteConnectionInfo.numNodesConnected); + assertEquals(Math.min(3, maxNumConnections), connection.getNumNodesConnected()); + assertEquals(3, remoteConnectionInfo.seedNodes.size()); + assertEquals(remoteConnectionInfo.httpAddresses.size(), Math.min(3, maxNumConnections)); + assertEquals(maxNumConnections, remoteConnectionInfo.connectionsPerCluster); + assertEquals("test-cluster", remoteConnectionInfo.clusterAlias); + for (TransportAddress address : remoteConnectionInfo.httpAddresses) { + assertTrue("port range mismatch: " + address.getPort(), address.getPort() >= 80 && address.getPort() <= 90); + } + } + } + } + } + + public void testRemoteConnectionInfo() throws IOException { + RemoteConnectionInfo stats = new RemoteConnectionInfo("test_cluster", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), + 4, 3, TimeValue.timeValueMinutes(30)); + assertSerialization(stats); + + RemoteConnectionInfo stats1 = new RemoteConnectionInfo("test_cluster", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), + 4, 4, TimeValue.timeValueMinutes(30)); + assertSerialization(stats1); + assertNotEquals(stats, stats1); + + stats1 = new RemoteConnectionInfo("test_cluster_1", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), + 4, 3, TimeValue.timeValueMinutes(30)); + assertSerialization(stats1); + assertNotEquals(stats, stats1); + + stats1 = new RemoteConnectionInfo("test_cluster", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 15)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), + 4, 3, TimeValue.timeValueMinutes(30)); + 
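What this assertNotEquals ladder is really checking: every field of RemoteConnectionInfo (alias, seed addresses, http addresses, connection limit, connected-node count, connect timeout) participates in equality, so varying any single one must break it. A hedged sketch of that value-object contract (field names follow the test and the XContent keys; this is not the actual class):

    import org.elasticsearch.common.transport.TransportAddress;
    import org.elasticsearch.common.unit.TimeValue;

    import java.util.List;
    import java.util.Objects;

    // Every field participates in equals/hashCode, so each single-field
    // variation in the test above must flip equality.
    final class ConnectionInfoSketch {
        final String clusterAlias;
        final List<TransportAddress> seedNodes;
        final List<TransportAddress> httpAddresses;
        final int connectionsPerCluster;
        final int numNodesConnected;
        final TimeValue initialConnectTimeout;

        ConnectionInfoSketch(String clusterAlias, List<TransportAddress> seedNodes,
                             List<TransportAddress> httpAddresses, int connectionsPerCluster,
                             int numNodesConnected, TimeValue initialConnectTimeout) {
            this.clusterAlias = clusterAlias;
            this.seedNodes = seedNodes;
            this.httpAddresses = httpAddresses;
            this.connectionsPerCluster = connectionsPerCluster;
            this.numNodesConnected = numNodesConnected;
            this.initialConnectTimeout = initialConnectTimeout;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            ConnectionInfoSketch that = (ConnectionInfoSketch) o;
            return connectionsPerCluster == that.connectionsPerCluster
                && numNodesConnected == that.numNodesConnected
                && Objects.equals(clusterAlias, that.clusterAlias)
                && Objects.equals(seedNodes, that.seedNodes)
                && Objects.equals(httpAddresses, that.httpAddresses)
                && Objects.equals(initialConnectTimeout, that.initialConnectTimeout);
        }

        @Override
        public int hashCode() {
            return Objects.hash(clusterAlias, seedNodes, httpAddresses,
                connectionsPerCluster, numNodesConnected, initialConnectTimeout);
        }
    }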
assertSerialization(stats1); + assertNotEquals(stats, stats1); + + stats1 = new RemoteConnectionInfo("test_cluster", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 87)), + 4, 3, TimeValue.timeValueMinutes(30)); + assertSerialization(stats1); + assertNotEquals(stats, stats1); + + stats1 = new RemoteConnectionInfo("test_cluster", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), + 4, 3, TimeValue.timeValueMinutes(325)); + assertSerialization(stats1); + assertNotEquals(stats, stats1); + + stats1 = new RemoteConnectionInfo("test_cluster", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)), + 5, 3, TimeValue.timeValueMinutes(30)); + assertSerialization(stats1); + assertNotEquals(stats, stats1); + } + + private RemoteConnectionInfo assertSerialization(RemoteConnectionInfo info) throws IOException { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.setVersion(Version.CURRENT); + info.writeTo(out); + StreamInput in = out.bytes().streamInput(); + in.setVersion(Version.CURRENT); + RemoteConnectionInfo remoteConnectionInfo = new RemoteConnectionInfo(in); + assertEquals(info, remoteConnectionInfo); + assertEquals(info.hashCode(), remoteConnectionInfo.hashCode()); + return randomBoolean() ? info : remoteConnectionInfo; + } + } + + public void testRenderConnectionInfoXContent() throws IOException { + RemoteConnectionInfo stats = new RemoteConnectionInfo("test_cluster", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,1)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,80)), + 4, 3, TimeValue.timeValueMinutes(30)); + stats = assertSerialization(stats); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + stats.toXContent(builder, null); + builder.endObject(); + assertEquals("{\"test_cluster\":{\"seeds\":[\"0.0.0.0:1\"],\"http_addresses\":[\"0.0.0.0:80\"],\"connected\":true," + + "\"num_nodes_connected\":3,\"max_connections_per_cluster\":4,\"initial_connect_timeout\":\"30m\"}}", builder.string()); + + stats = new RemoteConnectionInfo("some_other_cluster", + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,1), new TransportAddress(TransportAddress.META_ADDRESS,2)), + Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,80), new TransportAddress(TransportAddress.META_ADDRESS,81)), + 2, 0, TimeValue.timeValueSeconds(30)); + stats = assertSerialization(stats); + builder = XContentFactory.jsonBuilder(); + builder.startObject(); + stats.toXContent(builder, null); + builder.endObject(); + assertEquals("{\"some_other_cluster\":{\"seeds\":[\"0.0.0.0:1\",\"0.0.0.0:2\"],\"http_addresses\":[\"0.0.0.0:80\",\"0.0.0.0:81\"]," + + "\"connected\":false,\"num_nodes_connected\":0,\"max_connections_per_cluster\":2,\"initial_connect_timeout\":\"30s\"}}", + builder.string()); + } + + private RemoteConnectionInfo getRemoteConnectionInfo(RemoteClusterConnection connection) throws Exception { + AtomicReference statsRef = new AtomicReference<>(); + AtomicReference exceptionRef = new AtomicReference<>(); + CountDownLatch latch = new CountDownLatch(1); + connection.getConnectionInfo(new ActionListener() { + @Override + public void onResponse(RemoteConnectionInfo remoteConnectionInfo) { + statsRef.set(remoteConnectionInfo); + 
latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + latch.await(); + if (exceptionRef.get() != null) { + throw exceptionRef.get(); + } + return statsRef.get(); + } } diff --git a/core/src/test/java/org/elasticsearch/action/search/RemoteClusterServiceTests.java b/core/src/test/java/org/elasticsearch/action/search/RemoteClusterServiceTests.java index d0f0427e710..81ee9141e2b 100644 --- a/core/src/test/java/org/elasticsearch/action/search/RemoteClusterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/RemoteClusterServiceTests.java @@ -143,14 +143,14 @@ public class RemoteClusterServiceTests extends ESTestCase { assertTrue(service.isRemoteClusterRegistered("cluster_2")); assertFalse(service.isRemoteClusterRegistered("foo")); Map> perClusterIndices = service.groupClusterIndices(new String[]{"foo:bar", "cluster_1:bar", - "cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo"}, i -> false); + "cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo", "cluster*:baz", "*:boo", "no*match:boo"}, i -> false); String[] localIndices = perClusterIndices.computeIfAbsent(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY, k -> Collections.emptyList()).toArray(new String[0]); assertNotNull(perClusterIndices.remove(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY)); - assertArrayEquals(new String[]{"foo:bar", "foo"}, localIndices); + assertArrayEquals(new String[]{"foo:bar", "foo", "no*match:boo"}, localIndices); assertEquals(2, perClusterIndices.size()); - assertEquals(Arrays.asList("bar", "test"), perClusterIndices.get("cluster_1")); - assertEquals(Arrays.asList("foo:bar", "foo*"), perClusterIndices.get("cluster_2")); + assertEquals(Arrays.asList("bar", "test", "baz", "boo"), perClusterIndices.get("cluster_1")); + assertEquals(Arrays.asList("foo:bar", "foo*", "baz", "boo"), perClusterIndices.get("cluster_2")); IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> service.groupClusterIndices(new String[]{"foo:bar", "cluster_1:bar", diff --git a/core/src/test/java/org/elasticsearch/action/support/GroupedActionListenerTests.java b/core/src/test/java/org/elasticsearch/action/support/GroupedActionListenerTests.java new file mode 100644 index 00000000000..2af2da7ba09 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/support/GroupedActionListenerTests.java @@ -0,0 +1,124 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
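On the expanded groupClusterIndices expectations above: a prefix:index expression is routed to every registered cluster alias the prefix matches, wildcards included (cluster*:baz, *:boo), while a prefix matching no registered alias (no*match:boo) falls through as a literal local index name. A simplified sketch of that observable behavior (not the actual implementation; the local-group key and the wildcard matcher are stand-ins):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;
    import java.util.regex.Pattern;

    public class GroupIndicesSketch {
        static final String LOCAL = "(local)"; // stand-in for the local-cluster group key

        // Simplified wildcard matcher: '*' matches any run of characters.
        static boolean wildcardMatch(String pattern, String value) {
            return Pattern.matches(Pattern.quote(pattern).replace("*", "\\E.*\\Q"), value);
        }

        static Map<String, List<String>> group(Set<String> registeredClusters, String... expressions) {
            Map<String, List<String>> result = new HashMap<>();
            for (String expr : expressions) {
                int sep = expr.indexOf(':');
                boolean matchedRemote = false;
                if (sep >= 0) {
                    String prefix = expr.substring(0, sep);
                    String index = expr.substring(sep + 1);
                    for (String cluster : registeredClusters) {
                        if (wildcardMatch(prefix, cluster)) {
                            result.computeIfAbsent(cluster, k -> new ArrayList<>()).add(index);
                            matchedRemote = true;
                        }
                    }
                }
                if (matchedRemote == false) {
                    // no registered alias matched: the whole expression is a local index
                    result.computeIfAbsent(LOCAL, k -> new ArrayList<>()).add(expr);
                }
            }
            return result;
        }
    }

Called with clusters {cluster_1, cluster_2} and the expressions from the test, this puts baz and boo in both remote groups and leaves foo:bar, foo, and no*match:boo local.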
+ */ +package org.elasticsearch.action.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +public class GroupedActionListenerTests extends ESTestCase { + + public void testNotifications() throws InterruptedException { + AtomicReference> resRef = new AtomicReference<>(); + ActionListener> result = new ActionListener>() { + @Override + public void onResponse(Collection integers) { + resRef.set(integers); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }; + final int groupSize = randomIntBetween(10, 1000); + AtomicInteger count = new AtomicInteger(); + Collection defaults = randomBoolean() ? Collections.singletonList(-1) : + Collections.emptyList(); + GroupedActionListener listener = new GroupedActionListener<>(result, groupSize, + defaults); + int numThreads = randomIntBetween(2, 5); + Thread[] threads = new Thread[numThreads]; + CyclicBarrier barrier = new CyclicBarrier(numThreads); + for (int i = 0; i < numThreads; i++) { + threads[i] = new Thread() { + @Override + public void run() { + try { + barrier.await(10, TimeUnit.SECONDS); + } catch (Exception e) { + throw new AssertionError(e); + } + int c = 0; + while((c = count.incrementAndGet()) <= groupSize) { + listener.onResponse(c-1); + } + } + }; + threads[i].start(); + } + for (Thread t : threads) { + t.join(); + } + assertNotNull(resRef.get()); + ArrayList list = new ArrayList<>(resRef.get()); + Collections.sort(list); + int expectedSize = groupSize + defaults.size(); + assertEquals(expectedSize, resRef.get().size()); + int expectedValue = defaults.isEmpty() ? 0 : -1; + for (int i = 0; i < expectedSize; i++) { + assertEquals(Integer.valueOf(expectedValue++), list.get(i)); + } + } + + public void testFailed() { + AtomicReference> resRef = new AtomicReference<>(); + AtomicReference excRef = new AtomicReference<>(); + + ActionListener> result = new ActionListener>() { + @Override + public void onResponse(Collection integers) { + resRef.set(integers); + } + + @Override + public void onFailure(Exception e) { + excRef.set(e); + } + }; + Collection defaults = randomBoolean() ? 
Collections.singletonList(-1) : + Collections.emptyList(); + int size = randomIntBetween(3, 4); + GroupedActionListener<Integer> listener = new GroupedActionListener<>(result, size, + defaults); + listener.onResponse(0); + IOException ioException = new IOException(); + RuntimeException rtException = new RuntimeException(); + listener.onFailure(rtException); + listener.onFailure(ioException); + if (size == 4) { + listener.onResponse(2); + } + assertNotNull(excRef.get()); + assertEquals(rtException, excRef.get()); + assertEquals(1, excRef.get().getSuppressed().length); + assertEquals(ioException, excRef.get().getSuppressed()[0]); + assertNull(resRef.get()); + listener.onResponse(1); + assertNull(resRef.get()); + } +} diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java index 7447e9fb559..182d2f8645d 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ReplicationOperationTests.java @@ -132,33 +132,6 @@ public class ReplicationOperationTests extends ESTestCase { assertThat(primary.knownLocalCheckpoints, equalTo(replicasProxy.generatedLocalCheckpoints)); } - - public void testReplicationWithShadowIndex() throws Exception { - final String index = "test"; - final ShardId shardId = new ShardId(index, "_na_", 0); - - final ClusterState state = stateWithActivePrimary(index, true, randomInt(5)); - final long primaryTerm = state.getMetaData().index(index).primaryTerm(0); - final IndexShardRoutingTable indexShardRoutingTable = state.getRoutingTable().shardRoutingTable(shardId); - final ShardRouting primaryShard = indexShardRoutingTable.primaryShard(); - - Request request = new Request(shardId); - PlainActionFuture<TestPrimary.Result> listener = new PlainActionFuture<>(); - final TestReplicationOperation op = new TestReplicationOperation(request, - new TestPrimary(primaryShard, primaryTerm), listener, false, - new TestReplicaProxy(), () -> state, logger, "test"); - op.execute(); - assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); - assertThat(request.processedOnReplicas, equalTo(Collections.emptySet())); - assertTrue("listener is not marked as done", listener.isDone()); - ShardInfo shardInfo = listener.actionGet().getShardInfo(); - assertThat(shardInfo.getFailed(), equalTo(0)); - assertThat(shardInfo.getFailures(), arrayWithSize(0)); - assertThat(shardInfo.getSuccessful(), equalTo(1)); - assertThat(shardInfo.getTotal(), equalTo(indexShardRoutingTable.getSize())); - } - - public void testDemotedPrimary() throws Exception { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); @@ -310,7 +283,7 @@ public class ReplicationOperationTests extends ESTestCase { final ShardRouting primaryShard = shardRoutingTable.primaryShard(); final TestReplicationOperation op = new TestReplicationOperation(request, new TestPrimary(primaryShard, primaryTerm), - listener, randomBoolean(), new TestReplicaProxy(), () -> state, logger, "test"); + listener, new TestReplicaProxy(), () -> state, logger, "test"); if (passesActiveShardCheck) { assertThat(op.checkActiveShardCount(), nullValue()); @@ -519,13 +492,14 @@ public class ReplicationOperationTests extends ESTestCase { class TestReplicationOperation extends ReplicationOperation<Request, Request, TestPrimary.Result> { TestReplicationOperation(Request request, Primary<Request, Request, TestPrimary.Result> primary, ActionListener<TestPrimary.Result>
listener, Replicas<Request> replicas, Supplier<ClusterState> clusterStateSupplier) { - this(request, primary, listener, true, replicas, clusterStateSupplier, ReplicationOperationTests.this.logger, "test"); + this(request, primary, listener, replicas, clusterStateSupplier, ReplicationOperationTests.this.logger, "test"); } TestReplicationOperation(Request request, Primary<Request, Request, TestPrimary.Result> primary, - ActionListener<TestPrimary.Result> listener, boolean executeOnReplicas, - Replicas<Request> replicas, Supplier<ClusterState> clusterStateSupplier, Logger logger, String opType) { - super(request, primary, listener, executeOnReplicas, replicas, clusterStateSupplier, logger, opType); + ActionListener<TestPrimary.Result> listener, + Replicas<Request> replicas, Supplier<ClusterState> clusterStateSupplier, + Logger logger, String opType) { + super(request, primary, listener, replicas, clusterStateSupplier, logger, opType); } } diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index abe0e9977dd..bf15974d3e5 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -497,8 +497,7 @@ public class TransportReplicationActionTests extends ESTestCase { createReplicatedOperation( Request request, ActionListener<TransportReplicationAction.PrimaryResult<Request, TestResponse>> actionListener, - TransportReplicationAction.PrimaryShardReference primaryShardReference, - boolean executeOnReplicas) { + TransportReplicationAction.PrimaryShardReference primaryShardReference) { return new NoopReplicationOperation(request, actionListener) { public void execute() throws Exception { assertPhase(task, "primary"); @@ -550,8 +549,7 @@ public class TransportReplicationActionTests extends ESTestCase { createReplicatedOperation( Request request, ActionListener<TransportReplicationAction.PrimaryResult<Request, TestResponse>> actionListener, - TransportReplicationAction.PrimaryShardReference primaryShardReference, - boolean executeOnReplicas) { + TransportReplicationAction.PrimaryShardReference primaryShardReference) { return new NoopReplicationOperation(request, actionListener) { public void execute() throws Exception { assertPhase(task, "primary"); @@ -650,35 +648,6 @@ public class TransportReplicationActionTests extends ESTestCase { assertEquals(0, shardFailedRequests.length); } - public void testShadowIndexDisablesReplication() throws Exception { - final String index = "test"; - final ShardId shardId = new ShardId(index, "_na_", 0); - - ClusterState state = stateWithActivePrimary(index, true, randomInt(5)); - MetaData.Builder metaData = MetaData.builder(state.metaData()); - Settings.Builder settings = Settings.builder().put(metaData.get(index).getSettings()); - settings.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true); - metaData.put(IndexMetaData.builder(metaData.get(index)).settings(settings)); - state = ClusterState.builder(state).metaData(metaData).build(); - setState(clusterService, state); - AtomicBoolean executed = new AtomicBoolean(); - ShardRouting primaryShard = state.routingTable().shardRoutingTable(shardId).primaryShard(); - action.new AsyncPrimaryAction(new Request(shardId), primaryShard.allocationId().getId(), - createTransportChannel(new PlainActionFuture<>()), null) { - @Override - protected ReplicationOperation<Request, Request, TransportReplicationAction.PrimaryResult<Request, TestResponse>> createReplicatedOperation( - Request request, ActionListener<TransportReplicationAction.PrimaryResult<Request, TestResponse>> actionListener, - TransportReplicationAction.PrimaryShardReference primaryShardReference, - boolean executeOnReplicas) { - assertFalse(executeOnReplicas); -
assertFalse(executed.getAndSet(true)); - return new NoopReplicationOperation(request, actionListener); - } - - }.run(); - assertThat(executed.get(), equalTo(true)); - } - public void testSeqNoIsSetOnPrimary() throws Exception { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); @@ -738,8 +707,7 @@ public class TransportReplicationActionTests extends ESTestCase { createReplicatedOperation( Request request, ActionListener<TransportReplicationAction.PrimaryResult<Request, TestResponse>> actionListener, - TransportReplicationAction.PrimaryShardReference primaryShardReference, - boolean executeOnReplicas) { + TransportReplicationAction.PrimaryShardReference primaryShardReference) { assertIndexShardCounter(1); if (throwExceptionOnCreation) { throw new ElasticsearchException("simulated exception, during createReplicatedOperation"); @@ -1150,7 +1118,7 @@ public class TransportReplicationActionTests extends ESTestCase { class NoopReplicationOperation extends ReplicationOperation<Request, Request, TransportReplicationAction.PrimaryResult<Request, TestResponse>> { NoopReplicationOperation(Request request, ActionListener<TransportReplicationAction.PrimaryResult<Request, TestResponse>> listener) { - super(request, null, listener, true, null, null, TransportReplicationActionTests.this.logger, "noop"); + super(request, null, listener, null, null, TransportReplicationActionTests.this.logger, "noop"); } @Override diff --git a/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java b/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java index 8a00a430dbc..07c5a7e157f 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchCliTests.java @@ -79,22 +79,19 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase { false, output -> assertThat(output, containsString("Positional arguments not allowed, found [foo]")), (foreground, pidFile, quiet, esSettings) -> {}, - "foo" - ); + "foo"); runTest( ExitCodes.USAGE, false, output -> assertThat(output, containsString("Positional arguments not allowed, found [foo, bar]")), (foreground, pidFile, quiet, esSettings) -> {}, - "foo", "bar" - ); + "foo", "bar"); runTest( ExitCodes.USAGE, false, output -> assertThat(output, containsString("Positional arguments not allowed, found [foo]")), (foreground, pidFile, quiet, esSettings) -> {}, - "-E", "foo=bar", "foo", "-E", "baz=qux" - ); + "-E", "foo=bar", "foo", "-E", "baz=qux"); } public void testThatPidFileCanBeConfigured() throws Exception { @@ -157,18 +154,25 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase { assertThat(settings, hasEntry("foo", "bar")); assertThat(settings, hasEntry("baz", "qux")); }, - "-Efoo=bar", "-E", "baz=qux" - ); + "-Efoo=bar", "-E", "baz=qux"); } public void testElasticsearchSettingCanNotBeEmpty() throws Exception { runTest( ExitCodes.USAGE, false, - output -> assertThat(output, containsString("Setting [foo] must not be empty")), + output -> assertThat(output, containsString("setting [foo] must not be empty")), (foreground, pidFile, quiet, esSettings) -> {}, - "-E", "foo=" - ); + "-E", "foo="); + } + + public void testElasticsearchSettingCanNotBeDuplicated() throws Exception { + runTest( + ExitCodes.USAGE, + false, + output -> assertThat(output, containsString("setting [foo] already set, saw [bar] and [baz]")), + (foreground, pidFile, quiet, initialEnv) -> {}, + "-E", "foo=bar", "-E", "foo=baz"); } public void testUnknownOption() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/bwcompat/IpFieldBwCompatIT.java b/core/src/test/java/org/elasticsearch/bwcompat/IpFieldBwCompatIT.java
deleted file mode 100644 index e1aa8d1425d..00000000000 --- a/core/src/test/java/org/elasticsearch/bwcompat/IpFieldBwCompatIT.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.bwcompat; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; - -import java.util.Arrays; -import java.util.Collection; - -import org.elasticsearch.Version; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.aggregations.AggregationBuilders; -import org.elasticsearch.search.aggregations.bucket.range.Range; -import org.elasticsearch.search.aggregations.bucket.terms.Terms; -import org.elasticsearch.search.sort.SortBuilders; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalSettingsPlugin; - -@ESIntegTestCase.SuiteScopeTestCase -public class IpFieldBwCompatIT extends ESIntegTestCase { - - @Override - protected Collection<Class<? extends Plugin>> nodePlugins() { - return Arrays.asList(InternalSettingsPlugin.class); // uses index.merge.enabled - } - - @Override - public void setupSuiteScopeCluster() throws Exception { - assertAcked(prepareCreate("old_index") - .setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_3_3.id) - .addMapping("type", "ip_field", "type=ip")); - assertAcked(prepareCreate("new_index") - .addMapping("type", "ip_field", "type=ip")); - - indexRandom(true, - client().prepareIndex("old_index", "type", "1").setSource("ip_field", "127.0.0.1"), - client().prepareIndex("new_index", "type", "1").setSource("ip_field", "127.0.0.1"), - client().prepareIndex("new_index", "type", "2").setSource("ip_field", "::1")); - } - - public void testSort() { - SearchResponse response = client().prepareSearch("old_index", "new_index") - .addSort(SortBuilders.fieldSort("ip_field")).get(); - assertNoFailures(response); - assertEquals(3, response.getHits().getTotalHits()); - assertEquals("::1", response.getHits().getAt(0).getSortValues()[0]); - assertEquals("127.0.0.1", response.getHits().getAt(1).getSortValues()[0]); - assertEquals("127.0.0.1", response.getHits().getAt(2).getSortValues()[0]); - } - - public void testRangeAgg() { - SearchResponse response = client().prepareSearch("old_index", "new_index") - .addAggregation(AggregationBuilders.ipRange("ip_range").field("ip_field") - .addMaskRange("127.0.0.1/16") - .addMaskRange("::1/64")).get(); - assertNoFailures(response); - assertEquals(3, response.getHits().getTotalHits()); - Range range = response.getAggregations().get("ip_range"); - assertEquals(2, range.getBuckets().size()); -
assertEquals("::1/64", range.getBuckets().get(0).getKeyAsString()); - assertEquals(3, range.getBuckets().get(0).getDocCount()); - assertEquals("127.0.0.1/16", range.getBuckets().get(1).getKeyAsString()); - assertEquals(2, range.getBuckets().get(1).getDocCount()); - } - - public void testTermsAgg() { - SearchResponse response = client().prepareSearch("old_index", "new_index") - .addAggregation(AggregationBuilders.terms("ip_terms").field("ip_field")).get(); - assertNoFailures(response); - assertEquals(3, response.getHits().getTotalHits()); - Terms terms = response.getAggregations().get("ip_terms"); - assertEquals(2, terms.getBuckets().size()); - assertEquals(2, terms.getBucketByKey("127.0.0.1").getDocCount()); - assertEquals(1, terms.getBucketByKey("::1").getDocCount()); - } -} diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index 553ab15d670..1d6a634a877 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -316,13 +316,11 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { ElasticsearchAssertions.assertNoFailures(searchRsp); assertEquals(numDocs, searchRsp.getHits().getTotalHits()); GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings(indexName).get(); - Version versionCreated = Version.fromId(Integer.parseInt(getSettingsResponse.getSetting(indexName, "index.version.created"))); - if (versionCreated.onOrAfter(Version.V_2_4_0)) { - searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.existsQuery("field.with.dots")); - searchRsp = searchReq.get(); - ElasticsearchAssertions.assertNoFailures(searchRsp); - assertEquals(numDocs, searchRsp.getHits().getTotalHits()); - } + searchReq = client().prepareSearch(indexName) + .setQuery(QueryBuilders.existsQuery("field.with.dots")); + searchRsp = searchReq.get(); + ElasticsearchAssertions.assertNoFailures(searchRsp); + assertEquals(numDocs, searchRsp.getHits().getTotalHits()); } boolean findPayloadBoostInExplanation(Explanation expl) { diff --git a/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index af8c758b5ed..942d7a222ec 100644 --- a/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -134,53 +134,6 @@ public class DiskUsageTests extends ESTestCase { assertEquals(test1Path.getParent().getParent().getParent().toAbsolutePath().toString(), routingToPath.get(test_1)); } - public void testFillShardsWithShadowIndices() { - final Index index = new Index("non-shadow", "0xcafe0000"); - ShardRouting s0 = ShardRouting.newUnassigned(new ShardId(index, 0), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); - s0 = ShardRoutingHelper.initialize(s0, "node1"); - s0 = ShardRoutingHelper.moveToStarted(s0); - Path i0Path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve("0"); - CommonStats commonStats0 = new CommonStats(); - commonStats0.store = new StoreStats(100); - final Index index2 = new Index("shadow", "0xcafe0001"); - ShardRouting s1 = ShardRouting.newUnassigned(new ShardId(index2, 0), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); - 
s1 = ShardRoutingHelper.initialize(s1, "node2"); - s1 = ShardRoutingHelper.moveToStarted(s1); - Path i1Path = createTempDir().resolve("indices").resolve(index2.getUUID()).resolve("0"); - CommonStats commonStats1 = new CommonStats(); - commonStats1.store = new StoreStats(1000); - ShardStats[] stats = new ShardStats[] { - new ShardStats(s0, new ShardPath(false, i0Path, i0Path, s0.shardId()), commonStats0 , null, null), - new ShardStats(s1, new ShardPath(false, i1Path, i1Path, s1.shardId()), commonStats1 , null, null) - }; - ImmutableOpenMap.Builder<String, Long> shardSizes = ImmutableOpenMap.builder(); - ImmutableOpenMap.Builder<ShardRouting, String> routingToPath = ImmutableOpenMap.builder(); - ClusterState state = ClusterState.builder(new ClusterName("blarg")) - .version(0) - .metaData(MetaData.builder() - .put(IndexMetaData.builder("non-shadow") - .settings(Settings.builder() - .put(IndexMetaData.SETTING_INDEX_UUID, "0xcafe0000") - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) - .numberOfShards(1) - .numberOfReplicas(0)) - .put(IndexMetaData.builder("shadow") - .settings(Settings.builder() - .put(IndexMetaData.SETTING_INDEX_UUID, "0xcafe0001") - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) - .numberOfShards(1) - .numberOfReplicas(0))) - .build(); - logger.info("--> calling buildShardLevelInfo with state: {}", state); - InternalClusterInfoService.buildShardLevelInfo(logger, stats, shardSizes, routingToPath, state); - assertEquals(2, shardSizes.size()); - assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(s0))); - assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(s1))); - assertEquals(100L, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(s0)).longValue()); - assertEquals(0L, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(s1)).longValue()); - } - public void testFillDiskUsage() { ImmutableOpenMap.Builder<String, DiskUsage> newLeastAvaiableUsages = ImmutableOpenMap.builder(); ImmutableOpenMap.Builder<String, DiskUsage> newMostAvaiableUsages = ImmutableOpenMap.builder(); diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolverTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolverTests.java new file mode 100644 index 00000000000..d6c8707c1d7 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/ClusterNameExpressionResolverTests.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +public class ClusterNameExpressionResolverTests extends ESTestCase { + + private ClusterNameExpressionResolver clusterNameResolver = new ClusterNameExpressionResolver(Settings.EMPTY); + private static final Set<String> remoteClusters = new HashSet<>(); + + static { + remoteClusters.add("cluster1"); + remoteClusters.add("cluster2"); + remoteClusters.add("totallyDifferent"); + } + + public void testExactMatch() { + List<String> clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "totallyDifferent"); + assertEquals(new HashSet<>(Arrays.asList("totallyDifferent")), new HashSet<>(clusters)); + } + + public void testNoWildCardNoMatch() { + List<String> clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "totallyDifferent2"); + assertTrue(clusters.isEmpty()); + } + + public void testWildCardNoMatch() { + List<String> clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "totally*2"); + assertTrue(clusters.isEmpty()); + } + + public void testSimpleWildCard() { + List<String> clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "*"); + assertEquals(new HashSet<>(Arrays.asList("cluster1", "cluster2", "totallyDifferent")), new HashSet<>(clusters)); + } + + public void testSuffixWildCard() { + List<String> clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "cluster*"); + assertEquals(new HashSet<>(Arrays.asList("cluster1", "cluster2")), new HashSet<>(clusters)); + } + + public void testPrefixWildCard() { + List<String> clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "*Different"); + assertEquals(new HashSet<>(Arrays.asList("totallyDifferent")), new HashSet<>(clusters)); + } + + public void testMiddleWildCard() { + List<String> clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "clu*1"); + assertEquals(new HashSet<>(Arrays.asList("cluster1")), new HashSet<>(clusters)); + } +} diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java deleted file mode 100644 index 0a39168c853..00000000000 --- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingBackwardCompatibilityTests.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ - -package org.elasticsearch.cluster.routing; - - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; - -import java.io.BufferedReader; -import java.io.InputStreamReader; -import java.util.Arrays; - -public class RoutingBackwardCompatibilityTests extends ESTestCase { - - public void testBackwardCompatibility() throws Exception { - try (BufferedReader reader = new BufferedReader(new InputStreamReader(RoutingBackwardCompatibilityTests.class - .getResourceAsStream("/org/elasticsearch/cluster/routing/shard_routes.txt"), "UTF-8"))) { - for (String line = reader.readLine(); line != null; line = reader.readLine()) { - if (line.startsWith("#")) { // comment - continue; - } - String[] parts = line.split("\t"); - assertEquals(Arrays.toString(parts), 7, parts.length); - final String index = parts[0]; - final int numberOfShards = Integer.parseInt(parts[1]); - final String type = parts[2]; - final String id = parts[3]; - final String routing = "null".equals(parts[4]) ? null : parts[4]; - final int pre20ExpectedShardId = Integer.parseInt(parts[5]); // not needed anymore - old hashing is gone - final int currentExpectedShard = Integer.parseInt(parts[6]); - - OperationRouting operationRouting = new OperationRouting(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, - ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); - for (Version version : VersionUtils.allReleasedVersions()) { - if (version.onOrAfter(Version.V_2_0_0) == false) { - // unsupported version, no need to test - continue; - } - final Settings settings = settings(version).build(); - IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(settings).numberOfShards(numberOfShards) - .numberOfReplicas(randomInt(3)).build(); - MetaData.Builder metaData = MetaData.builder().put(indexMetaData, false); - RoutingTable routingTable = RoutingTable.builder().addAsNew(indexMetaData).build(); - ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) - .metaData(metaData).routingTable(routingTable).build(); - final int shardId = operationRouting.indexShards(clusterState, index, id, routing).shardId().getId(); - assertEquals(currentExpectedShard, shardId); - } - } - } - } -} diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java index 74d3dda8e36..ee9d69a2205 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java @@ -33,11 +33,14 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.List; import static 
org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; @@ -84,4 +87,69 @@ public class StartedShardsRoutingTests extends ESAllocationTestCase { assertThat(shardRouting.currentNodeId(), equalTo("node2")); assertThat(shardRouting.relocatingNodeId(), nullValue()); } + + public void testRelocatingPrimariesWithInitializingReplicas() { + AllocationService allocation = createAllocationService(); + + logger.info("--> building initial cluster state"); + AllocationId primaryId = AllocationId.newRelocation(AllocationId.newInitializing()); + AllocationId replicaId = AllocationId.newInitializing(); + boolean relocatingReplica = randomBoolean(); + if (relocatingReplica) { + replicaId = AllocationId.newRelocation(replicaId); + } + + final IndexMetaData indexMetaData = IndexMetaData.builder("test") + .settings(settings(Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(1) + .putInSyncAllocationIds(0, + relocatingReplica ? Sets.newHashSet(primaryId.getId(), replicaId.getId()) : Sets.newHashSet(primaryId.getId())) + .build(); + final Index index = indexMetaData.getIndex(); + ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))) + .metaData(MetaData.builder().put(indexMetaData, false)); + + final ShardRouting relocatingPrimary = TestShardRouting.newShardRouting( + new ShardId(index, 0), "node1", "node2", true, ShardRoutingState.RELOCATING, primaryId); + final ShardRouting replica = TestShardRouting.newShardRouting( + new ShardId(index, 0), "node3", relocatingReplica ? "node4" : null, false, + relocatingReplica ? ShardRoutingState.RELOCATING : ShardRoutingState.INITIALIZING, replicaId); + + stateBuilder.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index) + .addIndexShard(new IndexShardRoutingTable.Builder(relocatingPrimary.shardId()) + .addShard(relocatingPrimary) + .addShard(replica) + .build())) + .build()); + + + ClusterState state = stateBuilder.build(); + + logger.info("--> test starting of relocating primary shard with initializing / relocating replica"); + ClusterState newState = allocation.applyStartedShards(state, Arrays.asList(relocatingPrimary.getTargetRelocatingShard())); + assertNotEquals(newState, state); + assertTrue(newState.routingTable().index("test").allPrimaryShardsActive()); + ShardRouting startedReplica = newState.routingTable().index("test").shard(0).replicaShards().get(0); + if (relocatingReplica) { + assertTrue(startedReplica.relocating()); + assertEquals(replica.currentNodeId(), startedReplica.currentNodeId()); + assertEquals(replica.relocatingNodeId(), startedReplica.relocatingNodeId()); + assertEquals(replica.allocationId().getId(), startedReplica.allocationId().getId()); + assertNotEquals(replica.allocationId().getRelocationId(), startedReplica.allocationId().getRelocationId()); + } else { + assertTrue(startedReplica.initializing()); + assertEquals(replica.currentNodeId(), startedReplica.currentNodeId()); + assertNotEquals(replica.allocationId().getId(), startedReplica.allocationId().getId()); + } + + logger.info("--> test starting of relocating primary shard together with initializing / relocating replica"); + List<ShardRouting> startedShards = new ArrayList<>(); + startedShards.add(relocatingPrimary.getTargetRelocatingShard()); + startedShards.add(relocatingReplica ?
replica.getTargetRelocatingShard() : replica); + Collections.shuffle(startedShards, random()); + newState = allocation.applyStartedShards(state, startedShards); + assertNotEquals(newState, state); + assertTrue(newState.routingTable().index("test").shard(0).allShardsStarted()); + } } diff --git a/core/src/test/java/org/elasticsearch/common/settings/AddFileKeyStoreCommandTests.java b/core/src/test/java/org/elasticsearch/common/settings/AddFileKeyStoreCommandTests.java new file mode 100644 index 00000000000..9044103e43b --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/settings/AddFileKeyStoreCommandTests.java @@ -0,0 +1,149 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Map; + +import org.elasticsearch.cli.Command; +import org.elasticsearch.cli.ExitCodes; +import org.elasticsearch.cli.Terminal; +import org.elasticsearch.cli.UserException; +import org.elasticsearch.env.Environment; + +import static org.hamcrest.Matchers.containsString; + +public class AddFileKeyStoreCommandTests extends KeyStoreCommandTestCase { + @Override + protected Command newCommand() { + return new AddFileKeyStoreCommand() { + @Override + protected Environment createEnv(Terminal terminal, Map<String, String> settings) { + return env; + } + }; + } + + private Path createRandomFile() throws IOException { + int length = randomIntBetween(10, 20); + byte[] bytes = new byte[length]; + for (int i = 0; i < length; ++i) { + bytes[i] = randomByte(); + } + Path file = env.configFile().resolve("randomfile"); + Files.write(file, bytes); + return file; + } + + private void addFile(KeyStoreWrapper keystore, String setting, Path file) throws Exception { + keystore.setFile(setting, Files.readAllBytes(file)); + keystore.save(env.configFile()); + } + + public void testMissing() throws Exception { + UserException e = expectThrows(UserException.class, this::execute); + assertEquals(ExitCodes.DATA_ERROR, e.exitCode); + assertThat(e.getMessage(), containsString("keystore not found")); + } + + public void testOverwritePromptDefault() throws Exception { + Path file = createRandomFile(); + KeyStoreWrapper keystore = createKeystore(""); + addFile(keystore, "foo", file); + terminal.addTextInput(""); + execute("foo", "path/dne"); + assertSecureFile("foo", file); + } + + public void testOverwritePromptExplicitNo() throws Exception { + Path file = createRandomFile(); + KeyStoreWrapper keystore = createKeystore(""); + addFile(keystore, "foo", file); + terminal.addTextInput("n"); // explicit no + execute("foo", "path/dne"); + assertSecureFile("foo", file); + } + + public void testOverwritePromptExplicitYes() throws Exception { + Path file1 = createRandomFile();
+ KeyStoreWrapper keystore = createKeystore(""); + addFile(keystore, "foo", file1); + terminal.addTextInput("y"); + Path file2 = createRandomFile(); + execute("foo", file2.toString()); + assertSecureFile("foo", file2); + } + + public void testOverwriteForceShort() throws Exception { + Path file1 = createRandomFile(); + KeyStoreWrapper keystore = createKeystore(""); + addFile(keystore, "foo", file1); + Path file2 = createRandomFile(); + execute("-f", "foo", file2.toString()); + assertSecureFile("foo", file2); + } + + public void testOverwriteForceLong() throws Exception { + Path file1 = createRandomFile(); + KeyStoreWrapper keystore = createKeystore(""); + addFile(keystore, "foo", file1); + Path file2 = createRandomFile(); + execute("--force", "foo", file2.toString()); + assertSecureFile("foo", file2); + } + + public void testForceNonExistent() throws Exception { + createKeystore(""); + Path file = createRandomFile(); + execute("--force", "foo", file.toString()); + assertSecureFile("foo", file); + } + + public void testMissingSettingName() throws Exception { + createKeystore(""); + UserException e = expectThrows(UserException.class, this::execute); + assertEquals(ExitCodes.USAGE, e.exitCode); + assertThat(e.getMessage(), containsString("Missing setting name")); + } + + public void testMissingFileName() throws Exception { + createKeystore(""); + UserException e = expectThrows(UserException.class, () -> execute("foo")); + assertEquals(ExitCodes.USAGE, e.exitCode); + assertThat(e.getMessage(), containsString("Missing file name")); + } + + public void testFileDNE() throws Exception { + createKeystore(""); + UserException e = expectThrows(UserException.class, () -> execute("foo", "path/dne")); + assertEquals(ExitCodes.IO_ERROR, e.exitCode); + assertThat(e.getMessage(), containsString("File [path/dne] does not exist")); + } + + public void testExtraArguments() throws Exception { + createKeystore(""); + Path file = createRandomFile(); + UserException e = expectThrows(UserException.class, () -> execute("foo", file.toString(), "bar")); + assertEquals(e.getMessage(), ExitCodes.USAGE, e.exitCode); + assertThat(e.getMessage(), containsString("Unrecognized extra arguments [bar]")); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java b/core/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java index ef732c1e29c..11c3f107fe7 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/AddStringKeyStoreCommandTests.java @@ -127,7 +127,7 @@ public class AddStringKeyStoreCommandTests extends KeyStoreCommandTestCase { assertEquals("String value must contain only ASCII", e.getMessage()); } - public void testNpe() throws Exception { + public void testMissingSettingName() throws Exception { createKeystore(""); terminal.addTextInput(""); UserException e = expectThrows(UserException.class, this::execute); diff --git a/core/src/test/java/org/elasticsearch/common/settings/CreateKeyStoreCommandTests.java b/core/src/test/java/org/elasticsearch/common/settings/CreateKeyStoreCommandTests.java index 8584d4d1555..5d4741c7291 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/CreateKeyStoreCommandTests.java +++ b/core/src/test/java/org/elasticsearch/common/settings/CreateKeyStoreCommandTests.java @@ -47,7 +47,7 @@ public class CreateKeyStoreCommandTests extends KeyStoreCommandTestCase { } public void testNotPosix() throws 
Exception { - setupEnv(false); + env = setupEnv(false, fileSystems); execute(); Path configDir = env.configFile(); assertNotNull(KeyStoreWrapper.load(configDir)); diff --git a/core/src/test/java/org/elasticsearch/common/settings/KeyStoreCommandTestCase.java b/core/src/test/java/org/elasticsearch/common/settings/KeyStoreCommandTestCase.java index 1e4d24a344e..500b7b627b8 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/KeyStoreCommandTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/settings/KeyStoreCommandTestCase.java @@ -20,7 +20,9 @@ package org.elasticsearch.common.settings; import java.io.IOException; +import java.io.InputStream; import java.nio.file.FileSystem; +import java.nio.file.FileSystems; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; @@ -53,10 +55,10 @@ public abstract class KeyStoreCommandTestCase extends CommandTestCase { @Before public void setupEnv() throws IOException { - setupEnv(true); // default to posix, but tests may call setupEnv(false) to overwrite + env = setupEnv(true, fileSystems); // default to posix, but tests may call setupEnv(false) to overwrite } - void setupEnv(boolean posix) throws IOException { + static Environment setupEnv(boolean posix, List<FileSystem> fileSystems) throws IOException { final Configuration configuration; if (posix) { configuration = Configuration.unix().toBuilder().setAttributeViews("basic", "owner", "posix", "unix").build(); @@ -68,7 +70,7 @@ public abstract class KeyStoreCommandTestCase extends CommandTestCase { PathUtilsForTesting.installMock(fs); // restored by restoreFileSystem in ESTestCase Path home = fs.getPath("/", "test-home"); Files.createDirectories(home.resolve("config")); - env = new Environment(Settings.builder().put("path.home", home).build()); + return new Environment(Settings.builder().put("path.home", home).build()); } KeyStoreWrapper createKeystore(String password, String... settings) throws Exception { @@ -94,4 +96,28 @@ public abstract class KeyStoreCommandTestCase extends CommandTestCase { void assertSecureString(KeyStoreWrapper keystore, String setting, String value) throws Exception { assertEquals(value, keystore.getString(setting).toString()); } + + void assertSecureFile(String setting, Path file) throws Exception { + assertSecureFile(loadKeystore(""), setting, file); + } + + void assertSecureFile(KeyStoreWrapper keystore, String setting, Path file) throws Exception { + byte[] expectedBytes = Files.readAllBytes(file); + try (InputStream input = keystore.getFile(setting)) { + for (int i = 0; i < expectedBytes.length; ++i) { + int got = input.read(); + int expected = Byte.toUnsignedInt(expectedBytes[i]); + if (got < 0) { + fail("Got EOF from keystore stream at position " + i + " but expected 0x" + Integer.toHexString(expected)); + } + assertEquals("Byte " + i, expected, got); + } + int eof = input.read(); + if (eof != -1) { + fail("Found extra bytes in file stream from keystore, expected " + expectedBytes.length + + " bytes but found 0x" + Integer.toHexString(eof)); + } + } + + } } diff --git a/core/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java b/core/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java new file mode 100644 index 00000000000..0b42eb59f82 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.FileSystem; +import java.util.ArrayList; +import java.util.List; + +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ESTestCase; +import org.junit.After; +import org.junit.Before; + +public class KeyStoreWrapperTests extends ESTestCase { + + Environment env; + List<FileSystem> fileSystems = new ArrayList<>(); + + @After + public void closeMockFileSystems() throws IOException { + IOUtils.close(fileSystems); + } + + @Before + public void setupEnv() throws IOException { + env = KeyStoreCommandTestCase.setupEnv(true, fileSystems); + } + + public void testFileSettingExhaustiveBytes() throws Exception { + KeyStoreWrapper keystore = KeyStoreWrapper.create(new char[0]); + byte[] bytes = new byte[256]; + for (int i = 0; i < 256; ++i) { + bytes[i] = (byte)i; + } + keystore.setFile("foo", bytes); + keystore.save(env.configFile()); + keystore = KeyStoreWrapper.load(env.configFile()); + keystore.decrypt(new char[0]); + try (InputStream stream = keystore.getFile("foo")) { + for (int i = 0; i < 256; ++i) { + int got = stream.read(); + if (got < 0) { + fail("Expected 256 bytes but read " + i); + } + assertEquals(i, got); + } + assertEquals(-1, stream.read()); // nothing left + } + } +} diff --git a/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java b/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java index bf10c117b13..308ae0eb350 100644 --- a/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/IndexFolderUpgraderTests.java @@ -68,14 +68,13 @@ public class IndexFolderUpgraderTests extends ESTestCase { public void testUpgradeCustomDataPath() throws IOException { Path customPath = createTempDir(); final Settings nodeSettings = Settings.builder() - .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()) .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), customPath.toAbsolutePath().toString()).build(); try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { final Index index = new Index(randomAlphaOfLength(10), UUIDs.randomBase64UUID()); Settings settings = Settings.builder() .put(nodeSettings) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) .put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString()) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build(); @@ -97,14 +96,13 @@ public class IndexFolderUpgraderTests extends ESTestCase { public void
testPartialUpgradeCustomDataPath() throws IOException { Path customPath = createTempDir(); final Settings nodeSettings = Settings.builder() - .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()) .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), customPath.toAbsolutePath().toString()).build(); try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { final Index index = new Index(randomAlphaOfLength(10), UUIDs.randomBase64UUID()); Settings settings = Settings.builder() .put(nodeSettings) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) .put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString()) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) @@ -136,14 +134,13 @@ public class IndexFolderUpgraderTests extends ESTestCase { } public void testUpgrade() throws IOException { - final Settings nodeSettings = Settings.builder() - .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()).build(); + final Settings nodeSettings = Settings.EMPTY; try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { final Index index = new Index(randomAlphaOfLength(10), UUIDs.randomBase64UUID()); Settings settings = Settings.builder() .put(nodeSettings) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build(); @@ -159,8 +156,7 @@ public class IndexFolderUpgraderTests extends ESTestCase { } public void testUpgradeIndices() throws IOException { - final Settings nodeSettings = Settings.builder() - .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()).build(); + final Settings nodeSettings = Settings.EMPTY; try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) { Map<IndexSettings, Tuple<Integer, Integer>> indexSettingsMap = new HashMap<>(); for (int i = 0; i < randomIntBetween(2, 5); i++) { final Index index = new Index(randomAlphaOfLength(10), UUIDs.randomBase64UUID()); Settings settings = Settings.builder() .put(nodeSettings) .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5)) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) .build(); diff --git a/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 9c11ae6b23f..f067212caaf 100644 --- a/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -380,11 +380,10 @@ public class NodeEnvironmentTests extends ESTestCase { assertThat("index paths uses the regular template", env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID()))); - IndexSettings s3 = new IndexSettings(s2.getIndexMetaData(), - Settings.builder().put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), false).build()); + IndexSettings s3 = new IndexSettings(s2.getIndexMetaData(), Settings.builder().build()); assertThat(env.availableShardPaths(sid),
equalTo(env.availableShardPaths(sid))); - assertThat(env.resolveCustomLocation(s3, sid), equalTo(PathUtils.get("/tmp/foo/" + index.getUUID() + "/0"))); + assertThat(env.resolveCustomLocation(s3, sid), equalTo(PathUtils.get("/tmp/foo/0/" + index.getUUID() + "/0"))); assertThat("shard paths with a custom data_path should contain only regular paths", env.availableShardPaths(sid), diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 6f79922075b..d47221f9e31 100644 --- a/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -330,26 +330,10 @@ public class GatewayIndexStateIT extends ESIntegTestCase { final int numNodes = 2; final List<String> nodes; - if (randomBoolean()) { - // test with a regular index - logger.info("--> starting a cluster with " + numNodes + " nodes"); - nodes = internalCluster().startNodes(numNodes); - logger.info("--> create an index"); - createIndex(indexName); - } else { - // test with a shadow replica index - final Path dataPath = createTempDir(); - logger.info("--> created temp data path for shadow replicas [{}]", dataPath); - logger.info("--> starting a cluster with " + numNodes + " nodes"); - final Settings nodeSettings = Settings.builder() - .put("node.add_lock_id_to_custom_path", false) - .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), dataPath.toString()) - .put("index.store.fs.fs_lock", randomFrom("native", "simple")) - .build(); - nodes = internalCluster().startNodes(numNodes, nodeSettings); - logger.info("--> create a shadow replica index"); - createShadowReplicaIndex(indexName, dataPath, numNodes - 1); - } + logger.info("--> starting a cluster with " + numNodes + " nodes"); + nodes = internalCluster().startNodes(numNodes); + logger.info("--> create an index"); + createIndex(indexName); logger.info("--> waiting for green status"); ensureGreen(); @@ -535,23 +519,4 @@ public class GatewayIndexStateIT extends ESIntegTestCase { + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey())); assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L); } - - - /** - * Creates a shadow replica index and asserts that the index creation was acknowledged. - * Can only be invoked on a cluster where each node has been configured with shared data - * paths and the other necessary settings for shadow replicas.
- */ - private void createShadowReplicaIndex(final String name, final Path dataPath, final int numReplicas) { - assert Files.exists(dataPath); - assert numReplicas >= 0; - final Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numReplicas) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .build(); - assertAcked(prepareCreate(name).setSettings(idxSettings).get()); - } - } diff --git a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java index ca330d9d9df..c09c92a7041 100644 --- a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java @@ -72,16 +72,6 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { private final DiscoveryNode node3 = newNode("node3"); private TestAllocator testAllocator; - - /** - * needed due to random usage of {@link IndexMetaData#INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING}. removed once - * shadow replicas are removed. - */ - @Override - protected boolean enableWarningsCheck() { - return false; - } - @Before public void buildTestAllocator() { this.testAllocator = new TestAllocator(); @@ -401,79 +391,6 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false); } - /** - * Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy and allocation - * deciders say yes, we allocate to that node. - */ - public void testRecoverOnAnyNode() { - RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders(), "allocId"); - testAllocator.addData(node1, "allocId", randomBoolean()); - testAllocator.allocateUnassigned(allocation); - assertThat(allocation.routingNodesChanged(), equalTo(true)); - assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); - assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); - assertClusterHealthStatus(allocation, ClusterHealthStatus.RED); - } - - /** - * Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy and allocation - * deciders say throttle, we add it to ignored shards. - */ - public void testRecoverOnAnyNodeThrottle() { - RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(throttleAllocationDeciders(), "allocId"); - testAllocator.addData(node1, "allocId", randomBoolean()); - testAllocator.allocateUnassigned(allocation); - assertThat(allocation.routingNodesChanged(), equalTo(true)); - assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(false)); - assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); - } - - /** - * Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy but allocation - * deciders say no, we still allocate to that node. 
- */ - public void testRecoverOnAnyNodeForcesAllocateIfShardAvailable() { - RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(noAllocationDeciders(), "allocId"); - testAllocator.addData(node1, "allocId", randomBoolean()); - testAllocator.allocateUnassigned(allocation); - assertThat(allocation.routingNodesChanged(), equalTo(true)); - assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); - assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); - assertClusterHealthStatus(allocation, ClusterHealthStatus.RED); - } - - /** - * Tests that when recovering using "recover_on_any_node" and we don't find a node with a shard copy we let - * BalancedShardAllocator assign the shard - */ - public void testRecoverOnAnyNodeDoesNotAssignIfNoShardAvailable() { - RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders(), "allocId"); - testAllocator.addData(node1, null, randomBoolean()); - testAllocator.allocateUnassigned(allocation); - assertThat(allocation.routingNodesChanged(), equalTo(false)); - assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); - assertThat(allocation.routingNodes().unassigned().size(), equalTo(1)); - assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); - } - - private RoutingAllocation getRecoverOnAnyNodeRoutingAllocation(AllocationDeciders allocationDeciders, String... allocIds) { - MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndexName()).settings(settings(Version.CURRENT) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .put(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, true)) - .numberOfShards(1).numberOfReplicas(0).putInSyncAllocationIds(0, Sets.newHashSet(allocIds))) - .build(); - - RoutingTable routingTable = RoutingTable.builder() - .addAsRestore(metaData.index(shardId.getIndex()), new SnapshotRecoverySource(new Snapshot("test", new SnapshotId("test", UUIDs.randomBase64UUID())), Version.CURRENT, shardId.getIndexName())) - .build(); - ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) - .metaData(metaData) - .routingTable(routingTable) - .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); - return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false); - } - private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDeciders deciders, UnassignedInfo.Reason reason, String... 
activeAllocationIds) { MetaData metaData = MetaData.builder() diff --git a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java index a5cf35105c2..775f7e8f1b5 100644 --- a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java @@ -65,6 +65,8 @@ import static java.util.Collections.unmodifiableMap; import static org.hamcrest.Matchers.equalTo; public class ReplicaShardAllocatorTests extends ESAllocationTestCase { + private static final org.apache.lucene.util.Version MIN_SUPPORTED_LUCENE_VERSION = org.elasticsearch.Version.CURRENT + .minimumIndexCompatibilityVersion().luceneVersion; private final ShardId shardId = new ShardId("test", "_na_", 0); private final DiscoveryNode node1 = newNode("node1"); private final DiscoveryNode node2 = newNode("node2"); @@ -119,8 +121,8 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { public void testSimpleFullMatchAllocation() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3; - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(nodeToMatch, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(nodeToMatch, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId())); @@ -132,8 +134,8 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { public void testSyncIdMatch() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3; - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(nodeToMatch, "MATCH", new StoreFileMetaData("file1", 10, "NO_MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(nodeToMatch, "MATCH", new StoreFileMetaData("file1", 10, "NO_MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId())); @@ -145,8 +147,8 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { public void testFileChecksumMatch() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); DiscoveryNode nodeToMatch = randomBoolean() ?
node2 : node3; - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(nodeToMatch, "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(nodeToMatch, "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId())); @@ -160,7 +162,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { */ public void testNoPrimaryData() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); - testAllocator.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); @@ -172,7 +174,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { */ public void testNoDataForReplicaOnAnyNode() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); @@ -184,8 +186,8 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { */ public void testNoMatchingFilesForReplicaOnAnyNode() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(node2, "NO_MATCH", new StoreFileMetaData("file1", 10, "NO_MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "NO_MATCH", new StoreFileMetaData("file1", 10, "NO_MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); @@ -197,8 +199,8 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { */ public void testNoOrThrottleDecidersRemainsInUnassigned() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(randomBoolean() ? 
noAllocationDeciders() : throttleAllocationDeciders()); - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); @@ -222,8 +224,8 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { return Decision.YES; } }))); - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); @@ -231,8 +233,9 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { public void testDelayedAllocation() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(), - Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)).build(), UnassignedInfo.Reason.NODE_LEFT); - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)) + .build(), UnassignedInfo.Reason.NODE_LEFT); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); if (randomBoolean()) { // we sometime return empty list of files, make sure we test this as well testAllocator.addData(node2, null); @@ -244,7 +247,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(), Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)).build(), UnassignedInfo.Reason.NODE_LEFT); - testAllocator.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.allocateUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); @@ -253,9 +256,9 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { public void testCancelRecoveryBetterSyncId() { RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(node2, "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(node3, "MATCH", new 
StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node3, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.processExistingRecoveries(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); @@ -264,9 +267,10 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { public void testNotCancellingRecoveryIfSyncedOnExistingRecovery() { RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(node3, randomBoolean() ? "MATCH" : "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node3, randomBoolean() ? "MATCH" : "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", + MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.processExistingRecoveries(allocation); assertThat(allocation.routingNodesChanged(), equalTo(false)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0)); @@ -274,8 +278,8 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { public void testNotCancellingRecovery() { RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders()); - testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")) - .addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM")); + testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) + .addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.processExistingRecoveries(allocation); assertThat(allocation.routingNodesChanged(), equalTo(false)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0)); diff --git a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java index 80e453d665e..385770426f5 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -47,36 +47,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF /** Unit test(s) for IndexService */ public class IndexServiceTests extends ESSingleNodeTestCase { - public void testDetermineShadowEngineShouldBeUsed() { - IndexSettings regularSettings = new IndexSettings( - IndexMetaData - .builder("regular") - .settings(Settings.builder() - .put(SETTING_NUMBER_OF_SHARDS, 2) - .put(SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .build()) - .build(), - Settings.EMPTY); - - IndexSettings shadowSettings = new 
IndexSettings( - IndexMetaData - .builder("shadow") - .settings(Settings.builder() - .put(SETTING_NUMBER_OF_SHARDS, 2) - .put(SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .build()) - .build(), - Settings.EMPTY); - - assertFalse("no shadow replicas for normal settings", IndexService.useShadowEngine(true, regularSettings)); - assertFalse("no shadow replicas for normal settings", IndexService.useShadowEngine(false, regularSettings)); - assertFalse("no shadow replicas for primary shard with shadow settings", IndexService.useShadowEngine(true, shadowSettings)); - assertTrue("shadow replicas for replica shards with shadow settings",IndexService.useShadowEngine(false, shadowSettings)); - } - public static CompressedXContent filter(QueryBuilder filterBuilder) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); filterBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS); diff --git a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java b/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java deleted file mode 100644 index 2bf9f0efbfd..00000000000 --- a/core/src/test/java/org/elasticsearch/index/IndexWithShadowReplicasIT.java +++ /dev/null @@ -1,905 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; -import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.RoutingNodes; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.shard.ShadowIndexShard; -import org.elasticsearch.index.store.FsDirectoryService; -import org.elasticsearch.index.translog.TranslogStats; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.recovery.PeerRecoveryTargetService; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.snapshots.SnapshotState; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; - -/** - * Tests for indices that use shadow replicas and a shared filesystem - */ -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) -public 
class IndexWithShadowReplicasIT extends ESIntegTestCase { - - private Settings nodeSettings(Path dataPath) { - return nodeSettings(dataPath.toString()); - } - - private Settings nodeSettings(String dataPath) { - return Settings.builder() - .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), false) - .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), dataPath) - .put(FsDirectoryService.INDEX_LOCK_FACTOR_SETTING.getKey(), randomFrom("native", "simple")) - .build(); - } - - @Override - protected Collection<Class<? extends Plugin>> nodePlugins() { - return Arrays.asList(MockTransportService.TestPlugin.class); - } - - public void testCannotCreateWithBadPath() throws Exception { - Settings nodeSettings = nodeSettings("/badpath"); - internalCluster().startNodes(1, nodeSettings); - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_DATA_PATH, "/etc/foo") - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build(); - try { - assertAcked(prepareCreate("foo").setSettings(idxSettings)); - fail("should have failed"); - } catch (IllegalArgumentException e) { - assertTrue(e.getMessage(), - e.getMessage().contains("custom path [/etc/foo] is not a sub-path of path.shared_data")); - } - } - - /** - * Tests the case where we create an index without shadow replicas, snapshot it and then restore into - * an index with shadow replicas enabled. - */ - public void testRestoreToShadow() throws ExecutionException, InterruptedException { - final Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - internalCluster().startNodes(3, nodeSettings); - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build(); - assertAcked(prepareCreate("foo").setSettings(idxSettings)); - ensureGreen(); - final int numDocs = randomIntBetween(10, 100); - for (int i = 0; i < numDocs; i++) { - client().prepareIndex("foo", "doc", ""+i).setSource("foo", "bar").get(); - } - assertNoFailures(client().admin().indices().prepareFlush().setForce(true).execute().actionGet()); - - assertAcked(client().admin().cluster().preparePutRepository("test-repo") - .setType("fs").setSettings(Settings.builder() - .put("location", randomRepoPath()))); - CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("foo").get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); - assertThat(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS)); - - Settings shadowSettings = Settings.builder() - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2).build(); - - logger.info("--> restore the index into shadow replica index"); - RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap") - .setIndexSettings(shadowSettings).setWaitForCompletion(true) - .setRenamePattern("(.+)").setRenameReplacement("$1-copy") - .execute().actionGet(); -
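The shard-type assertions that follow (a plain IndexShard for the primary, ShadowIndexShard for the replicas) are the integration-level counterpart of the IndexService.useShadowEngine() unit test deleted earlier in this diff. A hedged reconstruction of that rule, for illustration only; the isShadowReplicaIndex() accessor on the 5.x IndexSettings is an assumption here, not confirmed by this diff:

import org.elasticsearch.index.IndexSettings;

// Reconstruction, not the removed implementation itself: only a non-primary
// shard of an index created with index.shadow_replicas=true gets the
// read-only shadow engine; primaries always use the regular, writable engine.
static boolean useShadowEngine(boolean primary, IndexSettings settings) {
    return primary == false && settings.isShadowReplicaIndex(); // assumed accessor
}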
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0)); - ensureGreen(); - refresh(); - Index index = resolveIndex("foo-copy"); - for (IndicesService service : internalCluster().getDataNodeInstances(IndicesService.class)) { - - if (service.hasIndex(index)) { - IndexShard shard = service.indexServiceSafe(index).getShardOrNull(0); - if (shard.routingEntry().primary()) { - assertFalse(shard instanceof ShadowIndexShard); - } else { - assertTrue(shard instanceof ShadowIndexShard); - } - } - } - logger.info("--> performing query"); - SearchResponse resp = client().prepareSearch("foo-copy").setQuery(matchAllQuery()).get(); - assertHitCount(resp, numDocs); - - } - - @TestLogging("org.elasticsearch.gateway:TRACE") - public void testIndexWithFewDocuments() throws Exception { - final Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - internalCluster().startNodes(3, nodeSettings); - final String IDX = "test"; - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2) - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - ensureGreen(IDX); - - // So basically, the primary should fail and the replica will need to - // replay the translog, this is what this tests - client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get(); - - IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(IDX).clear().setTranslog(true).get(); - assertEquals(2, indicesStatsResponse.getIndex(IDX).getPrimaries().getTranslog().estimatedNumberOfOperations()); - assertEquals(2, indicesStatsResponse.getIndex(IDX).getTotal().getTranslog().estimatedNumberOfOperations()); - Index index = resolveIndex(IDX); - for (IndicesService service : internalCluster().getInstances(IndicesService.class)) { - IndexService indexService = service.indexService(index); - if (indexService != null) { - IndexShard shard = indexService.getShard(0); - TranslogStats translogStats = shard.translogStats(); - assertTrue(translogStats != null || shard instanceof ShadowIndexShard); - if (translogStats != null) { - assertEquals(2, translogStats.estimatedNumberOfOperations()); - } - } - } - - // Check that we can get doc 1 and 2, because we are doing realtime - // gets and getting from the primary - GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get(); - GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get(); - assertThat(gResp1.getSource().get("foo"), equalTo("bar")); - assertThat(gResp2.getSource().get("foo"), equalTo("bar")); - - flushAndRefresh(IDX); - client().prepareIndex(IDX, "doc", "3").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", "4").setSource("foo", "bar").get(); - refresh(); - - // Check that we can get doc 1 and 2 without realtime - gResp1 = client().prepareGet(IDX, "doc", "1").setRealtime(false).get(); - gResp2 = client().prepareGet(IDX, "doc", "2").setRealtime(false).get(); - assertThat(gResp1.getSource().get("foo"), equalTo("bar")); - assertThat(gResp2.getSource().get("foo"), equalTo("bar")); - - 
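The translog stats checks above encode the ownership rule for shadow indices: only the primary owns a translog, while a ShadowIndexShard may legitimately report none. A short restatement of the deleted assertion (the method name is illustrative, the calls are the ones used in the deleted test):

import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShadowIndexShard;
import org.elasticsearch.index.translog.TranslogStats;

// Illustrative restatement: null translog stats are acceptable only on a
// shadow replica; any other shard must report its translog.
static void assertTranslogOwnership(IndexShard shard) {
    TranslogStats stats = shard.translogStats();
    assert stats != null || shard instanceof ShadowIndexShard;
}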
logger.info("--> restarting all nodes"); - if (randomBoolean()) { - logger.info("--> rolling restart"); - internalCluster().rollingRestart(); - } else { - logger.info("--> full restart"); - internalCluster().fullRestart(); - } - - client().admin().cluster().prepareHealth().setWaitForNodes("3").get(); - ensureGreen(IDX); - flushAndRefresh(IDX); - - logger.info("--> performing query"); - SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get(); - assertHitCount(resp, 4); - - logger.info("--> deleting index"); - assertAcked(client().admin().indices().prepareDelete(IDX)); - } - - public void testReplicaToPrimaryPromotion() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - String node1 = internalCluster().startNode(nodeSettings); - String IDX = "test"; - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get(); - - GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get(); - GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get(); - assertTrue(gResp1.isExists()); - assertTrue(gResp2.isExists()); - assertThat(gResp1.getSource().get("foo"), equalTo("bar")); - assertThat(gResp2.getSource().get("foo"), equalTo("bar")); - - // Node1 has the primary, now node2 has the replica - internalCluster().startNode(nodeSettings); - ensureGreen(IDX); - client().admin().cluster().prepareHealth().setWaitForNodes("2").get(); - flushAndRefresh(IDX); - - logger.info("--> stopping node1 [{}]", node1); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1)); - ensureClusterSizeConsistency(); // wait for the new node to be elected and process the node leave - ensureYellow(IDX); - - logger.info("--> performing query"); - SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get(); - assertHitCount(resp, 2); - - gResp1 = client().prepareGet(IDX, "doc", "1").get(); - gResp2 = client().prepareGet(IDX, "doc", "2").get(); - assertTrue(gResp1.isExists()); - assertTrue(gResp2.toString(), gResp2.isExists()); - assertThat(gResp1.getSource().get("foo"), equalTo("bar")); - assertThat(gResp2.getSource().get("foo"), equalTo("bar")); - - client().prepareIndex(IDX, "doc", "1").setSource("foo", "foobar").get(); - client().prepareIndex(IDX, "doc", "2").setSource("foo", "foobar").get(); - gResp1 = client().prepareGet(IDX, "doc", "1").get(); - gResp2 = client().prepareGet(IDX, "doc", "2").get(); - assertTrue(gResp1.isExists()); - assertTrue(gResp2.toString(), gResp2.isExists()); - assertThat(gResp1.getSource().get("foo"), equalTo("foobar")); - assertThat(gResp2.getSource().get("foo"), equalTo("foobar")); - } - - public void testPrimaryRelocation() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - String node1 = internalCluster().startNode(nodeSettings); - String IDX = "test"; - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - 
.put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get(); - - GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get(); - GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get(); - assertTrue(gResp1.isExists()); - assertTrue(gResp2.isExists()); - assertThat(gResp1.getSource().get("foo"), equalTo("bar")); - assertThat(gResp2.getSource().get("foo"), equalTo("bar")); - - // Node1 has the primary, now node2 has the replica - String node2 = internalCluster().startNode(nodeSettings); - ensureGreen(IDX); - client().admin().cluster().prepareHealth().setWaitForNodes("2").get(); - flushAndRefresh(IDX); - - // now prevent primary from being allocated on node 1 move to node_3 - String node3 = internalCluster().startNode(nodeSettings); - Settings build = Settings.builder().put("index.routing.allocation.exclude._name", node1).build(); - client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet(); - - ensureGreen(IDX); - // check if primary has relocated to node3 - assertEquals(internalCluster().clusterService(node3).localNode().getId(), - client().admin().cluster().prepareState().get().getState().routingTable().index(IDX).shard(0).primaryShard().currentNodeId()); - logger.info("--> performing query"); - SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get(); - assertHitCount(resp, 2); - - gResp1 = client().prepareGet(IDX, "doc", "1").get(); - gResp2 = client().prepareGet(IDX, "doc", "2").get(); - assertTrue(gResp1.isExists()); - assertTrue(gResp2.toString(), gResp2.isExists()); - assertThat(gResp1.getSource().get("foo"), equalTo("bar")); - assertThat(gResp2.getSource().get("foo"), equalTo("bar")); - - client().prepareIndex(IDX, "doc", "3").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", "4").setSource("foo", "bar").get(); - gResp1 = client().prepareGet(IDX, "doc", "3").setPreference("_primary").get(); - gResp2 = client().prepareGet(IDX, "doc", "4").setPreference("_primary").get(); - assertTrue(gResp1.isExists()); - assertTrue(gResp2.isExists()); - assertThat(gResp1.getSource().get("foo"), equalTo("bar")); - assertThat(gResp2.getSource().get("foo"), equalTo("bar")); - } - - public void testPrimaryRelocationWithConcurrentIndexing() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - String node1 = internalCluster().startNode(nodeSettings); - final String IDX = "test"; - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - // Node1 has the primary, now node2 has the replica - String node2 = internalCluster().startNode(nodeSettings); - ensureGreen(IDX); - flushAndRefresh(IDX); - String node3 = internalCluster().startNode(nodeSettings); - final AtomicInteger counter = new AtomicInteger(0); - final CountDownLatch 
started = new CountDownLatch(1); - - final int numPhase1Docs = scaledRandomIntBetween(25, 200); - final int numPhase2Docs = scaledRandomIntBetween(25, 200); - final CountDownLatch phase1finished = new CountDownLatch(1); - final CountDownLatch phase2finished = new CountDownLatch(1); - final CopyOnWriteArrayList<Exception> exceptions = new CopyOnWriteArrayList<>(); - Thread thread = new Thread() { - @Override - public void run() { - started.countDown(); - while (counter.get() < (numPhase1Docs + numPhase2Docs)) { - try { - final IndexResponse indexResponse = client().prepareIndex(IDX, "doc", - Integer.toString(counter.incrementAndGet())).setSource("foo", "bar").get(); - assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); - } catch (Exception e) { - exceptions.add(e); - } - final int docCount = counter.get(); - if (docCount == numPhase1Docs) { - phase1finished.countDown(); - } - } - logger.info("--> stopping indexing thread"); - phase2finished.countDown(); - } - }; - thread.start(); - started.await(); - phase1finished.await(); // wait for a certain number of documents to be indexed - logger.info("--> excluding {} from allocation", node1); - // now prevent primary from being allocated on node 1 move to node_3 - Settings build = Settings.builder().put("index.routing.allocation.exclude._name", node1).build(); - client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet(); - // wait for more documents to be indexed post-recovery, also waits for - // indexing thread to stop - phase2finished.await(); - ExceptionsHelper.rethrowAndSuppress(exceptions); - ensureGreen(IDX); - thread.join(); - logger.info("--> performing query"); - flushAndRefresh(); - - SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get(); - assertHitCount(resp, counter.get()); - assertHitCount(resp, numPhase1Docs + numPhase2Docs); - } - - public void testPrimaryRelocationWhereRecoveryFails() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = Settings.builder() - .put("node.add_lock_id_to_custom_path", false) - .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), dataPath) - .build(); - - String node1 = internalCluster().startNode(nodeSettings); - final String IDX = "test"; - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - // Node1 has the primary, now node2 has the replica - String node2 = internalCluster().startNode(nodeSettings); - ensureGreen(IDX); - flushAndRefresh(IDX); - String node3 = internalCluster().startNode(nodeSettings); - final AtomicInteger counter = new AtomicInteger(0); - final CountDownLatch started = new CountDownLatch(1); - - final int numPhase1Docs = scaledRandomIntBetween(25, 200); - final int numPhase2Docs = scaledRandomIntBetween(25, 200); - final int numPhase3Docs = scaledRandomIntBetween(25, 200); - final CountDownLatch phase1finished = new CountDownLatch(1); - final CountDownLatch phase2finished = new CountDownLatch(1); - final CountDownLatch phase3finished = new CountDownLatch(1); - - final AtomicBoolean keepFailing = new AtomicBoolean(true); - - MockTransportService mockTransportService = ((MockTransportService)
internalCluster().getInstance(TransportService.class, node1)); - mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, node3), - new MockTransportService.DelegateTransport(mockTransportService.original()) { - - @Override - protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, - TransportRequestOptions options) throws IOException { - if (keepFailing.get() && action.equals(PeerRecoveryTargetService.Actions.TRANSLOG_OPS)) { - logger.info("--> failing translog ops"); - throw new ElasticsearchException("failing on purpose"); - } - super.sendRequest(connection, requestId, action, request, options); - } - }); - - Thread thread = new Thread() { - @Override - public void run() { - started.countDown(); - while (counter.get() < (numPhase1Docs + numPhase2Docs + numPhase3Docs)) { - final IndexResponse indexResponse = client().prepareIndex(IDX, "doc", - Integer.toString(counter.incrementAndGet())).setSource("foo", "bar").get(); - assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult()); - final int docCount = counter.get(); - if (docCount == numPhase1Docs) { - phase1finished.countDown(); - } else if (docCount == (numPhase1Docs + numPhase2Docs)) { - phase2finished.countDown(); - } - } - logger.info("--> stopping indexing thread"); - phase3finished.countDown(); - } - }; - thread.start(); - started.await(); - phase1finished.await(); // wait for a certain number of documents to be indexed - logger.info("--> excluding {} from allocation", node1); - // now prevent primary from being allocated on node 1 move to node_3 - Settings build = Settings.builder().put("index.routing.allocation.exclude._name", node1).build(); - client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet(); - // wait for more documents to be indexed post-recovery, also waits for - // indexing thread to stop - phase2finished.await(); - // stop failing - keepFailing.set(false); - // wait for more docs to be indexed - phase3finished.await(); - ensureGreen(IDX); - thread.join(); - logger.info("--> performing query"); - flushAndRefresh(); - - SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get(); - assertHitCount(resp, counter.get()); - } - - public void testIndexWithShadowReplicasCleansUp() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - final int nodeCount = randomIntBetween(2, 5); - logger.info("--> starting {} nodes", nodeCount); - final List<String> nodes = internalCluster().startNodes(nodeCount, nodeSettings); - final String IDX = "test"; - final Tuple<Integer, Integer> numPrimariesAndReplicas = randomPrimariesAndReplicas(nodeCount); - final int numPrimaries = numPrimariesAndReplicas.v1(); - final int numReplicas = numPrimariesAndReplicas.v2(); - logger.info("--> creating index {} with {} primary shards and {} replicas", IDX, numPrimaries, numReplicas); - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numPrimaries) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numReplicas) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - ensureGreen(IDX); - - client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", 
"2").setSource("foo", "bar").get(); - flushAndRefresh(IDX); - - GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get(); - GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get(); - assertThat(gResp1.getSource().get("foo"), equalTo("bar")); - assertThat(gResp2.getSource().get("foo"), equalTo("bar")); - - logger.info("--> performing query"); - SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get(); - assertHitCount(resp, 2); - - logger.info("--> deleting index " + IDX); - assertAcked(client().admin().indices().prepareDelete(IDX)); - assertAllIndicesRemovedAndDeletionCompleted(internalCluster().getInstances(IndicesService.class)); - assertPathHasBeenCleared(dataPath); - //TODO: uncomment the test below when https://github.com/elastic/elasticsearch/issues/17695 is resolved. - //assertIndicesDirsDeleted(nodes); - } - - /** - * Tests that shadow replicas can be "naturally" rebalanced and relocated - * around the cluster. By "naturally" I mean without using the reroute API - */ - // This test failed on CI when trying to assert that all the shard data has been deleted - // from the index path. It has not been reproduced locally. Despite the IndicesService - // deleting the index and hence, deleting all the shard data for the index, the test - // failure still showed some Lucene files in the data directory for that index. Not sure - // why that is, so turning on more logging here. - @TestLogging("org.elasticsearch.indices:TRACE,org.elasticsearch.env:TRACE,_root:DEBUG") - public void testShadowReplicaNaturalRelocation() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - final List<String> nodes = internalCluster().startNodes(2, nodeSettings); - String IDX = "test"; - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - ensureGreen(IDX); - - int docCount = randomIntBetween(10, 100); - List<IndexRequestBuilder> builders = new ArrayList<>(); - for (int i = 0; i < docCount; i++) { - builders.add(client().prepareIndex(IDX, "doc", i + "").setSource("foo", "bar")); - } - indexRandom(true, true, true, builders); - flushAndRefresh(IDX); - - // start a third node, with 5 shards each on the other nodes, they - // should relocate some to the third node - final String node3 = internalCluster().startNode(nodeSettings); - nodes.add(node3); - - assertBusy(new Runnable() { - @Override - public void run() { - client().admin().cluster().prepareHealth().setWaitForNodes("3").get(); - ClusterStateResponse resp = client().admin().cluster().prepareState().get(); - RoutingNodes nodes = resp.getState().getRoutingNodes(); - for (RoutingNode node : nodes) { - logger.info("--> node has {} shards (needs at least 2)", node.numberOfOwningShards()); - assertThat("at least 2 shards on node", node.numberOfOwningShards(), greaterThanOrEqualTo(2)); - } - } - }); - ensureYellow(IDX); - - logger.info("--> performing query"); - SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get(); - assertHitCount(resp, docCount); - - assertAcked(client().admin().indices().prepareDelete(IDX)); -
assertAllIndicesRemovedAndDeletionCompleted(internalCluster().getInstances(IndicesService.class)); - assertPathHasBeenCleared(dataPath); - //TODO: uncomment the test below when https://github.com/elastic/elasticsearch/issues/17695 is resolved. - //assertIndicesDirsDeleted(nodes); - } - - public void testShadowReplicasUsingFieldData() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - internalCluster().startNodes(3, nodeSettings); - String IDX = "test"; - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=keyword").get(); - ensureGreen(IDX); - - client().prepareIndex(IDX, "doc", "1").setSource("foo", "foo").get(); - client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", "3").setSource("foo", "baz").get(); - client().prepareIndex(IDX, "doc", "4").setSource("foo", "eggplant").get(); - flushAndRefresh(IDX); - - SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).addDocValueField("foo").addSort("foo", SortOrder.ASC).get(); - assertHitCount(resp, 4); - assertOrderedSearchHits(resp, "2", "3", "4", "1"); - SearchHit[] hits = resp.getHits().getHits(); - assertThat(hits[0].field("foo").getValue().toString(), equalTo("bar")); - assertThat(hits[1].field("foo").getValue().toString(), equalTo("baz")); - assertThat(hits[2].field("foo").getValue().toString(), equalTo("eggplant")); - assertThat(hits[3].field("foo").getValue().toString(), equalTo("foo")); - } - - /** wait until none of the nodes have shards allocated on them */ - private void assertNoShardsOn(final List<String> nodeList) throws Exception { - assertBusy(new Runnable() { - @Override - public void run() { - ClusterStateResponse resp = client().admin().cluster().prepareState().get(); - RoutingNodes nodes = resp.getState().getRoutingNodes(); - for (RoutingNode node : nodes) { - logger.info("--> node {} has {} shards", node.node().getName(), node.numberOfOwningShards()); - if (nodeList.contains(node.node().getName())) { - assertThat("no shards on node", node.numberOfOwningShards(), equalTo(0)); - } - } - } - }, 1, TimeUnit.MINUTES); - } - - /** wait until the node has the specified number of shards allocated on it */ - private void assertShardCountOn(final String nodeName, final int shardCount) throws Exception { - assertBusy(new Runnable() { - @Override - public void run() { - ClusterStateResponse resp = client().admin().cluster().prepareState().get(); - RoutingNodes nodes = resp.getState().getRoutingNodes(); - for (RoutingNode node : nodes) { - logger.info("--> node {} has {} shards", node.node().getName(), node.numberOfOwningShards()); - if (nodeName.equals(node.node().getName())) { - assertThat(node.numberOfOwningShards(), equalTo(shardCount)); - } - } - } - }, 1, TimeUnit.MINUTES); - } - - public void testIndexOnSharedFSRecoversToAnyNode() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - Settings fooSettings = Settings.builder().put(nodeSettings).put("node.attr.affinity", "foo").build(); - Settings barSettings = Settings.builder().put(nodeSettings).put("node.attr.affinity", "bar").build(); - - List<String>
allNodes = internalCluster().startNodes(fooSettings, fooSettings, barSettings, barSettings); - List<String> fooNodes = allNodes.subList(0, 2); - List<String> barNodes = allNodes.subList(2, 4); - String IDX = "test"; - - Settings includeFoo = Settings.builder() - .put("index.routing.allocation.include.affinity", "foo") - .build(); - Settings includeBar = Settings.builder() - .put("index.routing.allocation.include.affinity", "bar") - .build(); - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .put(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, true) - .put(includeFoo) // start with requiring the shards on "foo" - .build(); - - // only one node, so all primaries will end up on node1 - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=keyword").get(); - ensureGreen(IDX); - - // Index some documents - client().prepareIndex(IDX, "doc", "1").setSource("foo", "foo").get(); - client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", "3").setSource("foo", "baz").get(); - client().prepareIndex(IDX, "doc", "4").setSource("foo", "eggplant").get(); - flushAndRefresh(IDX); - - // put shards on "bar" - client().admin().indices().prepareUpdateSettings(IDX).setSettings(includeBar).get(); - - // wait for the shards to move from "foo" nodes to "bar" nodes - assertNoShardsOn(fooNodes); - - // put shards back on "foo" - client().admin().indices().prepareUpdateSettings(IDX).setSettings(includeFoo).get(); - - // wait for the shards to move from "bar" nodes to "foo" nodes - assertNoShardsOn(barNodes); - - // Stop a foo node - logger.info("--> stopping first 'foo' node"); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(fooNodes.get(0))); - - // Ensure that the other foo node has all the shards now - assertShardCountOn(fooNodes.get(1), 5); - - // Assert no shards on the "bar" nodes - assertNoShardsOn(barNodes); - - // Stop the second "foo" node - logger.info("--> stopping second 'foo' node"); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(fooNodes.get(1))); - - // The index should still be able to be allocated (on the "bar" nodes), - // all the "foo" nodes are gone - ensureGreen(IDX); - - // Start another "foo" node and make sure the index moves back - logger.info("--> starting additional 'foo' node"); - String newFooNode = internalCluster().startNode(fooSettings); - - assertShardCountOn(newFooNode, 5); - assertNoShardsOn(barNodes); - } - - public void testDeletingClosedIndexRemovesFiles() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath.getParent()); - - final int numNodes = randomIntBetween(2, 5); - logger.info("--> starting {} nodes", numNodes); - final List<String> nodes = internalCluster().startNodes(numNodes, nodeSettings); - final String IDX = "test"; - final Tuple<Integer, Integer> numPrimariesAndReplicas = randomPrimariesAndReplicas(numNodes); - final int numPrimaries = numPrimariesAndReplicas.v1(); - final int numReplicas = numPrimariesAndReplicas.v2(); - logger.info("--> creating index {} with {} primary shards and {} replicas", IDX, numPrimaries, numReplicas); - - assert numPrimaries > 0; - assert numReplicas >= 0; - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numPrimaries) -
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numReplicas) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - ensureGreen(IDX); - - int docCount = randomIntBetween(10, 100); - List<IndexRequestBuilder> builders = new ArrayList<>(); - for (int i = 0; i < docCount; i++) { - builders.add(client().prepareIndex(IDX, "doc", i + "").setSource("foo", "bar")); - } - indexRandom(true, true, true, builders); - flushAndRefresh(IDX); - - logger.info("--> closing index {}", IDX); - client().admin().indices().prepareClose(IDX).get(); - ensureGreen(IDX); - - logger.info("--> deleting closed index"); - client().admin().indices().prepareDelete(IDX).get(); - assertAllIndicesRemovedAndDeletionCompleted(internalCluster().getInstances(IndicesService.class)); - assertPathHasBeenCleared(dataPath); - assertIndicesDirsDeleted(nodes); - } - - public void testNodeJoinsWithoutShadowReplicaConfigured() throws Exception { - Path dataPath = createTempDir(); - Settings nodeSettings = nodeSettings(dataPath); - - internalCluster().startNodes(2, nodeSettings); - String IDX = "test"; - - Settings idxSettings = Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2) - .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString()) - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) - .build(); - - prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get(); - - client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get(); - client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get(); - flushAndRefresh(IDX); - - internalCluster().startNodes(1); - ensureYellow(IDX); - - final ClusterHealthResponse clusterHealth = client().admin().cluster() - .prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .execute() - .actionGet(); - assertThat(clusterHealth.getNumberOfNodes(), equalTo(3)); - // the new node is not configured for a shadow replica index, so no shards should have been assigned to it - assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); - } - - private static void assertIndicesDirsDeleted(final List<String> nodes) throws IOException { - for (String node : nodes) { - final NodeEnvironment nodeEnv = internalCluster().getInstance(NodeEnvironment.class, node); - assertThat(nodeEnv.availableIndexFolders(), equalTo(Collections.emptySet())); - } - } - - private static Tuple<Integer, Integer> randomPrimariesAndReplicas(final int numNodes) { - final int numPrimaries; - final int numReplicas; - if (randomBoolean()) { - // test with some nodes having no shards - numPrimaries = 1; - numReplicas = randomIntBetween(0, numNodes - 2); - } else { - // test with all nodes having at least one shard - numPrimaries = randomIntBetween(1, 5); - numReplicas = numNodes - 1; - } - return Tuple.tuple(numPrimaries, numReplicas); - } - -} diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java index 9d9631e1b00..12071f0eac7 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java @@ -38,10 +38,7 @@ import
org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; import java.util.List; -import java.util.Map; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -105,19 +102,6 @@ public class AnalysisRegistryTests extends ESTestCase { assertTrue(e.getMessage().contains("[index.analysis.analyzer.default_index] is not supported")); } - public void testBackCompatOverrideDefaultIndexAnalyzer() { - Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), - VersionUtils.getPreviousVersion(Version.V_5_0_0_alpha1)); - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); - IndexAnalyzers indexAnalyzers = registry.build(IndexSettingsModule.newIndexSettings("index", settings), - singletonMap("default_index", analyzerProvider("default_index")), emptyMap(), emptyMap(), emptyMap(), emptyMap()); - assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class)); - assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class)); - assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(StandardAnalyzer.class)); - assertWarnings("setting [index.analysis.analyzer.default_index] is deprecated, use [index.analysis.analyzer.default] " + - "instead for index [index]"); - } - public void testOverrideDefaultSearchAnalyzer() { Version version = VersionUtils.randomVersion(random()); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); @@ -128,22 +112,6 @@ public class AnalysisRegistryTests extends ESTestCase { assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class)); } - public void testBackCompatOverrideDefaultIndexAndSearchAnalyzer() { - Version version = VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), - VersionUtils.getPreviousVersion(Version.V_5_0_0_alpha1)); - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); - Map<String, AnalyzerProvider<?>> analyzers = new HashMap<>(); - analyzers.put("default_index", analyzerProvider("default_index")); - analyzers.put("default_search", analyzerProvider("default_search")); - IndexAnalyzers indexAnalyzers = registry.build(IndexSettingsModule.newIndexSettings("index", settings), - analyzers, Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap()); - assertThat(indexAnalyzers.getDefaultIndexAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class)); - assertThat(indexAnalyzers.getDefaultSearchAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class)); - assertThat(indexAnalyzers.getDefaultSearchQuoteAnalyzer().analyzer(), instanceOf(EnglishAnalyzer.class)); - assertWarnings("setting [index.analysis.analyzer.default_index] is deprecated, use [index.analysis.analyzer.default] " + - "instead for index [index]"); - } - public void testConfigureCamelCaseTokenFilter() throws IOException { Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(); Settings indexSettings = Settings.builder() diff --git a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java index 7dc55b43700..214515d1702 100644 ---
a/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java @@ -56,18 +56,18 @@ public class PreBuiltAnalyzerTests extends ESSingleNodeTestCase { public void testThatInstancesAreTheSameAlwaysForKeywordAnalyzer() { assertThat(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.CURRENT), - is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_2_0_0))); + is(PreBuiltAnalyzers.KEYWORD.getAnalyzer(Version.V_5_0_0))); } public void testThatInstancesAreCachedAndReused() { assertSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT), PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.CURRENT)); // same lucene version should be cached - assertSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_2_0_0), - PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_2_0_1)); + assertSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_2_2_UNRELEASED), + PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_2_3_UNRELEASED)); - assertNotSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_2_0_0), - PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_2_2_0)); + assertNotSame(PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_0_0), + PreBuiltAnalyzers.ARABIC.getAnalyzer(Version.V_5_0_1)); } public void testThatAnalyzersAreUsedInMapping() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java deleted file mode 100644 index 53708b28dfb..00000000000 --- a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ /dev/null @@ -1,1000 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.engine; - -import org.apache.lucene.codecs.Codec; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.LongPoint; -import org.apache.lucene.document.NumericDocValuesField; -import org.apache.lucene.document.TextField; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy; -import org.apache.lucene.index.LiveIndexWriterConfig; -import org.apache.lucene.index.MergePolicy; -import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.index.SnapshotDeletionPolicy; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.MockDirectoryWrapper; -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.codec.CodecService; -import org.elasticsearch.index.mapper.Mapping; -import org.elasticsearch.index.mapper.ParseContext; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.index.mapper.SeqNoFieldMapper; -import org.elasticsearch.index.mapper.SourceFieldMapper; -import org.elasticsearch.index.mapper.Uid; -import org.elasticsearch.index.mapper.UidFieldMapper; -import org.elasticsearch.index.shard.RefreshListeners; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardUtils; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.DirectoryUtils; -import org.elasticsearch.index.store.Store; -import org.elasticsearch.index.translog.Translog; -import org.elasticsearch.index.translog.TranslogConfig; -import org.elasticsearch.test.DummyShardLock; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.hamcrest.MatcherAssert; -import org.junit.After; -import org.junit.Before; - -import java.io.IOException; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -public class ShadowEngineTests extends ESTestCase { - - protected final ShardId shardId = new ShardId("index", "_na_", 1); - - protected ThreadPool threadPool; - - private Store store; - private Store storeReplica; - - - protected Engine primaryEngine; - protected Engine replicaEngine; - - private IndexSettings 
defaultSettings; - private String codecName; - private Path dirPath; - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - CodecService codecService = new CodecService(null, logger); - String name = Codec.getDefault().getName(); - if (Arrays.asList(codecService.availableCodecs()).contains(name)) { - // some codecs are read only so we only take the ones that we have in the service and randomly - // selected by lucene test case. - codecName = name; - } else { - codecName = "default"; - } - defaultSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder() - .put(IndexSettings.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us - .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .build()); // TODO randomize more settings - - threadPool = new TestThreadPool(getClass().getName()); - dirPath = createTempDir(); - store = createStore(dirPath); - storeReplica = createStore(dirPath); - Lucene.cleanLuceneIndex(store.directory()); - Lucene.cleanLuceneIndex(storeReplica.directory()); - primaryEngine = createInternalEngine(store, createTempDir("translog-primary")); - LiveIndexWriterConfig currentIndexWriterConfig = ((InternalEngine)primaryEngine).getCurrentIndexWriterConfig(); - - assertEquals(primaryEngine.config().getCodec().getName(), codecService.codec(codecName).getName()); - assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName()); - if (randomBoolean()) { - primaryEngine.config().setEnableGcDeletes(false); - } - - replicaEngine = createShadowEngine(storeReplica); - - assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName()); - if (randomBoolean()) { - replicaEngine.config().setEnableGcDeletes(false); - } - } - - @Override - @After - public void tearDown() throws Exception { - super.tearDown(); - replicaEngine.close(); - storeReplica.close(); - primaryEngine.close(); - store.close(); - terminate(threadPool); - } - - private ParseContext.Document testDocumentWithTextField() { - ParseContext.Document document = testDocument(); - document.add(new TextField("value", "test", Field.Store.YES)); - return document; - } - - private ParseContext.Document testDocument() { - return new ParseContext.Document(); - } - - - private ParsedDocument testParsedDocument(String id, String type, String routing, ParseContext.Document document, BytesReference source, Mapping mappingsUpdate) { - Field uidField = new Field("_uid", Uid.createUid(type, id), UidFieldMapper.Defaults.FIELD_TYPE); - Field versionField = new NumericDocValuesField("_version", 0); - SeqNoFieldMapper.SequenceID seqID = SeqNoFieldMapper.SequenceID.emptySeqID(); - document.add(uidField); - document.add(versionField); - document.add(seqID.seqNo); - document.add(seqID.seqNoDocValue); - document.add(seqID.primaryTerm); - document.add(new LongPoint("point_field", 42)); // so that points report memory/disk usage - return new ParsedDocument(versionField, seqID, id, type, routing, Arrays.asList(document), source, XContentType.JSON, - mappingsUpdate); - } - - protected Store createStore(Path p) throws IOException { - return createStore(newMockFSDirectory(p)); - } - - - protected Store createStore(final Directory directory) throws IOException { - IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), Settings.EMPTY); - final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) { - 
@Override - public Directory newDirectory() throws IOException { - return directory; - } - }; - return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId)); - } - - protected SnapshotDeletionPolicy createSnapshotDeletionPolicy() { - return new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); - } - - protected ShadowEngine createShadowEngine(Store store) { - return createShadowEngine(defaultSettings, store); - } - - protected InternalEngine createInternalEngine(Store store, Path translogPath) { - return createInternalEngine(defaultSettings, store, translogPath); - } - - protected ShadowEngine createShadowEngine(IndexSettings indexSettings, Store store) { - return new ShadowEngine(config(indexSettings, store, null, null, null)); - } - - protected InternalEngine createInternalEngine(IndexSettings indexSettings, Store store, Path translogPath) { - return createInternalEngine(indexSettings, store, translogPath, newMergePolicy()); - } - - protected InternalEngine createInternalEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) { - EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, null); - return new InternalEngine(config); - } - - public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, - RefreshListeners refreshListeners) { - IndexWriterConfig iwc = newIndexWriterConfig(); - final EngineConfig.OpenMode openMode; - try { - if (Lucene.indexExists(store.directory()) == false) { - openMode = EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG; - } else { - openMode = EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG; - } - } catch (IOException e) { - throw new ElasticsearchException("can't find index?", e); - } - Engine.EventListener eventListener = new Engine.EventListener() { - @Override - public void onFailedEngine(String reason, @Nullable Exception e) { - // we don't need to notify anybody in this test - } - }; - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); - EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings, null, store, createSnapshotDeletionPolicy(), - mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), eventListener, null, - IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, - TimeValue.timeValueMinutes(5), refreshListeners, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); - - return config; - } - -// protected Term newUid(String id) { -// return new Term("_uid", id); -// } - - protected Term newUid(ParsedDocument doc) { - return new Term("_uid", doc.uid()); - } - - private Engine.Index indexForDoc(ParsedDocument doc) { - return new Engine.Index(newUid(doc), doc); - } - - protected static final BytesReference B_1 = new BytesArray(new byte[]{1}); - protected static final BytesReference B_2 = new BytesArray(new byte[]{2}); - protected static final BytesReference B_3 = new BytesArray(new byte[]{3}); - - public void testCommitStats() throws IOException { - // create a doc and refresh - ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null); - primaryEngine.index(indexForDoc(doc)); - - CommitStats stats1 = replicaEngine.commitStats(); - assertThat(stats1.getGeneration(), greaterThan(0L)); - assertThat(stats1.getId(), notNullValue()); - assertThat(stats1.getUserData(), 
hasKey(Translog.TRANSLOG_GENERATION_KEY)); - - // flush the primary engine - primaryEngine.flush(); - // flush on replica to make flush visible - replicaEngine.flush(); - - CommitStats stats2 = replicaEngine.commitStats(); - assertThat(stats2.getGeneration(), greaterThan(stats1.getGeneration())); - assertThat(stats2.getId(), notNullValue()); - assertThat(stats2.getId(), not(equalTo(stats1.getId()))); - assertThat(stats2.getUserData(), hasKey(Translog.TRANSLOG_GENERATION_KEY)); - assertThat(stats2.getUserData(), hasKey(Translog.TRANSLOG_UUID_KEY)); - assertThat(stats2.getUserData().get(Translog.TRANSLOG_GENERATION_KEY), not(equalTo(stats1.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); - assertThat(stats2.getUserData().get(Translog.TRANSLOG_UUID_KEY), equalTo(stats1.getUserData().get(Translog.TRANSLOG_UUID_KEY))); - } - - public void testSegments() throws Exception { - primaryEngine.close(); // recreate without merging - primaryEngine = createInternalEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE); - List<Segment> segments = primaryEngine.segments(false); - assertThat(segments.isEmpty(), equalTo(true)); - assertThat(primaryEngine.segmentsStats(false).getCount(), equalTo(0L)); - assertThat(primaryEngine.segmentsStats(false).getMemoryInBytes(), equalTo(0L)); - - // create a doc and refresh - ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null); - primaryEngine.index(indexForDoc(doc)); - - ParsedDocument doc2 = testParsedDocument("2", "test", null, testDocumentWithTextField(), B_2, null); - primaryEngine.index(indexForDoc(doc2)); - primaryEngine.refresh("test"); - - segments = primaryEngine.segments(false); - assertThat(segments.size(), equalTo(1)); - SegmentsStats stats = primaryEngine.segmentsStats(false); - assertThat(stats.getCount(), equalTo(1L)); - assertThat(stats.getTermsMemoryInBytes(), greaterThan(0L)); - assertThat(stats.getStoredFieldsMemoryInBytes(), greaterThan(0L)); - assertThat(stats.getTermVectorsMemoryInBytes(), equalTo(0L)); - assertThat(stats.getNormsMemoryInBytes(), greaterThan(0L)); - assertThat(stats.getPointsMemoryInBytes(), greaterThan(0L)); - assertThat(stats.getDocValuesMemoryInBytes(), greaterThan(0L)); - assertThat(segments.get(0).isCommitted(), equalTo(false)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(2)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); - assertTrue(segments.get(0).isCompound()); - assertThat(segments.get(0).ramTree, nullValue()); - - // Check that the replica sees nothing - segments = replicaEngine.segments(false); - assertThat(segments.size(), equalTo(0)); - stats = replicaEngine.segmentsStats(false); - assertThat(stats.getCount(), equalTo(0L)); - assertThat(stats.getTermsMemoryInBytes(), equalTo(0L)); - assertThat(stats.getStoredFieldsMemoryInBytes(), equalTo(0L)); - assertThat(stats.getTermVectorsMemoryInBytes(), equalTo(0L)); - assertThat(stats.getNormsMemoryInBytes(), equalTo(0L)); - assertThat(stats.getPointsMemoryInBytes(), equalTo(0L)); - assertThat(stats.getDocValuesMemoryInBytes(), equalTo(0L)); - assertThat(segments.size(), equalTo(0)); - - // flush the primary engine - primaryEngine.flush(); - // refresh the replica - replicaEngine.refresh("tests"); - - // Check that the primary AND replica sees segments now - segments = primaryEngine.segments(false); - assertThat(segments.size(), equalTo(1)); - assertThat(primaryEngine.segmentsStats(false).getCount(), equalTo(1L)); -
assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(2)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - - segments = replicaEngine.segments(false); - assertThat(segments.size(), equalTo(1)); - assertThat(replicaEngine.segmentsStats(false).getCount(), equalTo(1L)); - assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(2)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - - - ParsedDocument doc3 = testParsedDocument("3", "test", null, testDocumentWithTextField(), B_3, null); - primaryEngine.index(indexForDoc(doc3)); - primaryEngine.refresh("test"); - - segments = primaryEngine.segments(false); - assertThat(segments.size(), equalTo(2)); - assertThat(primaryEngine.segmentsStats(false).getCount(), equalTo(2L)); - assertThat(primaryEngine.segmentsStats(false).getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes())); - assertThat(primaryEngine.segmentsStats(false).getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes())); - assertThat(primaryEngine.segmentsStats(false).getTermVectorsMemoryInBytes(), equalTo(0L)); - assertThat(primaryEngine.segmentsStats(false).getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes())); - assertThat(primaryEngine.segmentsStats(false).getPointsMemoryInBytes(), greaterThan(stats.getPointsMemoryInBytes())); - assertThat(primaryEngine.segmentsStats(false).getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes())); - assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true)); - assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(2)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - assertThat(segments.get(1).isCommitted(), equalTo(false)); - assertThat(segments.get(1).isSearch(), equalTo(true)); - assertThat(segments.get(1).getNumDocs(), equalTo(1)); - assertThat(segments.get(1).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(1).isCompound(), equalTo(true)); - - // Make visible to shadow replica - primaryEngine.flush(); - replicaEngine.refresh("test"); - - segments = replicaEngine.segments(false); - assertThat(segments.size(), equalTo(2)); - assertThat(replicaEngine.segmentsStats(false).getCount(), equalTo(2L)); - assertThat(replicaEngine.segmentsStats(false).getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes())); - assertThat(replicaEngine.segmentsStats(false).getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes())); - assertThat(replicaEngine.segmentsStats(false).getTermVectorsMemoryInBytes(), equalTo(0L)); - assertThat(replicaEngine.segmentsStats(false).getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes())); - assertThat(replicaEngine.segmentsStats(false).getPointsMemoryInBytes(), greaterThan(stats.getPointsMemoryInBytes())); - assertThat(replicaEngine.segmentsStats(false).getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes())); - assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true)); - 
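The flush-then-refresh choreography above is the heart of the shared-filesystem contract these deleted tests pinned down: a shadow replica, reading the same directory as the primary, only observes segments once the primary has committed them. A minimal sketch of that visibility rule in plain Lucene 6.x APIs follows; it is an editorial illustration under those assumptions, not code from this patch:

    import org.apache.lucene.analysis.core.KeywordAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.RAMDirectory;

    public class CommitVisibilitySketch {
        public static void main(String[] args) throws Exception {
            RAMDirectory dir = new RAMDirectory();
            IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new KeywordAnalyzer()));
            writer.commit();                                    // create an initial commit so a reader can open
            DirectoryReader reader = DirectoryReader.open(dir); // plays the shadow replica's role
            writer.addDocument(new Document());                 // buffered in the writer, not yet committed
            // openIfChanged returns null until the writer commits, just as
            // replicaEngine.refresh(...) shows nothing before primaryEngine.flush()
            assert DirectoryReader.openIfChanged(reader) == null;
            writer.commit();                                    // the analogue of primaryEngine.flush()
            DirectoryReader refreshed = DirectoryReader.openIfChanged(reader); // the analogue of replicaEngine.refresh(...)
            assert refreshed != null && refreshed.numDocs() == 1; // the committed segment is now visible
            refreshed.close();
            reader.close();
            writer.close();
            dir.close();
        }
    }

DirectoryReader.openIfChanged reuses the unchanged segment readers of the old reader, which is what kept a refresh on a shadow replica cheap.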
assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(2)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - assertThat(segments.get(1).isCommitted(), equalTo(true)); - assertThat(segments.get(1).isSearch(), equalTo(true)); - assertThat(segments.get(1).getNumDocs(), equalTo(1)); - assertThat(segments.get(1).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(1).isCompound(), equalTo(true)); - - primaryEngine.delete(new Engine.Delete("test", "1", newUid(doc))); - primaryEngine.refresh("test"); - - segments = primaryEngine.segments(false); - assertThat(segments.size(), equalTo(2)); - assertThat(primaryEngine.segmentsStats(false).getCount(), equalTo(2L)); - assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true)); - assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(1)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(1)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - assertThat(segments.get(1).isCommitted(), equalTo(true)); - assertThat(segments.get(1).isSearch(), equalTo(true)); - assertThat(segments.get(1).getNumDocs(), equalTo(1)); - assertThat(segments.get(1).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(1).isCompound(), equalTo(true)); - - // Make visible to shadow replica - primaryEngine.flush(); - replicaEngine.refresh("test"); - - ParsedDocument doc4 = testParsedDocument("4", "test", null, testDocumentWithTextField(), B_3, null); - primaryEngine.index(indexForDoc(doc4)); - primaryEngine.refresh("test"); - - segments = primaryEngine.segments(false); - assertThat(segments.size(), equalTo(3)); - assertThat(primaryEngine.segmentsStats(false).getCount(), equalTo(3L)); - assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true)); - assertThat(segments.get(0).isCommitted(), equalTo(true)); - assertThat(segments.get(0).isSearch(), equalTo(true)); - assertThat(segments.get(0).getNumDocs(), equalTo(1)); - assertThat(segments.get(0).getDeletedDocs(), equalTo(1)); - assertThat(segments.get(0).isCompound(), equalTo(true)); - - assertThat(segments.get(1).isCommitted(), equalTo(true)); - assertThat(segments.get(1).isSearch(), equalTo(true)); - assertThat(segments.get(1).getNumDocs(), equalTo(1)); - assertThat(segments.get(1).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(1).isCompound(), equalTo(true)); - - assertThat(segments.get(2).isCommitted(), equalTo(false)); - assertThat(segments.get(2).isSearch(), equalTo(true)); - assertThat(segments.get(2).getNumDocs(), equalTo(1)); - assertThat(segments.get(2).getDeletedDocs(), equalTo(0)); - assertThat(segments.get(2).isCompound(), equalTo(true)); - } - - public void testVerboseSegments() throws Exception { - primaryEngine.close(); // recreate without merging - primaryEngine = createInternalEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE); - List<Segment> segments = primaryEngine.segments(true); - assertThat(segments.isEmpty(), equalTo(true)); - - ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null); - primaryEngine.index(indexForDoc(doc)); - primaryEngine.refresh("test"); - - segments = primaryEngine.segments(true); - assertThat(segments.size(), equalTo(1)); -
assertThat(segments.get(0).ramTree, notNullValue()); - - ParsedDocument doc2 = testParsedDocument("2", "test", null, testDocumentWithTextField(), B_2, null); - primaryEngine.index(indexForDoc(doc2)); - primaryEngine.refresh("test"); - ParsedDocument doc3 = testParsedDocument("3", "test", null, testDocumentWithTextField(), B_3, null); - primaryEngine.index(indexForDoc(doc3)); - primaryEngine.refresh("test"); - - segments = primaryEngine.segments(true); - assertThat(segments.size(), equalTo(3)); - assertThat(segments.get(0).ramTree, notNullValue()); - assertThat(segments.get(1).ramTree, notNullValue()); - assertThat(segments.get(2).ramTree, notNullValue()); - - // Now make the changes visible to the replica - primaryEngine.flush(); - replicaEngine.refresh("test"); - - segments = replicaEngine.segments(true); - assertThat(segments.size(), equalTo(3)); - assertThat(segments.get(0).ramTree, notNullValue()); - assertThat(segments.get(1).ramTree, notNullValue()); - assertThat(segments.get(2).ramTree, notNullValue()); - - } - - public void testShadowEngineIgnoresWriteOperations() throws Exception { - // create a document - ParseContext.Document document = testDocumentWithTextField(); - document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); - ParsedDocument doc = testParsedDocument("1", "test", null, document, B_1, null); - try { - replicaEngine.index(indexForDoc(doc)); - fail("should have thrown an exception"); - } catch (UnsupportedOperationException e) {} - replicaEngine.refresh("test"); - - // its not there... - Engine.Searcher searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - searchResult.close(); - Engine.GetResult getResult = replicaEngine.get(new Engine.Get(true, newUid(doc))); - assertThat(getResult.exists(), equalTo(false)); - getResult.release(); - - // index a document - document = testDocument(); - document.add(new TextField("value", "test1", Field.Store.YES)); - doc = testParsedDocument("1", "test", null, document, B_1, null); - try { - replicaEngine.index(indexForDoc(doc)); - fail("should have thrown an exception"); - } catch (UnsupportedOperationException e) {} - replicaEngine.refresh("test"); - - // its still not there... 
- searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - searchResult.close(); - getResult = replicaEngine.get(new Engine.Get(true, newUid(doc))); - assertThat(getResult.exists(), equalTo(false)); - getResult.release(); - - // Now, add a document to the primary so we can test shadow engine deletes - document = testDocumentWithTextField(); - document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); - doc = testParsedDocument("1", "test", null, document, B_1, null); - primaryEngine.index(indexForDoc(doc)); - primaryEngine.flush(); - replicaEngine.refresh("test"); - - // Now the replica can see it - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - searchResult.close(); - - // And the replica can retrieve it - getResult = replicaEngine.get(new Engine.Get(false, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - getResult.release(); - - // try to delete it on the replica - try { - replicaEngine.delete(new Engine.Delete("test", "1", newUid(doc))); - fail("should have thrown an exception"); - } catch (UnsupportedOperationException e) {} - replicaEngine.flush(); - replicaEngine.refresh("test"); - primaryEngine.refresh("test"); - - // it's still there! - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - searchResult.close(); - getResult = replicaEngine.get(new Engine.Get(false, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - getResult.release(); - - // it's still there on the primary also! - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - searchResult.close(); - getResult = primaryEngine.get(new Engine.Get(false, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - getResult.release(); - } - - public void testSimpleOperations() throws Exception { - Engine.Searcher searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - searchResult.close(); - - // create a document - ParseContext.Document document = testDocumentWithTextField(); - document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); - ParsedDocument doc = testParsedDocument("1", "test", null, document, B_1, null); - primaryEngine.index(indexForDoc(doc)); - - // its not there... 
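The try { ... fail(...) } catch (UnsupportedOperationException e) {} blocks above predate the expectThrows helper that other tests touched by this same patch already use, for example the added analyzer tests further down. As a sketch of the tighter idiom, reusing this test's own names and assuming the LuceneTestCase/ESTestCase helper of this era:

    // expectThrows runs the lambda, asserts the exception type, and returns the
    // typed exception, so the fail()/empty-catch boilerplate disappears
    UnsupportedOperationException e = expectThrows(UnsupportedOperationException.class,
        () -> replicaEngine.delete(new Engine.Delete("test", "1", newUid(doc))));

The same shape covers the rejected index and delete operations alike.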
- searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - searchResult.close(); - - // not on the replica either... - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - searchResult.close(); - - // but, we can still get it (in realtime) - Engine.GetResult getResult = primaryEngine.get(new Engine.Get(true, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - getResult.release(); - - // can't get it from the replica, because it's not in the translog for a shadow replica - getResult = replicaEngine.get(new Engine.Get(true, newUid(doc))); - assertThat(getResult.exists(), equalTo(false)); - getResult.release(); - - // but, not there non realtime - getResult = primaryEngine.get(new Engine.Get(false, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - getResult.release(); - - // now its there... - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - searchResult.close(); - - // also in non realtime - getResult = primaryEngine.get(new Engine.Get(false, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - getResult.release(); - - // still not in the replica because no flush - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - searchResult.close(); - - // now do an update - document = testDocument(); - document.add(new TextField("value", "test1", Field.Store.YES)); - document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_2), SourceFieldMapper.Defaults.FIELD_TYPE)); - doc = testParsedDocument("1", "test", null, document, B_2, null); - primaryEngine.index(indexForDoc(doc)); - - // its not updated yet... 
- searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); - searchResult.close(); - - // but, we can still get it (in realtime) - getResult = primaryEngine.get(new Engine.Get(true, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - getResult.release(); - - // refresh and it should be updated - primaryEngine.refresh("test"); - - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); - searchResult.close(); - - // flush, now shadow replica should have the files - primaryEngine.flush(); - - // still not in the replica because the replica hasn't refreshed - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - searchResult.close(); - - replicaEngine.refresh("test"); - - // the replica finally sees it because primary has flushed and replica refreshed - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); - searchResult.close(); - - // now delete - primaryEngine.delete(new Engine.Delete("test", "1", newUid(doc))); - - // its not deleted yet - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); - searchResult.close(); - - // but, get should not see it (in realtime) - getResult = primaryEngine.get(new Engine.Get(true, newUid(doc))); - assertThat(getResult.exists(), equalTo(false)); - getResult.release(); - - // refresh and it should be deleted - primaryEngine.refresh("test"); - - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, 
EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); - searchResult.close(); - - // add it back - document = testDocumentWithTextField(); - document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); - doc = testParsedDocument("1", "test", null, document, B_1, null); - primaryEngine.index(indexForDoc(doc)); - - // its not there... - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); - searchResult.close(); - - // refresh and it should be there - primaryEngine.refresh("test"); - - // now its there... - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); - searchResult.close(); - - // now flush - primaryEngine.flush(); - - // and, verify get (in real time) - getResult = primaryEngine.get(new Engine.Get(true, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - getResult.release(); - - // the replica should see it if we refresh too! - replicaEngine.refresh("test"); - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); - searchResult.close(); - getResult = replicaEngine.get(new Engine.Get(true, newUid(doc))); - assertThat(getResult.exists(), equalTo(true)); - assertThat(getResult.docIdAndVersion(), notNullValue()); - getResult.release(); - - // make sure we can still work with the engine - // now do an update - document = testDocument(); - document.add(new TextField("value", "test1", Field.Store.YES)); - doc = testParsedDocument("1", "test", null, document, B_1, null); - primaryEngine.index(indexForDoc(doc)); - - // its not updated yet... 
- searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); - searchResult.close(); - - // refresh and it should be updated - primaryEngine.refresh("test"); - - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); - searchResult.close(); - - // Make visible to shadow replica - primaryEngine.flush(); - replicaEngine.refresh("test"); - - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); - searchResult.close(); - } - - public void testSearchResultRelease() throws Exception { - Engine.Searcher searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - searchResult.close(); - - // create a document - ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null); - primaryEngine.index(indexForDoc(doc)); - - // its not there... - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - searchResult.close(); - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - searchResult.close(); - - // flush & refresh and it should be everywhere - primaryEngine.flush(); - primaryEngine.refresh("test"); - replicaEngine.refresh("test"); - - // now its there... - searchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - searchResult.close(); - - searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - // don't release the replica search result yet...
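engineSearcherTotalHits here comes from EngineSearcherTotalHitsMatcher, a custom Hamcrest matcher in the test sources. A simplified, hypothetical reconstruction of how such a matcher is typically built on TypeSafeMatcher, for orientation rather than as the file's actual contents:

    import java.io.IOException;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.Query;
    import org.elasticsearch.index.engine.Engine;
    import org.hamcrest.Description;
    import org.hamcrest.TypeSafeMatcher;

    public class TotalHitsMatcher extends TypeSafeMatcher<Engine.Searcher> {
        private final Query query;
        private final int expectedHits;

        public TotalHitsMatcher(Query query, int expectedHits) {
            this.query = query;
            this.expectedHits = expectedHits;
        }

        @Override
        protected boolean matchesSafely(Engine.Searcher searcher) {
            try {
                // count the query against the point-in-time reader this searcher was acquired with
                return searcher.searcher().count(query) == expectedHits;
            } catch (IOException e) {
                return false;
            }
        }

        @Override
        public void describeTo(Description description) {
            description.appendText("total hits of ").appendValue(expectedHits)
                    .appendText(" for query ").appendValue(query);
        }

        public static TotalHitsMatcher engineSearcherTotalHits(int totalHits) {
            return new TotalHitsMatcher(new MatchAllDocsQuery(), totalHits);
        }
    }

Because the match runs against the reader the Searcher was acquired with, the unreleased replica searcher above keeps reporting one hit even after the delete that follows, which is exactly the point-in-time behavior this test pins down.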
- - // delete, refresh and do a new search, it should not be there - primaryEngine.delete(new Engine.Delete(doc.type(), doc.id(), newUid(doc))); - primaryEngine.flush(); - primaryEngine.refresh("test"); - replicaEngine.refresh("test"); - Engine.Searcher updateSearchResult = primaryEngine.acquireSearcher("test"); - MatcherAssert.assertThat(updateSearchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - updateSearchResult.close(); - - // the non released replica search result should not see the deleted yet... - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - searchResult.close(); - } - - public void testFailEngineOnCorruption() throws IOException { - ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null); - primaryEngine.index(indexForDoc(doc)); - primaryEngine.flush(); - MockDirectoryWrapper leaf = DirectoryUtils.getLeaf(replicaEngine.config().getStore().directory(), MockDirectoryWrapper.class); - leaf.setRandomIOExceptionRate(1.0); - leaf.setRandomIOExceptionRateOnOpen(1.0); - try { - replicaEngine.refresh("foo"); - fail("exception expected"); - } catch (Exception ex) { - - } - try { - Engine.Searcher searchResult = replicaEngine.acquireSearcher("test"); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - searchResult.close(); - fail("exception expected"); - } catch (AlreadyClosedException ex) { - // all is well - } - } - - public void testExtractShardId() { - try (Engine.Searcher test = replicaEngine.acquireSearcher("test")) { - ShardId shardId = ShardUtils.extractShardId(test.getDirectoryReader()); - assertNotNull(shardId); - assertEquals(shardId, replicaEngine.config().getShardId()); - } - } - - /** - * Random test that throws random exception and ensures all references are - * counted down / released and resources are closed. 
- */ - public void testFailStart() throws IOException { - // Need a commit point for this - ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null); - primaryEngine.index(indexForDoc(doc)); - primaryEngine.flush(); - - // this test fails if any reader, searcher or directory is not closed - MDW FTW - final int iters = scaledRandomIntBetween(10, 100); - for (int i = 0; i < iters; i++) { - MockDirectoryWrapper wrapper = newMockFSDirectory(dirPath); - wrapper.setFailOnOpenInput(randomBoolean()); - wrapper.setAllowRandomFileNotFoundException(randomBoolean()); - wrapper.setRandomIOExceptionRate(randomDouble()); - wrapper.setRandomIOExceptionRateOnOpen(randomDouble()); - try (Store store = createStore(wrapper)) { - int refCount = store.refCount(); - assertTrue("refCount: "+ store.refCount(), store.refCount() > 0); - ShadowEngine holder; - try { - holder = createShadowEngine(store); - } catch (EngineCreationFailureException ex) { - assertEquals(store.refCount(), refCount); - continue; - } - assertEquals(store.refCount(), refCount+1); - final int numStarts = scaledRandomIntBetween(1, 5); - for (int j = 0; j < numStarts; j++) { - try { - assertEquals(store.refCount(), refCount + 1); - holder.close(); - holder = createShadowEngine(store); - assertEquals(store.refCount(), refCount + 1); - } catch (EngineCreationFailureException ex) { - // all is fine - assertEquals(store.refCount(), refCount); - break; - } - } - holder.close(); - assertEquals(store.refCount(), refCount); - } - } - } - - public void testSettings() { - CodecService codecService = new CodecService(null, logger); - assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName()); - } - - public void testShadowEngineCreationRetry() throws Exception { - final Path srDir = createTempDir(); - final Store srStore = createStore(srDir); - Lucene.cleanLuceneIndex(srStore.directory()); - - final AtomicBoolean succeeded = new AtomicBoolean(false); - final CountDownLatch latch = new CountDownLatch(1); - - // Create a shadow Engine, which will freak out because there is no - // index yet - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - latch.await(); - } catch (InterruptedException e) { - // ignore interruptions - } - try (ShadowEngine srEngine = createShadowEngine(srStore)) { - succeeded.set(true); - } catch (Exception e) { - fail("should have been able to create the engine!"); - } - } - }); - t.start(); - - // count down latch - // now shadow engine should try to be created - latch.countDown(); - - // Create an InternalEngine, which creates the index so the shadow - // replica will handle it correctly - Store pStore = createStore(srDir); - InternalEngine pEngine = createInternalEngine(pStore, createTempDir("translog-primary")); - - // create a document - ParseContext.Document document = testDocumentWithTextField(); - document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE)); - ParsedDocument doc = testParsedDocument("1", "test", null, document, B_1, null); - pEngine.index(indexForDoc(doc)); - pEngine.flush(true, true); - - t.join(); - assertTrue("ShadowEngine should have been able to be created", succeeded.get()); - // (shadow engine is already shut down in the try-with-resources) - IOUtils.close(srStore, pEngine, pStore); - } - - public void testNoTranslog() { - try { - replicaEngine.getTranslog(); - fail("shadow engine has no translog"); - } catch (UnsupportedOperationException 
ex) { - // all good - } - } - - public void testRefreshListenersFails() throws IOException { - EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), - new RefreshListeners(null, null, null, logger)); - Exception e = expectThrows(IllegalArgumentException.class, () -> new ShadowEngine(config)); - assertEquals("ShadowEngine doesn't support RefreshListeners", e.getMessage()); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java index 95f04e799b6..cef6806631f 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/AbstractFieldDataTestCase.java @@ -31,16 +31,14 @@ import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.store.RAMDirectory; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import org.elasticsearch.index.mapper.BinaryFieldMapper; import org.elasticsearch.index.mapper.ContentPath; -import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.GeoPointFieldMapper; +import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper.BuilderContext; import org.elasticsearch.index.mapper.MapperService; @@ -53,7 +51,6 @@ import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.VersionUtils; import org.junit.After; import org.junit.Before; @@ -61,7 +58,6 @@ import java.io.IOException; import java.util.Collection; import java.util.List; -import static org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.sameInstance; @@ -135,9 +131,7 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase { @Before public void setup() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_0); // we need 2.x so that fielddata is allowed on string fields - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); - indexService = createIndex("test", settings); + indexService = createIndex("test"); mapperService = indexService.mapperService(); indicesFieldDataCache = getInstanceFromNode(IndicesService.class).getIndicesFieldDataCache(); ifdService = indexService.fieldData(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/AbstractNumericFieldMapperTestCase.java b/core/src/test/java/org/elasticsearch/index/mapper/AbstractNumericFieldMapperTestCase.java index 2fc2d8e382c..57273d213b3 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/AbstractNumericFieldMapperTestCase.java 
+++ b/core/src/test/java/org/elasticsearch/index/mapper/AbstractNumericFieldMapperTestCase.java @@ -18,23 +18,18 @@ */ package org.elasticsearch.index.mapper; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.VersionUtils; import org.junit.Before; import java.io.IOException; import java.util.Collection; import java.util.Set; -import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; import static org.hamcrest.Matchers.containsString; public abstract class AbstractNumericFieldMapperTestCase extends ESSingleNodeTestCase { @@ -117,20 +112,6 @@ public abstract class AbstractNumericFieldMapperTestCase extends ESSingleNodeTes ); assertThat(e.getMessage(), containsString("name cannot be empty string")); } - - // before 5.x - Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5); - Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build(); - indexService = createIndex("test_old", oldIndexSettings); - parser = indexService.mapperService().documentMapperParser(); - for (String type : TYPES) { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("").field("type", type).endObject().endObject() - .endObject().endObject().string(); - - DocumentMapper defaultMapper = parser.parse("type", new CompressedXContent(mapping)); - assertEquals(mapping, defaultMapper.mappingSource().string()); - } } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java index 8088c8576fe..1651091240f 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/ExternalFieldMapperTests.java @@ -54,7 +54,8 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase { } public void testExternalValues() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexService indexService = createIndex("test", settings); MapperRegistry mapperRegistry = new MapperRegistry( @@ -102,7 +103,8 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase { } public void testExternalValuesWithMultifield() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexService indexService = createIndex("test", settings); Map<String, Mapper.TypeParser> mapperParsers = new HashMap<>(); @@ -167,7 +169,8 @@ public class ExternalFieldMapperTests extends ESSingleNodeTestCase { } public void
testExternalValuesWithMultifieldTwoLevels() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); IndexService indexService = createIndex("test", settings); Map<String, Mapper.TypeParser> mapperParsers = new HashMap<>(); diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java index 596746938a2..26637734494 100644 --- a/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilderTests.java @@ -148,32 +148,16 @@ public class GeoBoundingBoxQueryBuilderTests extends AbstractQueryTestCase parseQuery(json2)); } + public void testExceptionUsingAnalyzerOnNumericField() { + assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); + QueryShardContext shardContext = createShardContext(); + MatchQueryBuilder matchQueryBuilder = new MatchQueryBuilder(DOUBLE_FIELD_NAME, 6.075210893508043E-4); + matchQueryBuilder.analyzer("simple"); + NumberFormatException e = expectThrows(NumberFormatException.class, () -> matchQueryBuilder.toQuery(shardContext)); + assertEquals("For input string: \"e\"", e.getMessage()); + } + @Override protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { mapperService.merge("t_boost", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("t_boost", diff --git a/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index 63fa1eea017..990137184fe 100644 --- a/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -85,13 +85,8 @@ public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase 0); + QueryShardContext shardContext = createShardContext(); + MultiMatchQueryBuilder multiMatchQueryBuilder = new MultiMatchQueryBuilder(6.075210893508043E-4); + multiMatchQueryBuilder.field(DOUBLE_FIELD_NAME); + multiMatchQueryBuilder.analyzer("simple"); + NumberFormatException e = expectThrows(NumberFormatException.class, () -> multiMatchQueryBuilder.toQuery(shardContext)); + assertEquals("For input string: \"e\"", e.getMessage()); + } + public void testFuzzinessOnNonStringField() throws Exception { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); MultiMatchQueryBuilder query = new MultiMatchQueryBuilder(42).field(INT_FIELD_NAME).field(BOOLEAN_FIELD_NAME); diff --git a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index 1431391d798..1d1af2b2fc5 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -410,7 +410,8 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase public void onFailure(Exception e) { listener.onFailure(e); } - }, true, new
ReplicasRef(), () -> null, logger, opType) { + }, new ReplicasRef(), () -> null, logger, opType) { + @Override protected List getShards(ShardId shardId, ClusterState state) { return replicationGroup.shardRoutings(); diff --git a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java index 97e224f04a4..12f749e6819 100644 --- a/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java +++ b/core/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java @@ -243,11 +243,6 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC }, null, config); } - - @Override - public Engine newReadOnlyEngine(EngineConfig config) { - throw new UnsupportedOperationException(); - } }; } else { return null; diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index b106a308098..b328e86e58d 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -284,7 +284,7 @@ public class IndexShardTests extends IndexShardTestCase { true, ShardRoutingState.INITIALIZING, AllocationId.newRelocation(AllocationId.newInitializing()))); } else if (randomBoolean()) { // simulate promotion - indexShard = newShard(shardId, false); + indexShard = newStartedShard(false); ShardRouting replicaRouting = indexShard.routingEntry(); indexShard.updatePrimaryTerm(indexShard.getPrimaryTerm() + 1); ShardRouting primaryRouting = TestShardRouting.newShardRouting(replicaRouting.shardId(), replicaRouting.currentNodeId(), null, diff --git a/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java b/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java index 85bd4b6166c..fda2f8ef7d0 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java @@ -111,15 +111,10 @@ public class ShardPathTests extends ESTestCase { final Path customPath; if (useCustomDataPath) { final Path path = createTempDir(); - final boolean includeNodeId = randomBoolean(); indexSettings = indexSettingsBuilder.put(IndexMetaData.SETTING_DATA_PATH, "custom").build(); nodeSettings = Settings.builder().put(Environment.PATH_SHARED_DATA_SETTING.getKey(), path.toAbsolutePath().toAbsolutePath()) - .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), includeNodeId).build(); - if (includeNodeId) { - customPath = path.resolve("custom").resolve("0"); - } else { - customPath = path.resolve("custom"); - } + .build(); + customPath = path.resolve("custom").resolve("0"); } else { customPath = null; indexSettings = indexSettingsBuilder.build(); diff --git a/core/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java index 57d025128d8..0d730eff9f5 100644 --- a/core/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityServiceTests.java @@ -20,8 +20,6 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.ClassicSimilarity; -import org.elasticsearch.Version; -import 
org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTestCase; @@ -51,20 +49,9 @@ public class SimilarityServiceTests extends ESTestCase { } } - // Pre v3 indices could override built-in similarities - public void testOverrideBuiltInSimilarityPreV3() { - Settings settings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) - .put("index.similarity.BM25.type", "classic") - .build(); - IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); - SimilarityService service = new SimilarityService(indexSettings, Collections.emptyMap()); - assertTrue(service.getSimilarity("BM25") instanceof ClassicSimilarityProvider); - } - - // Tests #16594 public void testOverrideDefaultSimilarity() { - Settings settings = Settings.builder().put("index.similarity.default.type", "classic").build(); + Settings settings = Settings.builder().put("index.similarity.default.type", "classic") + .build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); SimilarityService service = new SimilarityService(indexSettings, Collections.emptyMap()); assertTrue(service.getDefaultSimilarity() instanceof ClassicSimilarity); diff --git a/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java index e9183876aec..3e7f4650c3e 100644 --- a/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java +++ b/core/src/test/java/org/elasticsearch/index/similarity/SimilarityTests.java @@ -19,12 +19,12 @@ package org.elasticsearch.index.similarity; -import org.apache.lucene.search.similarities.BooleanSimilarity; -import org.apache.lucene.search.similarities.ClassicSimilarity; -import org.apache.lucene.search.similarities.DFISimilarity; import org.apache.lucene.search.similarities.AfterEffectL; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.BasicModelG; +import org.apache.lucene.search.similarities.BooleanSimilarity; +import org.apache.lucene.search.similarities.ClassicSimilarity; +import org.apache.lucene.search.similarities.DFISimilarity; import org.apache.lucene.search.similarities.DFRSimilarity; import org.apache.lucene.search.similarities.DistributionSPL; import org.apache.lucene.search.similarities.IBSimilarity; @@ -34,19 +34,15 @@ import org.apache.lucene.search.similarities.LMJelinekMercerSimilarity; import org.apache.lucene.search.similarities.LambdaTTF; import org.apache.lucene.search.similarities.NormalizationH2; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.VersionUtils; import java.io.IOException; import java.util.Collection; @@ -248,32 +244,4 @@ public class SimilarityTests extends 
ESSingleNodeTestCase { assertThat(e.getMessage(), equalTo("Unknown Similarity type [unknown_similarity] for field [field1]")); } } - - public void testSimilarityDefaultBackCompat() throws IOException { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties") - .startObject("field1") - .field("similarity", "default") - .field("type", "text") - .endObject() - .endObject() - .endObject() - .endObject().string(); - Settings settings = Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_2_0)) - .build(); - - DocumentMapperParser parser = createIndex("test_v2.x", settings).mapperService().documentMapperParser(); - DocumentMapper documentMapper = parser.parse("type", new CompressedXContent(mapping)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity(), instanceOf(ClassicSimilarityProvider.class)); - assertThat(documentMapper.mappers().getMapper("field1").fieldType().similarity().name(), equalTo("classic")); - - parser = createIndex("test_v3.x").mapperService().documentMapperParser(); - try { - parser.parse("type", new CompressedXContent(mapping)); - fail("Expected MappingParsingException"); - } catch (MapperParsingException e) { - assertThat(e.getMessage(), equalTo("Unknown Similarity type [default] for field [field1]")); - } - } } diff --git a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java index 5f1578488cf..8a63c237e90 100644 --- a/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java +++ b/core/src/test/java/org/elasticsearch/index/snapshots/blobstore/FileInfoTests.java @@ -40,6 +40,9 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; public class FileInfoTests extends ESTestCase { + private static final org.apache.lucene.util.Version MIN_SUPPORTED_LUCENE_VERSION = org.elasticsearch.Version.CURRENT + .minimumIndexCompatibilityVersion().luceneVersion; + public void testToFromXContent() throws IOException { final int iters = scaledRandomIntBetween(1, 10); for (int iter = 0; iter < iters; iter++) { @@ -48,7 +51,8 @@ public class FileInfoTests extends ESTestCase { for (int i = 0; i < hash.length; i++) { hash.bytes[i] = randomByte(); } - StoreFileMetaData meta = new StoreFileMetaData("foobar", Math.abs(randomLong()), randomAlphaOfLengthBetween(1, 10), Version.LATEST, hash); + StoreFileMetaData meta = new StoreFileMetaData("foobar", Math.abs(randomLong()), randomAlphaOfLengthBetween(1, 10), + Version.LATEST, hash); ByteSizeValue size = new ByteSizeValue(Math.abs(randomLong())); BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo("_foobar", meta, size); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint(); @@ -140,14 +144,16 @@ public class FileInfoTests extends ESTestCase { } public void testGetPartSize() { - BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 36, "666"), new ByteSizeValue(6)); + BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 36, "666", + MIN_SUPPORTED_LUCENE_VERSION), new ByteSizeValue(6)); int numBytes = 0; for (int i = 0; i < info.numberOfParts(); i++) { numBytes += info.partBytes(i); } assertEquals(numBytes, 36); - info 
= new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 35, "666"), new ByteSizeValue(6)); + info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 35, "666", + MIN_SUPPORTED_LUCENE_VERSION), new ByteSizeValue(6)); numBytes = 0; for (int i = 0; i < info.numberOfParts(); i++) { numBytes += info.partBytes(i); @@ -155,7 +161,8 @@ public class FileInfoTests extends ESTestCase { assertEquals(numBytes, 35); final int numIters = randomIntBetween(10, 100); for (int j = 0; j < numIters; j++) { - StoreFileMetaData metaData = new StoreFileMetaData("foo", randomIntBetween(0, 1000), "666"); + StoreFileMetaData metaData = new StoreFileMetaData("foo", randomIntBetween(0, 1000), "666", + MIN_SUPPORTED_LUCENE_VERSION); info = new BlobStoreIndexShardSnapshot.FileInfo("foo", metaData, new ByteSizeValue(randomIntBetween(1, 1000))); numBytes = 0; for (int i = 0; i < info.numberOfParts(); i++) { diff --git a/core/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java b/core/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java index 3a56763cd59..0a72037b7d8 100644 --- a/core/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java +++ b/core/src/test/java/org/elasticsearch/index/store/FsDirectoryServiceTests.java @@ -39,33 +39,6 @@ import java.util.Arrays; public class FsDirectoryServiceTests extends ESTestCase { - public void testHasSleepWrapperOnSharedFS() throws IOException { - Settings build = randomBoolean() ? - Settings.builder().put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true).build() : - Settings.builder().put(IndexMetaData.SETTING_SHADOW_REPLICAS, true).build();; - IndexSettings settings = IndexSettingsModule.newIndexSettings("foo", build); - IndexStore store = new IndexStore(settings); - Path tempDir = createTempDir().resolve(settings.getUUID()).resolve("0"); - Files.createDirectories(tempDir); - ShardPath path = new ShardPath(false, tempDir, tempDir, new ShardId(settings.getIndex(), 0)); - FsDirectoryService fsDirectoryService = new FsDirectoryService(settings, store, path); - Directory directory = fsDirectoryService.newDirectory(); - assertTrue(directory.getClass().toString(), directory instanceof SleepingLockWrapper); - } - - public void testHasNoSleepWrapperOnNormalFS() throws IOException { - Settings build = Settings.builder().put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), "simplefs").build(); - IndexSettings settings = IndexSettingsModule.newIndexSettings("foo", build); - IndexStore store = new IndexStore(settings); - Path tempDir = createTempDir().resolve(settings.getUUID()).resolve("0"); - Files.createDirectories(tempDir); - ShardPath path = new ShardPath(false, tempDir, tempDir, new ShardId(settings.getIndex(), 0)); - FsDirectoryService fsDirectoryService = new FsDirectoryService(settings, store, path); - Directory directory = fsDirectoryService.newDirectory(); - assertFalse(directory instanceof SleepingLockWrapper); - assertTrue(directory instanceof SimpleFSDirectory); - } - public void testPreload() throws IOException { doTestPreload(); doTestPreload("nvd", "dvd", "tim"); diff --git a/core/src/test/java/org/elasticsearch/index/store/StoreTests.java b/core/src/test/java/org/elasticsearch/index/store/StoreTests.java index 156c2132358..dfc24d73c97 100644 --- a/core/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/core/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -98,7 +98,10 @@ import static org.hamcrest.Matchers.notNullValue; public class StoreTests 
extends ESTestCase { - private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings("index", Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT).build()); + private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings("index", + Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT).build()); + private static final Version MIN_SUPPORTED_LUCENE_VERSION = org.elasticsearch.Version.CURRENT + .minimumIndexCompatibilityVersion().luceneVersion; public void testRefCount() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); @@ -169,7 +172,8 @@ public class StoreTests extends ESTestCase { indexInput.seek(0); BytesRef ref = new BytesRef(scaledRandomIntBetween(1, 1024)); long length = indexInput.length(); - IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo1.bar", length, checksum), dir.createOutput("foo1.bar", IOContext.DEFAULT)); + IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo1.bar", length, checksum, + MIN_SUPPORTED_LUCENE_VERSION), dir.createOutput("foo1.bar", IOContext.DEFAULT)); while (length > 0) { if (random().nextInt(10) == 0) { verifyingOutput.writeByte(indexInput.readByte()); @@ -200,7 +204,8 @@ public class StoreTests extends ESTestCase { public void testVerifyingIndexOutputOnEmptyFile() throws IOException { Directory dir = newDirectory(); - IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo.bar", 0, Store.digestToString(0)), + IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo.bar", 0, Store.digestToString(0), + MIN_SUPPORTED_LUCENE_VERSION), dir.createOutput("foo1.bar", IOContext.DEFAULT)); try { Store.verify(verifyingOutput); @@ -229,7 +234,8 @@ public class StoreTests extends ESTestCase { indexInput.seek(0); BytesRef ref = new BytesRef(scaledRandomIntBetween(1, 1024)); long length = indexInput.length(); - IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo1.bar", length, checksum), dir.createOutput("foo1.bar", IOContext.DEFAULT)); + IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo1.bar", length, checksum, + MIN_SUPPORTED_LUCENE_VERSION), dir.createOutput("foo1.bar", IOContext.DEFAULT)); length -= 8; // we write the checksum in the try / catch block below while (length > 0) { if (random().nextInt(10) == 0) { @@ -283,7 +289,8 @@ public class StoreTests extends ESTestCase { public void testVerifyingIndexOutputWithBogusInput() throws IOException { Directory dir = newDirectory(); int length = scaledRandomIntBetween(10, 1024); - IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo1.bar", length, ""), dir.createOutput("foo1.bar", IOContext.DEFAULT)); + IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo1.bar", length, "", + MIN_SUPPORTED_LUCENE_VERSION), dir.createOutput("foo1.bar", IOContext.DEFAULT)); try { while (length > 0) { verifyingOutput.writeByte((byte) random().nextInt()); @@ -831,8 +838,8 @@ public class StoreTests extends ESTestCase { } protected Store.MetadataSnapshot createMetaDataSnapshot() { - StoreFileMetaData storeFileMetaData1 = new StoreFileMetaData("segments", 1, "666"); - StoreFileMetaData storeFileMetaData2 = new StoreFileMetaData("no_segments", 1, "666"); + 
StoreFileMetaData storeFileMetaData1 = new StoreFileMetaData("segments", 1, "666", MIN_SUPPORTED_LUCENE_VERSION); + StoreFileMetaData storeFileMetaData2 = new StoreFileMetaData("no_segments", 1, "666", MIN_SUPPORTED_LUCENE_VERSION); Map storeFileMetaDataMap = new HashMap<>(); storeFileMetaDataMap.put(storeFileMetaData1.name(), storeFileMetaData1); storeFileMetaDataMap.put(storeFileMetaData2.name(), storeFileMetaData2); diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java index b94e864fdd6..e87dc24c8f8 100644 --- a/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java +++ b/core/src/test/java/org/elasticsearch/indices/IndicesServiceTests.java @@ -106,23 +106,6 @@ public class IndicesServiceTests extends ESSingleNodeTestCase { return true; } - public void testCanDeleteIndexContent() throws IOException { - final IndicesService indicesService = getIndicesService(); - IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder() - .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_DATA_PATH, "/foo/bar") - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 4)) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 3)) - .build()); - assertFalse("shard on shared filesystem", indicesService.canDeleteIndexContents(idxSettings.getIndex(), idxSettings)); - - final IndexMetaData.Builder newIndexMetaData = IndexMetaData.builder(idxSettings.getIndexMetaData()); - newIndexMetaData.state(IndexMetaData.State.CLOSE); - idxSettings = IndexSettingsModule.newIndexSettings(newIndexMetaData.build()); - assertTrue("shard on shared filesystem, but closed, so it should be deletable", - indicesService.canDeleteIndexContents(idxSettings.getIndex(), idxSettings)); - } - public void testCanDeleteShardContent() { IndicesService indicesService = getIndicesService(); IndexMetaData meta = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas( diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java b/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java index 61d325a64e8..960d135371c 100644 --- a/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java +++ b/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java @@ -23,10 +23,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.ar.ArabicNormalizationFilter; -import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; -import org.apache.lucene.analysis.de.GermanAnalyzer; -import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.apache.lucene.analysis.fa.PersianNormalizationFilter; import org.apache.lucene.analysis.hunspell.Dictionary; import org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilter; @@ -123,83 +120,6 @@ public class AnalysisModuleTests extends ModuleTestCase { assertTokenFilter("arabic_normalization", ArabicNormalizationFilter.class); } - public void testAnalyzerAlias() throws IOException { - Settings settings = Settings.builder() - .put("index.analysis.analyzer.foobar.alias","default") - .put("index.analysis.analyzer.foobar.type", "keyword") - .put("index.analysis.analyzer.foobar_search.alias","default_search") - 
.put("index.analysis.analyzer.foobar_search.type","english") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - // analyzer aliases are only allowed in 2.x indices - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5)) - .build(); - AnalysisRegistry newRegistry = getNewRegistry(settings); - IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings); - assertThat(indexAnalyzers.get("default").analyzer(), is(instanceOf(KeywordAnalyzer.class))); - assertThat(indexAnalyzers.get("default_search").analyzer(), is(instanceOf(EnglishAnalyzer.class))); - assertWarnings("setting [index.analysis.analyzer.foobar.alias] is only allowed on index [test] because it was created before " + - "5.x; analyzer aliases can no longer be created on new indices.", - "setting [index.analysis.analyzer.foobar_search.alias] is only allowed on index [test] because it was created before " + - "5.x; analyzer aliases can no longer be created on new indices."); - } - - public void testAnalyzerAliasReferencesAlias() throws IOException { - Settings settings = Settings.builder() - .put("index.analysis.analyzer.foobar.alias","default") - .put("index.analysis.analyzer.foobar.type", "german") - .put("index.analysis.analyzer.foobar_search.alias","default_search") - .put("index.analysis.analyzer.foobar_search.type", "default") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - // analyzer aliases are only allowed in 2.x indices - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5)) - .build(); - AnalysisRegistry newRegistry = getNewRegistry(settings); - IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings); - - assertThat(indexAnalyzers.get("default").analyzer(), is(instanceOf(GermanAnalyzer.class))); - // analyzer types are bound early before we resolve aliases - assertThat(indexAnalyzers.get("default_search").analyzer(), is(instanceOf(StandardAnalyzer.class))); - assertWarnings("setting [index.analysis.analyzer.foobar.alias] is only allowed on index [test] because it was created before " + - "5.x; analyzer aliases can no longer be created on new indices.", - "setting [index.analysis.analyzer.foobar_search.alias] is only allowed on index [test] because it was created before " + - "5.x; analyzer aliases can no longer be created on new indices."); - } - - public void testAnalyzerAliasDefault() throws IOException { - Settings settings = Settings.builder() - .put("index.analysis.analyzer.foobar.alias","default") - .put("index.analysis.analyzer.foobar.type", "keyword") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - // analyzer aliases are only allowed in 2.x indices - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5)) - .build(); - AnalysisRegistry newRegistry = getNewRegistry(settings); - IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings); - assertThat(indexAnalyzers.get("default").analyzer(), is(instanceOf(KeywordAnalyzer.class))); - assertThat(indexAnalyzers.get("default_search").analyzer(), is(instanceOf(KeywordAnalyzer.class))); - assertWarnings("setting [index.analysis.analyzer.foobar.alias] is only allowed on index [test] because it was created before " + - "5.x; analyzer aliases can no longer be created on new indices."); - } - - public void testAnalyzerAliasMoreThanOnce() throws 
IOException { - Settings settings = Settings.builder() - .put("index.analysis.analyzer.foobar.alias","default") - .put("index.analysis.analyzer.foobar.type", "keyword") - .put("index.analysis.analyzer.foobar1.alias","default") - .put("index.analysis.analyzer.foobar1.type", "english") - // analyzer aliases are only allowed in 2.x indices - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5)) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .build(); - AnalysisRegistry newRegistry = getNewRegistry(settings); - IllegalStateException ise = expectThrows(IllegalStateException.class, () -> getIndexAnalyzers(newRegistry, settings)); - assertEquals("alias [default] is already used by [foobar]", ise.getMessage()); - assertWarnings("setting [index.analysis.analyzer.foobar.alias] is only allowed on index [test] because it was created before " + - "5.x; analyzer aliases can no longer be created on new indices.", - "setting [index.analysis.analyzer.foobar1.alias] is only allowed on index [test] because it was created before " + - "5.x; analyzer aliases can no longer be created on new indices."); - } - public void testAnalyzerAliasNotAllowedPost5x() throws IOException { Settings settings = Settings.builder() .put("index.analysis.analyzer.foobar.type", "standard") @@ -218,7 +138,7 @@ public class AnalysisModuleTests extends ModuleTestCase { Settings settings2 = Settings.builder() .loadFromStream(yaml, getClass().getResourceAsStream(yaml)) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0) .build(); AnalysisRegistry newRegistry = getNewRegistry(settings2); IndexAnalyzers indexAnalyzers = getIndexAnalyzers(newRegistry, settings2); @@ -231,8 +151,10 @@ public class AnalysisModuleTests extends ModuleTestCase { // analysis service has the expected version assertThat(indexAnalyzers.get("standard").analyzer(), is(instanceOf(StandardAnalyzer.class))); - assertEquals(Version.V_2_0_0.luceneVersion, indexAnalyzers.get("standard").analyzer().getVersion()); - assertEquals(Version.V_2_0_0.luceneVersion, indexAnalyzers.get("thai").analyzer().getVersion()); + assertEquals(Version.V_5_0_0.luceneVersion, + indexAnalyzers.get("standard").analyzer().getVersion()); + assertEquals(Version.V_5_0_0.luceneVersion, + indexAnalyzers.get("thai").analyzer().getVersion()); assertThat(indexAnalyzers.get("custom7").analyzer(), is(instanceOf(StandardAnalyzer.class))); assertEquals(org.apache.lucene.util.Version.fromBits(3,6,0), indexAnalyzers.get("custom7").analyzer().getVersion()); @@ -355,40 +277,6 @@ public class AnalysisModuleTests extends ModuleTestCase { } } - public void testUnderscoreInAnalyzerNameAlias() throws IOException { - Settings settings = Settings.builder() - .put("index.analysis.analyzer.valid_name.tokenizer", "keyword") - .put("index.analysis.analyzer.valid_name.alias", "_invalid_name") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - // analyzer aliases are only allowed for 2.x indices - .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_3_5)) - .build(); - try { - getIndexAnalyzers(settings); - fail("This should fail with IllegalArgumentException because the analyzers alias starts with _"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), equalTo("analyzer name 
must not start with '_'. got \"_invalid_name\"")); - } - assertWarnings("setting [index.analysis.analyzer.valid_name.alias] is only allowed on index [test] because it was " + - "created before 5.x; analyzer aliases can no longer be created on new indices."); - } - - public void testDeprecatedPositionOffsetGap() throws IOException { - Settings settings = Settings.builder() - .put("index.analysis.analyzer.custom.tokenizer", "standard") - .put("index.analysis.analyzer.custom.position_offset_gap", "128") - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .build(); - try { - getIndexAnalyzers(settings); - fail("Analyzer should fail if it has position_offset_gap"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), equalTo("Option [position_offset_gap] in Custom Analyzer [custom] " + - "has been renamed, please use [position_increment_gap] instead.")); - } - } - public void testRegisterHunspellDictionary() throws Exception { Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index d3158c620d1..cf22c95ac69 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -41,9 +41,12 @@ import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.action.support.master.TransportMasterNodeActionUtils; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ClusterStateTaskExecutor.ClusterTasksResult; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.EmptyClusterInfoService; import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.action.shard.ShardStateAction.ShardEntry; import org.elasticsearch.cluster.metadata.AliasValidator; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -217,23 +220,29 @@ public class ClusterStateChanges extends AbstractComponent { } public ClusterState applyFailedShards(ClusterState clusterState, List failedShards) { - List entries = failedShards.stream().map(failedShard -> - new ShardStateAction.ShardEntry(failedShard.getRoutingEntry().shardId(), failedShard.getRoutingEntry().allocationId().getId(), + List entries = failedShards.stream().map(failedShard -> + new ShardEntry(failedShard.getRoutingEntry().shardId(), failedShard.getRoutingEntry().allocationId().getId(), 0L, failedShard.getMessage(), failedShard.getFailure())) .collect(Collectors.toList()); - try { - return shardFailedClusterStateTaskExecutor.execute(clusterState, entries).resultingState; - } catch (Exception e) { - throw ExceptionsHelper.convertToRuntime(e); - } + return runTasks(shardFailedClusterStateTaskExecutor, clusterState, entries); } public ClusterState applyStartedShards(ClusterState clusterState, List startedShards) { - List entries = startedShards.stream().map(startedShard -> - new ShardStateAction.ShardEntry(startedShard.shardId(), startedShard.allocationId().getId(), 0L, "shard started", null)) + List entries 
= startedShards.stream().map(startedShard -> + new ShardEntry(startedShard.shardId(), startedShard.allocationId().getId(), 0L, "shard started", null)) .collect(Collectors.toList()); + return runTasks(shardStartedClusterStateTaskExecutor, clusterState, entries); + } + + private ClusterState runTasks(ClusterStateTaskExecutor executor, ClusterState clusterState, List entries) { try { - return shardStartedClusterStateTaskExecutor.execute(clusterState, entries).resultingState; + ClusterTasksResult result = executor.execute(clusterState, entries); + for (ClusterStateTaskExecutor.TaskResult taskResult : result.executionResults.values()) { + if (taskResult.isSuccess() == false) { + throw taskResult.getFailure(); + } + } + return result.resultingState; } catch (Exception e) { throw ExceptionsHelper.convertToRuntime(e); } diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 064e9d78b51..55e47dabdbf 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -91,15 +91,6 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice terminate(threadPool); } - /** - * needed due to random usage of {@link IndexMetaData#INDEX_SHADOW_REPLICAS_SETTING}. removed once - * shadow replicas are removed. - */ - @Override - protected boolean enableWarningsCheck() { - return false; - } - public void testRandomClusterStateUpdates() { // we have an IndicesClusterStateService per node in the cluster final Map clusterStateServiceMap = new HashMap<>(); @@ -249,10 +240,6 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice Settings.Builder settingsBuilder = Settings.builder() .put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 3)) .put(SETTING_NUMBER_OF_REPLICAS, randomInt(2)); - if (randomBoolean()) { - settingsBuilder.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) - .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true); - } CreateIndexRequest request = new CreateIndexRequest(name, settingsBuilder.build()).waitForActiveShards(ActiveShardCount.NONE); state = cluster.createIndex(state, request); assertTrue(state.metaData().hasIndex(name)); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java index 2c00c59c343..0d1a5928fb8 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStatusTests.java @@ -36,7 +36,8 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; public class RecoveryStatusTests extends ESSingleNodeTestCase { - + private static final org.apache.lucene.util.Version MIN_SUPPORTED_LUCENE_VERSION = org.elasticsearch.Version.CURRENT + .minimumIndexCompatibilityVersion().luceneVersion; public void testRenameTempFiles() throws IOException { IndexService service = createIndex("foo"); @@ -51,7 +52,8 @@ public class RecoveryStatusTests extends ESSingleNodeTestCase { public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) { } }, version -> {}); - try (IndexOutput indexOutput = status.openAndPutIndexOutput("foo.bar", new 
StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength(), "9z51nw"), status.store())) { + try (IndexOutput indexOutput = status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength() + , "9z51nw", MIN_SUPPORTED_LUCENE_VERSION), status.store())) { indexOutput.writeInt(1); IndexOutput openIndexOutput = status.getOpenIndexOutput("foo.bar"); assertSame(openIndexOutput, indexOutput); @@ -60,7 +62,8 @@ public class RecoveryStatusTests extends ESSingleNodeTestCase { } try { - status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength(), "9z51nw"), status.store()); + status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength(), "9z51nw", + MIN_SUPPORTED_LUCENE_VERSION), status.store()); fail("file foo.bar is already opened and registered"); } catch (IllegalStateException ex) { assertEquals("output for file [foo.bar] has already been created", ex.getMessage()); diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java index 04afdd58391..d8d1fa6ccab 100644 --- a/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java +++ b/core/src/test/java/org/elasticsearch/plugins/PluginInfoTests.java @@ -182,12 +182,12 @@ public class PluginInfoTests extends ESTestCase { "description", "fake desc", "name", "my_plugin", "version", "1.0", - "elasticsearch.version", Version.V_2_0_0.toString()); + "elasticsearch.version", Version.V_5_0_0.toString()); try { PluginInfo.readFromProperties(pluginDir); fail("expected old elasticsearch version exception"); } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("was designed for version [2.0.0]")); + assertTrue(e.getMessage().contains("was designed for version [5.0.0]")); } } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java index 653e8d0a20f..d8aab691d2a 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoDistanceIT.java @@ -65,7 +65,8 @@ public class GeoDistanceIT extends ESIntegTestCase { return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created } - private Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + private Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); private IndexRequestBuilder indexCity(String idx, String name, String... 
latLons) throws Exception { XContentBuilder source = jsonBuilder().startObject().field("city", name); diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java index 7f158e0732f..fc080dd0f04 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridIT.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations.bucket; import com.carrotsearch.hppc.ObjectIntHashMap; import com.carrotsearch.hppc.ObjectIntMap; import com.carrotsearch.hppc.cursors.ObjectIntCursor; + import org.elasticsearch.Version; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -64,7 +65,8 @@ public class GeoHashGridIT extends ESIntegTestCase { return Arrays.asList(InternalSettingsPlugin.class); // uses index.version.created } - private Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + private Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); static ObjectIntMap expectedDocCountsForGeoHash = null; static ObjectIntMap multiValuedExpectedDocCountsForGeoHash = null; diff --git a/core/src/test/java/org/elasticsearch/search/fetch/subphase/NestedChildrenFilterTests.java b/core/src/test/java/org/elasticsearch/search/fetch/subphase/NestedChildrenFilterTests.java deleted file mode 100644 index c4c180ab858..00000000000 --- a/core/src/test/java/org/elasticsearch/search/fetch/subphase/NestedChildrenFilterTests.java +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.search.fetch.subphase; - -import org.apache.lucene.document.Document; -import org.apache.lucene.document.Field; -import org.apache.lucene.document.LegacyIntField; -import org.apache.lucene.document.StringField; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.RandomIndexWriter; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.TotalHitCountCollector; -import org.apache.lucene.search.Weight; -import org.apache.lucene.search.join.BitSetProducer; -import org.apache.lucene.search.join.QueryBitSetProducer; -import org.apache.lucene.store.Directory; -import org.elasticsearch.search.fetch.FetchSubPhase; -import org.elasticsearch.search.fetch.subphase.InnerHitsContext.NestedInnerHits.NestedChildrenQuery; -import org.elasticsearch.test.ESTestCase; - -import java.util.ArrayList; -import java.util.List; - -import static org.hamcrest.Matchers.equalTo; - -public class NestedChildrenFilterTests extends ESTestCase { - public void testNestedChildrenFilter() throws Exception { - int numParentDocs = scaledRandomIntBetween(0, 32); - int maxChildDocsPerParent = scaledRandomIntBetween(8, 16); - - Directory dir = newDirectory(); - RandomIndexWriter writer = new RandomIndexWriter(random(), dir); - for (int i = 0; i < numParentDocs; i++) { - int numChildDocs = scaledRandomIntBetween(0, maxChildDocsPerParent); - List docs = new ArrayList<>(numChildDocs + 1); - for (int j = 0; j < numChildDocs; j++) { - Document childDoc = new Document(); - childDoc.add(new StringField("type", "child", Field.Store.NO)); - docs.add(childDoc); - } - - Document parenDoc = new Document(); - parenDoc.add(new StringField("type", "parent", Field.Store.NO)); - parenDoc.add(new LegacyIntField("num_child_docs", numChildDocs, Field.Store.YES)); - docs.add(parenDoc); - writer.addDocuments(docs); - } - - IndexReader reader = writer.getReader(); - writer.close(); - - IndexSearcher searcher = new IndexSearcher(reader); - FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext(); - BitSetProducer parentFilter = new QueryBitSetProducer(new TermQuery(new Term("type", "parent"))); - Query childFilter = new TermQuery(new Term("type", "child")); - int checkedParents = 0; - final Weight parentsWeight = searcher.createNormalizedWeight(new TermQuery(new Term("type", "parent")), false); - for (LeafReaderContext leaf : reader.leaves()) { - DocIdSetIterator parents = parentsWeight.scorer(leaf).iterator(); - for (int parentDoc = parents.nextDoc(); parentDoc != DocIdSetIterator.NO_MORE_DOCS ; parentDoc = parents.nextDoc()) { - int expectedChildDocs = leaf.reader().document(parentDoc).getField("num_child_docs").numericValue().intValue(); - hitContext.reset(null, leaf, parentDoc, searcher); - NestedChildrenQuery nestedChildrenFilter = new NestedChildrenQuery(parentFilter, childFilter, hitContext); - TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); - searcher.search(new ConstantScoreQuery(nestedChildrenFilter), totalHitCountCollector); - assertThat(totalHitCountCollector.getTotalHits(), equalTo(expectedChildDocs)); - checkedParents++; - } - } - assertThat(checkedParents, equalTo(numParentDocs)); - reader.close(); - dir.close(); - } - -} diff --git 
a/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java index 027781a9b2b..12a64d80a14 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoBoundingBoxIT.java @@ -51,7 +51,8 @@ public class GeoBoundingBoxIT extends ESIntegTestCase { } public void testSimpleBoundingBoxTest() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties").startObject("location").field("type", "geo_point"); @@ -122,7 +123,8 @@ public class GeoBoundingBoxIT extends ESIntegTestCase { } public void testLimit2BoundingBox() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties").startObject("location").field("type", "geo_point"); @@ -174,7 +176,8 @@ public class GeoBoundingBoxIT extends ESIntegTestCase { } public void testCompleteLonRange() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties").startObject("location").field("type", "geo_point"); diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java index 3594f51c722..fdaf3126433 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoDistanceIT.java @@ -101,7 +101,8 @@ public class GeoDistanceIT extends ESIntegTestCase { @Before public void setupTestIndex() throws IOException { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties").startObject("location").field("type", "geo_point"); diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java index 5e09b36f6b5..a44c165e738 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoFilterIT.java @@ -367,7 +367,8 @@ public class GeoFilterIT extends ESIntegTestCase { public void testBulk() throws Exception { byte[] 
bulkAction = unZipData("/org/elasticsearch/search/geo/gzippedmap.gz"); - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder() .startObject() diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoPolygonIT.java b/core/src/test/java/org/elasticsearch/search/geo/GeoPolygonIT.java index e3678683a09..7906165b090 100644 --- a/core/src/test/java/org/elasticsearch/search/geo/GeoPolygonIT.java +++ b/core/src/test/java/org/elasticsearch/search/geo/GeoPolygonIT.java @@ -55,7 +55,8 @@ public class GeoPolygonIT extends ESIntegTestCase { @Override protected void setupSuiteScopeCluster() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties").startObject("location").field("type", "geo_point"); diff --git a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java index a1cc9b4d4dc..6eff821c5c3 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java +++ b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceIT.java @@ -58,7 +58,8 @@ public class GeoDistanceIT extends ESIntegTestCase { } public void testDistanceSortingMVFields() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("locations").field("type", "geo_point"); @@ -187,7 +188,8 @@ public class GeoDistanceIT extends ESIntegTestCase { // Regression bug: // https://github.com/elastic/elasticsearch/issues/2851 public void testDistanceSortingWithMissingGeoPoint() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("locations").field("type", "geo_point"); @@ -231,7 +233,8 @@ public class GeoDistanceIT extends ESIntegTestCase { } public void testDistanceSortingNestedFields() throws Exception { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); XContentBuilder xContentBuilder = 
XContentFactory.jsonBuilder().startObject().startObject("company").startObject("properties") .startObject("name").field("type", "text").endObject().startObject("branches").field("type", "nested") @@ -379,7 +382,8 @@ public class GeoDistanceIT extends ESIntegTestCase { * Issue 3073 */ public void testGeoDistanceFilter() throws IOException { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); double lat = 40.720611; double lon = -73.998776; diff --git a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java index 6529b990255..200043a6668 100644 --- a/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java +++ b/core/src/test/java/org/elasticsearch/search/sort/GeoDistanceSortBuilderIT.java @@ -69,7 +69,8 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase { * |___________________________ * 1 2 3 4 5 6 7 */ - Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = randomBoolean() ? Version.CURRENT + : VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); @@ -134,7 +135,8 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase { * d1 = (0, 1), (0, 4), (0, 10); so avg. distance is 5, median distance is 4 * d2 = (0, 1), (0, 5), (0, 6); so avg. distance is 4, median distance is 5 */ - Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = randomBoolean() ? Version.CURRENT + : VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); @@ -194,7 +196,8 @@ public class GeoDistanceSortBuilderIT extends ESIntegTestCase { * |______________________ * 1 2 3 4 5 6 */ - Version version = randomBoolean() ? Version.CURRENT : VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); + Version version = randomBoolean() ? 
Version.CURRENT + : VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT); Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build(); assertAcked(prepareCreate("index").setSettings(settings).addMapping("type", LOCATION_FIELD, "type=geo_point")); XContentBuilder d1Builder = jsonBuilder(); diff --git a/distribution/bwc-zip/build.gradle b/distribution/bwc-zip/build.gradle index ae7d366eab9..7bb5cce51b6 100644 --- a/distribution/bwc-zip/build.gradle +++ b/distribution/bwc-zip/build.gradle @@ -65,6 +65,7 @@ task addUpstream(type: LoggedExec) { } task fetchLatest(type: LoggedExec) { + onlyIf { project.gradle.startParameter.isOffline() == false } dependsOn addUpstream workingDir = checkoutDir commandLine = ['git', 'fetch', 'upstream'] diff --git a/docs/reference/aggregations.asciidoc b/docs/reference/aggregations.asciidoc index 586e582a05f..f2fdd9a16de 100644 --- a/docs/reference/aggregations.asciidoc +++ b/docs/reference/aggregations.asciidoc @@ -11,7 +11,7 @@ the execution defines what this document set is (e.g. a top-level aggregation executes within the context of the executed query/filters of the search request). There are many different types of aggregations, each with its own purpose and output. To better understand these types, -it is often easier to break them into three main families: +it is often easier to break them into four main families: <>:: A family of aggregations that build buckets, where each bucket is associated with a _key_ and a document diff --git a/docs/reference/cluster/remote-info.asciidoc b/docs/reference/cluster/remote-info.asciidoc new file mode 100644 index 00000000000..304e7c8de0b --- /dev/null +++ b/docs/reference/cluster/remote-info.asciidoc @@ -0,0 +1,35 @@ +[[cluster-remote-info]] +== Remote Cluster Info + +The cluster remote info API allows you to retrieve all of the configured +remote cluster information. + +[source,js] +---------------------------------- +GET /_remote/info +---------------------------------- +// CONSOLE + +This command returns connection and endpoint information keyed by +the configured remote cluster alias. + +[float] +[[connection-info]] + +`seeds`:: + The configured initial seed transport addresses of the remote cluster. + +`http_addresses`:: + The published http addresses of all connected remote nodes. + +`connected`:: + True if there is at least one connection to the remote cluster. + +`num_nodes_connected`:: + The number of connected nodes in the remote cluster. + +`max_connection_per_cluster`:: + The maximum number of connections maintained for the remote cluster. + +`initial_connect_timeout`:: + The initial connect timeout for remote cluster connections. diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc index afb5088d85e..873021c4206 100644 --- a/docs/reference/indices.asciidoc +++ b/docs/reference/indices.asciidoc @@ -40,11 +40,6 @@ index settings, aliases, mappings, and index templates.
* <> * <> -[float] -[[shadow-replicas]] -== Replica configurations -* <> - [float] [[monitoring]] == Monitoring: @@ -95,8 +90,6 @@ include::indices/analyze.asciidoc[] include::indices/templates.asciidoc[] -include::indices/shadow-replicas.asciidoc[] - include::indices/stats.asciidoc[] include::indices/segments.asciidoc[] diff --git a/docs/reference/indices/shadow-replicas.asciidoc b/docs/reference/indices/shadow-replicas.asciidoc deleted file mode 100644 index dd255a0e644..00000000000 --- a/docs/reference/indices/shadow-replicas.asciidoc +++ /dev/null @@ -1,124 +0,0 @@ -[[indices-shadow-replicas]] -== Shadow replica indices - -deprecated[5.2.0, Shadow replicas don't see much usage and we are planning to remove them] - -If you would like to use a shared filesystem, you can use the shadow replicas -settings to choose where on disk the data for an index should be kept, as well -as how Elasticsearch should replay operations on all the replica shards of an -index. - -In order to fully utilize the `index.data_path` and `index.shadow_replicas` -settings, you need to allow Elasticsearch to use the same data directory for -multiple instances by setting `node.add_lock_id_to_custom_path` to false in -elasticsearch.yml: - -[source,yaml] --------------------------------------------------- -node.add_lock_id_to_custom_path: false --------------------------------------------------- - -You will also need to indicate to the security manager where the custom indices -will be, so that the correct permissions can be applied. You can do this by -setting the `path.shared_data` setting in elasticsearch.yml: - -[source,yaml] --------------------------------------------------- -path.shared_data: /opt/data --------------------------------------------------- - -This means that Elasticsearch can read and write to files in any subdirectory of -the `path.shared_data` setting. - -You can then create an index with a custom data path, where each node will use -this path for the data: - -[WARNING] -======================== -Because shadow replicas do not index the document on replica shards, it's -possible for the replica's known mapping to be behind the index's known mapping -if the latest cluster state has not yet been processed on the node containing -the replica. Because of this, it is highly recommended to use pre-defined -mappings when using shadow replicas. -======================== - -[source,js] --------------------------------------------------- -PUT /my_index -{ - "index" : { - "number_of_shards" : 1, - "number_of_replicas" : 4, - "data_path": "/opt/data/my_index", - "shadow_replicas": true - } -} --------------------------------------------------- -// CONSOLE -// TEST[skip:no way to configure path.shared_data for /opt/data] - -[WARNING] -======================== -In the above example, the "/opt/data/my_index" path is a shared filesystem that -must be available on every node in the Elasticsearch cluster. You must also -ensure that the Elasticsearch process has the correct permissions to read from -and write to the directory used in the `index.data_path` setting. -======================== - -The `data_path` does not have to contain the index name, in this case, -"my_index" was used but it could easily also have been "/opt/data/" - -An index that has been created with the `index.shadow_replicas` setting set to -"true" will not replicate document operations to any of the replica shards, -instead, it will only continually refresh. 
Once segments are available on the -filesystem where the shadow replica resides (after an Elasticsearch "flush"), a -regular refresh (governed by the `index.refresh_interval`) can be used to make -the new data searchable. - -NOTE: Since documents are only indexed on the primary shard, realtime GET -requests could fail to return a document if executed on the replica shard, -therefore, GET API requests automatically have the `?preference=_primary` flag -set if there is no preference flag already set. - -In order to ensure the data is being synchronized in a fast enough manner, you -may need to tune the flush threshold for the index to a desired number. A flush -is needed to fsync segment files to disk, so they will be visible to all other -replica nodes. Users should test what flush threshold levels they are -comfortable with, as increased flushing can impact indexing performance. - -The Elasticsearch cluster will still detect the loss of a primary shard, and -transform the replica into a primary in this situation. This transformation will -take slightly longer, since no `IndexWriter` is maintained for each shadow -replica. - -Below is the list of settings that can be changed using the update -settings API: - -`index.data_path` (string):: - Path to use for the index's data. Note that by default Elasticsearch will - append the node ordinal by default to the path to ensure multiple instances - of Elasticsearch on the same machine do not share a data directory. - -`index.shadow_replicas`:: - Boolean value indicating this index should use shadow replicas. Defaults to - `false`. - -`index.shared_filesystem`:: - Boolean value indicating this index uses a shared filesystem. Defaults to - the `true` if `index.shadow_replicas` is set to true, `false` otherwise. - -`index.shared_filesystem.recover_on_any_node`:: - Boolean value indicating whether the primary shards for the index should be - allowed to recover on any node in the cluster. If a node holding a copy of - the shard is found, recovery prefers that node. Defaults to `false`. - -=== Node level settings related to shadow replicas - -These are non-dynamic settings that need to be configured in `elasticsearch.yml` - -`node.add_lock_id_to_custom_path`:: - Boolean setting indicating whether Elasticsearch should append the node's - ordinal to the custom data path. For example, if this is enabled and a path - of "/tmp/foo" is used, the first locally-running node will use "/tmp/foo/0", - the second will use "/tmp/foo/1", the third "/tmp/foo/2", etc. Defaults to - `true`. diff --git a/docs/reference/migration/migrate_6_0/indices.asciidoc b/docs/reference/migration/migrate_6_0/indices.asciidoc index 7062ac7cb1e..2e198be59cb 100644 --- a/docs/reference/migration/migrate_6_0/indices.asciidoc +++ b/docs/reference/migration/migrate_6_0/indices.asciidoc @@ -29,6 +29,11 @@ PUT _template/template_2 // CONSOLE -=== Shadow Replicas are deprecated +=== Shadow Replicas have been removed -<> don't see much usage and we are planning to remove them. +Shadow replicas don't see enough usage, and have been removed. 
This includes the +following settings: + +- `index.shared_filesystem` +- `index.shadow_replicas` +- `node.add_lock_id_to_custom_path` diff --git a/docs/reference/migration/migrate_6_0/settings.asciidoc b/docs/reference/migration/migrate_6_0/settings.asciidoc index ec25ffb601c..44acb999394 100644 --- a/docs/reference/migration/migrate_6_0/settings.asciidoc +++ b/docs/reference/migration/migrate_6_0/settings.asciidoc @@ -1,6 +1,32 @@ [[breaking_60_settings_changes]] === Settings changes +==== Duplicate keys in configuration file + +In previous versions of Elasticsearch, the configuration file was allowed to +contain duplicate keys. For example: + +[source,yaml] +-------------------------------------------------- +node: + name: my-node + +node: + attr: + rack: my-rack +-------------------------------------------------- + +In Elasticsearch 6.0.0, this is no longer permitted. Instead, this must be +specified in a single key as: + +[source,yaml] +-------------------------------------------------- +node: + name: my-node + attr: + rack: my-rack +-------------------------------------------------- + ==== Coercion of boolean settings Previously, Elasticsearch recognized the strings `true`, `false`, `on`, `off`, `yes`, `no`, `0`, `1` as booleans. Elasticsearch 6.0 diff --git a/docs/reference/modules/scripting/security.asciidoc b/docs/reference/modules/scripting/security.asciidoc index be1806175c1..eaeb1c62143 100644 --- a/docs/reference/modules/scripting/security.asciidoc +++ b/docs/reference/modules/scripting/security.asciidoc @@ -51,12 +51,11 @@ Bad: [[modules-scripting-security-do-no-weaken]] === Do not weaken script security settings By default Elasticsearch will run inline, stored, and filesystem scripts for -sandboxed languages, namely the scripting language Painless, the template +the builtin languages, namely the scripting language Painless, the template language Mustache, and the expression language Expressions. These *ought* to be safe to expose to trusted users and to your application servers because they -have strong security sandboxes. By default Elasticsearch will only run -filesystem scripts for non-sandboxed languages and enabling them is a poor -choice because: +have strong security sandboxes. The Elasticsearch committers do not support any +non-sandboxed scripting languages and using any would be a poor choice because: 1. This drops a layer of security, leaving only Elasticsearch's builtin <>. 2. Non-sandboxed scripts have unchecked access to Elasticsearch's internals and @@ -130,8 +129,8 @@ in the following form: `${pluginName}_${operation}`. The following example disables scripting for `update` and `plugin` operations, regardless of the script source or language. Scripts can still be executed -from sandboxed languages as part of `aggregations`, `search` and plugins -execution though, as the above defaults still get applied. +as part of `aggregations`, `search` and plugin execution though, as the above +defaults still get applied. [source,yaml] ----------------------------------- diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index eccba57dee1..41ba6e5c87a 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -60,9 +60,10 @@ GET /_search?q=tag:wow // CONSOLE // TEST[setup:twitter] -By default elasticsearch rejects search requests that would query more than -1000 shards. The reason is that such large numbers of shards make the job of -the coordinating node very CPU and memory intensive.
It is usually a better -idea to organize data in such a way that there are fewer larger shards. In -case you would like to bypass this limit, which is discouraged, you can update -the `action.search.shard_count.limit` cluster setting to a greater value. +By default elasticsearch doesn't reject any search requests based on the number +of shards the request hits. While elasticsearch will optimize the search execution +on the coordinating node, a large number of shards can have a significant impact +on CPU and memory. It is usually a better idea to organize data in such a way +that there are fewer, larger shards. In case you would like to configure a soft +limit, you can update the `action.search.shard_count.limit` cluster setting in order +to reject search requests that hit too many shards. diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/EqualsTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/EqualsTests.java index 2722388f028..16995f60dff 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/EqualsTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/EqualsTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.painless; * under the License. */ +import org.apache.lucene.util.Constants; + // TODO: Figure out a way to test autobox caching properly from methods such as Integer.valueOf(int); public class EqualsTests extends ScriptTestCase { public void testTypesEquals() { @@ -130,10 +132,11 @@ public class EqualsTests extends ScriptTestCase { } public void testBranchEqualsDefAndPrimitive() { + assumeFalse("test fails on Windows", Constants.WINDOWS); assertEquals(true, exec("def x = 1000; int y = 1000; return x == y;")); - exec("def x = 1000; int y = 1000; return x === y;"); + assertEquals(false, exec("def x = 1000; int y = 1000; return x === y;")); assertEquals(true, exec("def x = 1000; int y = 1000; return y == x;")); - exec("def x = 1000; int y = 1000; return y === x;"); + assertEquals(false, exec("def x = 1000; int y = 1000; return y === x;")); } public void testBranchNotEquals() { @@ -147,10 +150,11 @@ public class EqualsTests extends ScriptTestCase { } public void testBranchNotEqualsDefAndPrimitive() { + assumeFalse("test fails on Windows", Constants.WINDOWS); assertEquals(false, exec("def x = 1000; int y = 1000; return x != y;")); - exec("def x = 1000; int y = 1000; return x !== y;"); + assertEquals(true, exec("def x = 1000; int y = 1000; return x !== y;")); assertEquals(false, exec("def x = 1000; int y = 1000; return y != x;")); - exec("def x = 1000; int y = 1000; return y !== x;"); + assertEquals(true, exec("def x = 1000; int y = 1000; return y !== x;")); } public void testRightHandNull() { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java index 2aa032154bb..d8f246b74ef 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorFieldMapperTests.java @@ -35,8 +35,6 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesArray; import
org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -73,7 +71,6 @@ import org.elasticsearch.script.MockScriptPlugin; import org.elasticsearch.script.Script; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.VersionUtils; import org.junit.Before; import java.io.IOException; @@ -85,7 +82,6 @@ import java.util.List; import java.util.Map; import java.util.function.Function; -import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.matchPhraseQuery; @@ -485,7 +481,6 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { } public void testEmptyName() throws Exception { - // after 5.x String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties").startObject("").field("type", "percolator").endObject().endObject() .endObject().endObject().string(); @@ -495,14 +490,6 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase { () -> parser.parse("type1", new CompressedXContent(mapping)) ); assertThat(e.getMessage(), containsString("name cannot be empty string")); - - // before 5.x - Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5); - Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build(); - DocumentMapperParser parser2x = createIndex("test_old", oldIndexSettings).mapperService().documentMapperParser(); - - DocumentMapper defaultMapper = parser2x.parse("type1", new CompressedXContent(mapping)); - assertEquals(mapping, defaultMapper.mappingSource().string()); } public void testImplicitlySetDefaultScriptLang() throws Exception { diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java index 036de6f0e22..1cd1df230a4 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuilders.java @@ -69,7 +69,7 @@ final class RemoteRequestBuilders { if (searchRequest.source().sorts() != null) { boolean useScan = false; // Detect if we should use search_type=scan rather than a sort - if (remoteVersion.before(Version.V_2_1_0)) { + if (remoteVersion.before(Version.fromId(2010099))) { for (SortBuilder sort : searchRequest.source().sorts()) { if (sort instanceof FieldSortBuilder) { FieldSortBuilder f = (FieldSortBuilder) sort; @@ -90,7 +90,7 @@ final class RemoteRequestBuilders { params.put("sort", sorts.toString()); } } - if (remoteVersion.before(Version.V_2_0_0)) { + if (remoteVersion.before(Version.fromId(2000099))) { // Versions before 2.0.0 need prompting to return interesting fields. Note that timestamp isn't available at all.... 
searchRequest.source().storedField("_parent").storedField("_routing").storedField("_ttl"); } @@ -172,7 +172,7 @@ final class RemoteRequestBuilders { } static HttpEntity scrollEntity(String scroll, Version remoteVersion) { - if (remoteVersion.before(Version.V_2_0_0)) { + if (remoteVersion.before(Version.fromId(2000099))) { // Versions before 2.0.0 extract the plain scroll_id from the body return new StringEntity(scroll, ContentType.TEXT_PLAIN); } @@ -186,7 +186,7 @@ final class RemoteRequestBuilders { } static HttpEntity clearScrollEntity(String scroll, Version remoteVersion) { - if (remoteVersion.before(Version.V_2_0_0)) { + if (remoteVersion.before(Version.fromId(2000099))) { // Versions before 2.0.0 extract the plain scroll_id from the body return new StringEntity(scroll, ContentType.TEXT_PLAIN); } diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java index 974fd9438d2..6b7b6ca3aa0 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSource.java @@ -128,7 +128,8 @@ public class RemoteScrollableHitSource extends ScrollableHitSource { private void logFailure(Exception e) { if (e instanceof ResponseException) { ResponseException re = (ResponseException) e; - if (remoteVersion.before(Version.V_2_0_0) && re.getResponse().getStatusLine().getStatusCode() == 404) { + if (remoteVersion.before(Version.fromId(2000099)) + && re.getResponse().getStatusLine().getStatusCode() == 404) { logger.debug((Supplier) () -> new ParameterizedMessage( "Failed to clear scroll [{}] from pre-2.0 Elasticsearch. This is normal if the request terminated " + "normally as the scroll has already been cleared automatically.", scrollId), e); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java index 779c89f7ee8..8c082227f86 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -96,12 +96,12 @@ public class RemoteRequestBuildersTests extends ESTestCase { SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder()); // Test sort:_doc for versions that support it. - Version remoteVersion = Version.fromId(between(Version.V_2_1_0_ID, Version.CURRENT.id)); + Version remoteVersion = Version.fromId(between(2010099, Version.CURRENT.id)); searchRequest.source().sort("_doc"); assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("sort", "_doc:asc")); // Test search_type scan for versions that don't support sort:_doc. - remoteVersion = Version.fromId(between(0, Version.V_2_1_0_ID - 1)); + remoteVersion = Version.fromId(between(0, 2010099 - 1)); assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("search_type", "scan")); // Test sorting by some field. Version doesn't matter. 
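The raw ids passed to `Version.fromId` above stand in for the `V_2_x` constants this change removes from `Version.java`. As a hedged sketch of the encoding (per the id-format comment in `Version.java`, a trailing `99` marks a release build, so `2000099` corresponds to 2.0.0), the reindex-from-remote gates keep working unchanged:

```java
// Sketch only: mapping the raw ids used above back to the removed constants.
// Assumes org.elasticsearch.Version as it exists in this tree.
import org.elasticsearch.Version;

public class VersionIdDemo {
    public static void main(String[] args) {
        Version v200 = Version.fromId(2000099); // was Version.V_2_0_0
        Version v210 = Version.fromId(2010099); // was Version.V_2_1_0
        System.out.println(v200);               // prints "2.0.0"
        System.out.println(v200.before(v210));  // true, so pre-2.1.0 checks still hold
    }
}
```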
diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java index 2a67306425c..f63b05e96be 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteScrollableHitSourceTests.java @@ -144,7 +144,7 @@ public class RemoteScrollableHitSourceTests extends ESTestCase { assertTrue(called.get()); called.set(false); sourceWithMockedRemoteCall(false, ContentType.APPLICATION_JSON, "main/2_3_3.json").lookupRemoteVersion(v -> { - assertEquals(Version.V_2_3_3, v); + assertEquals(Version.fromId(2030399), v); called.set(true); }); assertTrue(called.get()); diff --git a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java index e16a8f05203..6afefc5cf03 100644 --- a/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java +++ b/plugins/mapper-murmur3/src/test/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapperTests.java @@ -22,10 +22,7 @@ package org.elasticsearch.index.mapper.murmur3; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; @@ -39,7 +36,6 @@ import org.elasticsearch.indices.mapper.MapperRegistry; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.test.VersionUtils; import org.junit.Before; import java.util.Arrays; @@ -47,7 +43,6 @@ import java.util.Collection; import java.util.Collections; import java.util.function.Supplier; -import static com.carrotsearch.randomizedtesting.RandomizedTest.getRandom; import static org.hamcrest.Matchers.containsString; public class Murmur3FieldMapperTests extends ESSingleNodeTestCase { @@ -157,20 +152,5 @@ public class Murmur3FieldMapperTests extends ESSingleNodeTestCase { () -> parser.parse("type", new CompressedXContent(mapping)) ); assertThat(e.getMessage(), containsString("name cannot be empty string")); - - // before 5.x - Version oldVersion = VersionUtils.randomVersionBetween(getRandom(), Version.V_2_0_0, Version.V_2_3_5); - Settings oldIndexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, oldVersion).build(); - IndexService indexService2x = createIndex("test_old", oldIndexSettings); - - Supplier queryShardContext = () -> { - return indexService2x.newQueryShardContext(0, null, () -> { throw new UnsupportedOperationException(); }); - }; - DocumentMapperParser parser = new DocumentMapperParser(indexService2x.getIndexSettings(), indexService2x.mapperService(), - indexService2x.getIndexAnalyzers(), indexService2x.xContentRegistry(), indexService2x.similarityService(), mapperRegistry, - queryShardContext); - - DocumentMapper defaultMapper = parser.parse("type", 
new CompressedXContent(mapping)); - assertEquals(mapping, defaultMapper.mappingSource().string()); } } diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index aab56b97c46..50f2ac571a4 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -54,6 +54,11 @@ bundlePlugin { } } +integTestCluster { + keystoreSetting 's3.client.default.access_key', 'myaccesskey' + keystoreSetting 's3.client.default.secret_key', 'mysecretkey' +} + thirdPartyAudit.excludes = [ // classes are missing 'javax.servlet.ServletContextEvent', diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java index e91faa2ebf2..872e713c546 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/AwsS3Service.java @@ -150,5 +150,5 @@ interface AwsS3Service extends LifecycleComponent { /** * Creates an {@code AmazonS3} client from the given repository metadata and node settings. */ - AmazonS3 client(RepositoryMetaData metadata, Settings repositorySettings); + AmazonS3 client(Settings repositorySettings); } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java index a9dbb61c44d..eb2f22782f4 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/InternalAwsS3Service.java @@ -38,13 +38,11 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; import static org.elasticsearch.repositories.s3.S3Repository.getValue; @@ -53,92 +51,84 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se // pkg private for tests static final Setting<String> CLIENT_NAME = new Setting<>("client", "default", Function.identity()); - /** - * (acceskey, endpoint) -> client - */ - private Map<Tuple<String, String>, AmazonS3Client> clients = new HashMap<>(); + private final Map<String, S3ClientSettings> clientsSettings; - InternalAwsS3Service(Settings settings) { + private final Map<String, AmazonS3Client> clientsCache = new HashMap<>(); + + InternalAwsS3Service(Settings settings, Map<String, S3ClientSettings> clientsSettings) { super(settings); + this.clientsSettings = clientsSettings; } @Override - public synchronized AmazonS3 client(RepositoryMetaData metadata, Settings repositorySettings) { + public synchronized AmazonS3 client(Settings repositorySettings) { String clientName = CLIENT_NAME.get(repositorySettings); - String foundEndpoint = findEndpoint(logger, repositorySettings, settings, clientName); - - AWSCredentialsProvider credentials = buildCredentials(logger, deprecationLogger, settings, repositorySettings, clientName); - - Tuple<String, String> clientDescriptor = new Tuple<>(foundEndpoint,
credentials.getCredentials().getAWSAccessKeyId()); - AmazonS3Client client = clients.get(clientDescriptor); + AmazonS3Client client = clientsCache.get(clientName); if (client != null) { return client; } - Integer maxRetries = getValue(metadata.settings(), settings, + S3ClientSettings clientSettings = clientsSettings.get(clientName); + if (clientSettings == null) { + throw new IllegalArgumentException("Unknown s3 client name [" + clientName + "]. " + + "Existing client configs: " + + Strings.collectionToDelimitedString(clientsSettings.keySet(), ",")); + } + + String endpoint = findEndpoint(logger, clientSettings, repositorySettings); + Integer maxRetries = getValue(repositorySettings, settings, S3Repository.Repository.MAX_RETRIES_SETTING, S3Repository.Repositories.MAX_RETRIES_SETTING); - boolean useThrottleRetries = getValue(metadata.settings(), settings, + boolean useThrottleRetries = getValue(repositorySettings, settings, S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING, S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING); // If the user defined a path style access setting, we rely on it, // otherwise we use the default value set by the SDK Boolean pathStyleAccess = null; - if (S3Repository.Repository.PATH_STYLE_ACCESS_SETTING.exists(metadata.settings()) || - S3Repository.Repositories.PATH_STYLE_ACCESS_SETTING.exists(settings)) { - pathStyleAccess = getValue(metadata.settings(), settings, + if (S3Repository.Repository.PATH_STYLE_ACCESS_SETTING.exists(repositorySettings) || + S3Repository.Repositories.PATH_STYLE_ACCESS_SETTING.exists(settings)) { + pathStyleAccess = getValue(repositorySettings, settings, S3Repository.Repository.PATH_STYLE_ACCESS_SETTING, S3Repository.Repositories.PATH_STYLE_ACCESS_SETTING); } logger.debug("creating S3 client with client_name [{}], endpoint [{}], max_retries [{}], " + - "use_throttle_retries [{}], path_style_access [{}]", - clientName, foundEndpoint, maxRetries, useThrottleRetries, pathStyleAccess); + "use_throttle_retries [{}], path_style_access [{}]", + clientName, endpoint, maxRetries, useThrottleRetries, pathStyleAccess); - client = new AmazonS3Client( - credentials, - buildConfiguration(logger, repositorySettings, settings, clientName, maxRetries, foundEndpoint, useThrottleRetries)); + AWSCredentialsProvider credentials = buildCredentials(logger, deprecationLogger, clientSettings, repositorySettings); + ClientConfiguration configuration = buildConfiguration(logger, clientSettings, repositorySettings, maxRetries, endpoint, useThrottleRetries); + + client = new AmazonS3Client(credentials, configuration); if (pathStyleAccess != null) { client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(pathStyleAccess)); } - if (!foundEndpoint.isEmpty()) { - client.setEndpoint(foundEndpoint); + if (Strings.hasText(endpoint)) { + client.setEndpoint(endpoint); } - clients.put(clientDescriptor, client); + clientsCache.put(clientName, client); return client; } // pkg private for tests - static ClientConfiguration buildConfiguration(Logger logger, Settings repositorySettings, Settings settings, - String clientName, Integer maxRetries, String endpoint, - boolean useThrottleRetries) { + static ClientConfiguration buildConfiguration(Logger logger, S3ClientSettings clientSettings, Settings repositorySettings, + Integer maxRetries, String endpoint, boolean useThrottleRetries) { ClientConfiguration clientConfiguration = new ClientConfiguration(); // the response metadata cache is only there for diagnostics purposes, // but can force objects from every 
response to the old generation. clientConfiguration.setResponseMetadataCacheSize(0); - Protocol protocol = getConfigValue(repositorySettings, settings, clientName, S3Repository.PROTOCOL_SETTING, - S3Repository.Repository.PROTOCOL_SETTING, S3Repository.Repositories.PROTOCOL_SETTING); + Protocol protocol = getRepoValue(repositorySettings, S3Repository.Repository.PROTOCOL_SETTING, clientSettings.protocol); clientConfiguration.setProtocol(protocol); - String proxyHost = getConfigValue(null, settings, clientName, - S3Repository.PROXY_HOST_SETTING, null, CLOUD_S3.PROXY_HOST_SETTING); - if (Strings.hasText(proxyHost)) { - Integer proxyPort = getConfigValue(null, settings, clientName, - S3Repository.PROXY_PORT_SETTING, null, CLOUD_S3.PROXY_PORT_SETTING); - try (SecureString proxyUsername = getConfigValue(null, settings, clientName, - S3Repository.PROXY_USERNAME_SETTING, null, CLOUD_S3.PROXY_USERNAME_SETTING); - SecureString proxyPassword = getConfigValue(null, settings, clientName, - S3Repository.PROXY_PASSWORD_SETTING, null, CLOUD_S3.PROXY_PASSWORD_SETTING)) { - - clientConfiguration - .withProxyHost(proxyHost) - .withProxyPort(proxyPort) - .withProxyUsername(proxyUsername.toString()) - .withProxyPassword(proxyPassword.toString()); - } + if (Strings.hasText(clientSettings.proxyHost)) { + // TODO: remove this leniency, these settings should exist together and be validated + clientConfiguration.setProxyHost(clientSettings.proxyHost); + clientConfiguration.setProxyPort(clientSettings.proxyPort); + clientConfiguration.setProxyUsername(clientSettings.proxyUsername); + clientConfiguration.setProxyPassword(clientSettings.proxyPassword); } if (maxRetries != null) { @@ -146,64 +136,56 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se clientConfiguration.setMaxErrorRetry(maxRetries); } clientConfiguration.setUseThrottleRetries(useThrottleRetries); - - TimeValue readTimeout = getConfigValue(null, settings, clientName, - S3Repository.READ_TIMEOUT_SETTING, null, CLOUD_S3.READ_TIMEOUT); - clientConfiguration.setSocketTimeout((int)readTimeout.millis()); + clientConfiguration.setSocketTimeout(clientSettings.readTimeoutMillis); return clientConfiguration; } - public static AWSCredentialsProvider buildCredentials(Logger logger, DeprecationLogger deprecationLogger, - Settings settings, Settings repositorySettings, String clientName) { - try (SecureString key = getConfigValue(repositorySettings, settings, clientName, S3Repository.ACCESS_KEY_SETTING, - S3Repository.Repository.KEY_SETTING, S3Repository.Repositories.KEY_SETTING); - SecureString secret = getConfigValue(repositorySettings, settings, clientName, S3Repository.SECRET_KEY_SETTING, - S3Repository.Repository.SECRET_SETTING, S3Repository.Repositories.SECRET_SETTING)) { - - if (key.length() == 0 && secret.length() == 0) { - logger.debug("Using instance profile credentials"); - return new PrivilegedInstanceProfileCredentialsProvider(); - } else { - logger.debug("Using basic key/secret credentials"); - return new StaticCredentialsProvider(new BasicAWSCredentials(key.toString(), secret.toString())); + // pkg private for tests + static AWSCredentialsProvider buildCredentials(Logger logger, DeprecationLogger deprecationLogger, + S3ClientSettings clientSettings, Settings repositorySettings) { + BasicAWSCredentials credentials = clientSettings.credentials; + if (S3Repository.Repository.KEY_SETTING.exists(repositorySettings)) { + if (S3Repository.Repository.SECRET_SETTING.exists(repositorySettings) == false) { + throw new 
IllegalArgumentException("Repository setting [" + S3Repository.Repository.KEY_SETTING + + " must be accompanied by setting [" + S3Repository.Repository.SECRET_SETTING + "]"); } + // backcompat for reading keys out of repository settings + deprecationLogger.deprecated("Using s3 access/secret key from repository settings. Instead " + + "store these in named clients and the elasticsearch keystore for secure settings."); + try (SecureString key = S3Repository.Repository.KEY_SETTING.get(repositorySettings); + SecureString secret = S3Repository.Repository.SECRET_SETTING.get(repositorySettings)) { + credentials = new BasicAWSCredentials(key.toString(), secret.toString()); + } + } else if (S3Repository.Repository.SECRET_SETTING.exists(repositorySettings)) { + throw new IllegalArgumentException("Repository setting [" + S3Repository.Repository.SECRET_SETTING + + " must be accompanied by setting [" + S3Repository.Repository.KEY_SETTING + "]"); + } + if (credentials == null) { + logger.debug("Using instance profile credentials"); + return new PrivilegedInstanceProfileCredentialsProvider(); + } else { + logger.debug("Using basic key/secret credentials"); + return new StaticCredentialsProvider(credentials); } } // pkg private for tests /** Returns the endpoint the client should use, based on the available endpoint settings found. */ - static String findEndpoint(Logger logger, Settings repositorySettings, Settings settings, String clientName) { - String endpoint = getConfigValue(repositorySettings, settings, clientName, S3Repository.ENDPOINT_SETTING, - S3Repository.Repository.ENDPOINT_SETTING, S3Repository.Repositories.ENDPOINT_SETTING); - if (Strings.isNullOrEmpty(endpoint)) { - // No region has been set so we will use the default endpoint - if (CLOUD_S3.ENDPOINT_SETTING.exists(settings)) { - endpoint = CLOUD_S3.ENDPOINT_SETTING.get(settings); - logger.debug("using explicit s3 endpoint [{}]", endpoint); - } - } else { + static String findEndpoint(Logger logger, S3ClientSettings clientSettings, Settings repositorySettings) { + String endpoint = getRepoValue(repositorySettings, S3Repository.Repository.ENDPOINT_SETTING, clientSettings.endpoint); + if (Strings.hasText(endpoint)) { logger.debug("using repository level endpoint [{}]", endpoint); } - return endpoint; } - /** - * Find the setting value, trying first with named configs, - * then falling back to repository and global repositories settings. - */ - private static T getConfigValue(Settings repositorySettings, Settings globalSettings, String clientName, - Setting.AffixSetting configSetting, Setting repositorySetting, Setting globalSetting) { - Setting concreteSetting = configSetting.getConcreteSettingForNamespace(clientName); - if (concreteSetting.exists(globalSettings)) { - return concreteSetting.get(globalSettings); - } else if (repositorySetting == null) { - // no repository setting, just use global setting - return globalSetting.get(globalSettings); - } else { - return getValue(repositorySettings, globalSettings, repositorySetting, globalSetting); + /** Returns the value for a given setting from the repository, or returns the fallback value. 
*/ + private static <T> T getRepoValue(Settings repositorySettings, Setting<T> repositorySetting, T fallback) { + if (repositorySetting.exists(repositorySettings)) { + return repositorySetting.get(repositorySettings); } + return fallback; } @Override @@ -216,7 +198,7 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se @Override protected void doClose() throws ElasticsearchException { - for (AmazonS3Client client : clients.values()) { + for (AmazonS3Client client : clientsCache.values()) { client.shutdown(); } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java new file mode 100644 index 00000000000..edaf44289c6 --- /dev/null +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3ClientSettings.java @@ -0,0 +1,183 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.s3; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Set; + +import com.amazonaws.ClientConfiguration; +import com.amazonaws.Protocol; +import com.amazonaws.auth.BasicAWSCredentials; +import org.elasticsearch.common.settings.SecureSetting; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.repositories.s3.AwsS3Service.CLOUD_S3; + +/** + * A container for settings used to create an S3 client. + */ +class S3ClientSettings { + + // prefix for s3 client settings + private static final String PREFIX = "s3.client."; + + /** The access key (ie login id) for connecting to s3. */ + static final Setting.AffixSetting<SecureString> ACCESS_KEY_SETTING = Setting.affixKeySetting(PREFIX, "access_key", + key -> SecureSetting.secureString(key, S3Repository.Repositories.KEY_SETTING)); + + /** The secret key (ie password) for connecting to s3. */ + static final Setting.AffixSetting<SecureString> SECRET_KEY_SETTING = Setting.affixKeySetting(PREFIX, "secret_key", + key -> SecureSetting.secureString(key, S3Repository.Repositories.SECRET_SETTING)); + + /** An override for the s3 endpoint to connect to. */ + static final Setting.AffixSetting<String> ENDPOINT_SETTING = Setting.affixKeySetting(PREFIX, "endpoint", + key -> new Setting<>(key, S3Repository.Repositories.ENDPOINT_SETTING, s -> s.toLowerCase(Locale.ROOT), + Setting.Property.NodeScope)); + + /** The protocol to use to connect to s3.
*/ + static final Setting.AffixSetting<Protocol> PROTOCOL_SETTING = Setting.affixKeySetting(PREFIX, "protocol", + key -> new Setting<>(key, "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Setting.Property.NodeScope)); + + /** The host name of a proxy to connect to s3 through. */ + static final Setting.AffixSetting<String> PROXY_HOST_SETTING = Setting.affixKeySetting(PREFIX, "proxy.host", + key -> Setting.simpleString(key, Setting.Property.NodeScope)); + + /** The port of a proxy to connect to s3 through. */ + static final Setting.AffixSetting<Integer> PROXY_PORT_SETTING = Setting.affixKeySetting(PREFIX, "proxy.port", + key -> Setting.intSetting(key, 80, 0, 1<<16, Setting.Property.NodeScope)); + + /** The username of a proxy to connect to s3 through. */ + static final Setting.AffixSetting<SecureString> PROXY_USERNAME_SETTING = Setting.affixKeySetting(PREFIX, "proxy.username", + key -> SecureSetting.secureString(key, AwsS3Service.PROXY_USERNAME_SETTING)); + + /** The password of a proxy to connect to s3 through. */ + static final Setting.AffixSetting<SecureString> PROXY_PASSWORD_SETTING = Setting.affixKeySetting(PREFIX, "proxy.password", + key -> SecureSetting.secureString(key, AwsS3Service.PROXY_PASSWORD_SETTING)); + + /** The socket timeout for connecting to s3. */ + static final Setting.AffixSetting<TimeValue> READ_TIMEOUT_SETTING = Setting.affixKeySetting(PREFIX, "read_timeout", + key -> Setting.timeSetting(key, TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), + Setting.Property.NodeScope)); + + /** Credentials to authenticate with s3. */ + final BasicAWSCredentials credentials; + + /** The s3 endpoint the client should talk to, or empty string to use the default. */ + final String endpoint; + + /** The protocol to use to talk to s3. Defaults to https. */ + final Protocol protocol; + + /** An optional proxy host that requests to s3 should be made through. */ + final String proxyHost; + + /** The port number the proxy host should be connected on. */ + final int proxyPort; + + // these should be "secure" yet the api for the s3 client only takes String, so storing them + // as SecureString here won't really help with anything + /** An optional username for the proxy host, for basic authentication. */ + final String proxyUsername; + + /** An optional password for the proxy host, for basic authentication. */ + final String proxyPassword; + + /** The read timeout for the s3 client. */ + final int readTimeoutMillis; + + private S3ClientSettings(BasicAWSCredentials credentials, String endpoint, Protocol protocol, + String proxyHost, int proxyPort, String proxyUsername, + String proxyPassword, int readTimeoutMillis) { + this.credentials = credentials; + this.endpoint = endpoint; + this.protocol = protocol; + this.proxyHost = proxyHost; + this.proxyPort = proxyPort; + this.proxyUsername = proxyUsername; + this.proxyPassword = proxyPassword; + this.readTimeoutMillis = readTimeoutMillis; + } + + /** + * Load all client settings from the given settings. + * + * Note this will always at least return a client named "default".
+ */ + static Map<String, S3ClientSettings> load(Settings settings) { + Set<String> clientNames = settings.getGroups(PREFIX).keySet(); + Map<String, S3ClientSettings> clients = new HashMap<>(); + for (String clientName : clientNames) { + clients.put(clientName, getClientSettings(settings, clientName)); + } + if (clients.containsKey("default") == false) { + // this won't find any settings under the default client, + // but it will pull all the fallback static settings + clients.put("default", getClientSettings(settings, "default")); + } + return Collections.unmodifiableMap(clients); + } + + // pkg private for tests + /** Parse settings for a single client. */ + static S3ClientSettings getClientSettings(Settings settings, String clientName) { + try (SecureString accessKey = getConfigValue(settings, clientName, ACCESS_KEY_SETTING, S3Repository.Repositories.KEY_SETTING); + SecureString secretKey = getConfigValue(settings, clientName, SECRET_KEY_SETTING, S3Repository.Repositories.SECRET_SETTING); + SecureString proxyUsername = getConfigValue(settings, clientName, PROXY_USERNAME_SETTING, CLOUD_S3.PROXY_USERNAME_SETTING); + SecureString proxyPassword = getConfigValue(settings, clientName, PROXY_PASSWORD_SETTING, CLOUD_S3.PROXY_PASSWORD_SETTING)) { + BasicAWSCredentials credentials = null; + if (accessKey.length() != 0) { + if (secretKey.length() != 0) { + credentials = new BasicAWSCredentials(accessKey.toString(), secretKey.toString()); + } else { + throw new IllegalArgumentException("Missing secret key for s3 client [" + clientName + "]"); + } + } else if (secretKey.length() != 0) { + throw new IllegalArgumentException("Missing access key for s3 client [" + clientName + "]"); + } + return new S3ClientSettings( + credentials, + getConfigValue(settings, clientName, ENDPOINT_SETTING, S3Repository.Repositories.ENDPOINT_SETTING), + getConfigValue(settings, clientName, PROTOCOL_SETTING, S3Repository.Repositories.PROTOCOL_SETTING), + getConfigValue(settings, clientName, PROXY_HOST_SETTING, AwsS3Service.CLOUD_S3.PROXY_HOST_SETTING), + getConfigValue(settings, clientName, PROXY_PORT_SETTING, AwsS3Service.CLOUD_S3.PROXY_PORT_SETTING), + proxyUsername.toString(), + proxyPassword.toString(), + (int)getConfigValue(settings, clientName, READ_TIMEOUT_SETTING, AwsS3Service.CLOUD_S3.READ_TIMEOUT).millis() + ); + } + } + + private static <T> T getConfigValue(Settings settings, String clientName, + Setting.AffixSetting<T> clientSetting, + Setting<T> globalSetting) { + Setting<T> concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); + if (concreteSetting.exists(settings)) { + return concreteSetting.get(settings); + } else { + return globalSetting.get(settings); + } + } + +} diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 50e9b998ad6..2ce6396465a 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -60,45 +60,6 @@ class S3Repository extends BlobStoreRepository { public static final String TYPE = "s3"; - // prefix for s3 client settings - private static final String PREFIX = "s3.client."; - - /** The access key (ie login id) for connecting to s3. */ - public static final AffixSetting<SecureString> ACCESS_KEY_SETTING = Setting.affixKeySetting(PREFIX, "access_key", - key -> SecureSetting.secureString(key, Repositories.KEY_SETTING)); - - /** The secret key (ie password) for connecting to s3.
*/ - public static final AffixSetting<SecureString> SECRET_KEY_SETTING = Setting.affixKeySetting(PREFIX, "secret_key", - key -> SecureSetting.secureString(key, Repositories.SECRET_SETTING)); - - /** An override for the s3 endpoint to connect to. */ - public static final AffixSetting<String> ENDPOINT_SETTING = Setting.affixKeySetting(PREFIX, "endpoint", - key -> new Setting<>(key, Repositories.ENDPOINT_SETTING, s -> s.toLowerCase(Locale.ROOT), Property.NodeScope)); - - /** The protocol to use to connec to to s3. */ - public static final AffixSetting<Protocol> PROTOCOL_SETTING = Setting.affixKeySetting(PREFIX, "protocol", - key -> new Setting<>(key, "https", s -> Protocol.valueOf(s.toUpperCase(Locale.ROOT)), Property.NodeScope)); - - /** The host name of a proxy to connect to s3 through. */ - public static final AffixSetting<String> PROXY_HOST_SETTING = Setting.affixKeySetting(PREFIX, "proxy.host", - key -> Setting.simpleString(key, Property.NodeScope)); - - /** The port of a proxy to connect to s3 through. */ - public static final AffixSetting<Integer> PROXY_PORT_SETTING = Setting.affixKeySetting(PREFIX, "proxy.port", - key -> Setting.intSetting(key, 80, 0, 1<<16, Property.NodeScope)); - - /** The username of a proxy to connect to s3 through. */ - public static final AffixSetting<SecureString> PROXY_USERNAME_SETTING = Setting.affixKeySetting(PREFIX, "proxy.username", - key -> SecureSetting.secureString(key, AwsS3Service.PROXY_USERNAME_SETTING)); - - /** The password of a proxy to connect to s3 through. */ - public static final AffixSetting<SecureString> PROXY_PASSWORD_SETTING = Setting.affixKeySetting(PREFIX, "proxy.password", - key -> SecureSetting.secureString(key, AwsS3Service.PROXY_PASSWORD_SETTING)); - - /** The socket timeout for connecting to s3. */ - public static final AffixSetting<TimeValue> READ_TIMEOUT_SETTING = Setting.affixKeySetting(PREFIX, "read_timeout", - key -> Setting.timeSetting(key, TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope)); - /** * Global S3 repositories settings. Starting with: repositories.s3 * NOTE: These are legacy settings. Use the named client config settings above.
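All of the `s3.client.*` settings moved into `S3ClientSettings` above are affix settings, so a single definition covers every named client. A minimal sketch of the lookup idiom, assuming the `Setting` API as used in this diff; the client name `staging` and the endpoint value are hypothetical:

```java
// Minimal sketch; "staging" and "s3.example.com" are made-up illustration values.
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

public class AffixSettingDemo {
    // One affix definition serves s3.client.<name>.endpoint for every client name.
    static final Setting.AffixSetting<String> ENDPOINT = Setting.affixKeySetting("s3.client.", "endpoint",
            key -> Setting.simpleString(key, Setting.Property.NodeScope));

    public static void main(String[] args) {
        Settings nodeSettings = Settings.builder()
                .put("s3.client.staging.endpoint", "s3.example.com")
                .build();
        // The concrete setting is derived per namespace, i.e. per client name.
        Setting<String> concrete = ENDPOINT.getConcreteSettingForNamespace("staging");
        System.out.println(concrete.get(nodeSettings)); // s3.example.com
    }
}
```

`getConfigValue` in `S3ClientSettings` wraps exactly this resolution, falling back to the legacy global setting when the concrete per-client key is absent.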
@@ -328,7 +289,7 @@ class S3Repository extends BlobStoreRepository { "buffer_size [{}], cannedACL [{}], storageClass [{}]", bucket, chunkSize, serverSideEncryption, bufferSize, cannedACL, storageClass); - AmazonS3 client = s3Service.client(metadata, metadata.settings()); + AmazonS3 client = s3Service.client(metadata.settings()); blobStore = new S3BlobStore(settings, client, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); String basePath = getValue(metadata.settings(), settings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING); diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index d27c3481357..04814b99e88 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -57,9 +57,17 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin { }); } + private final Map<String, S3ClientSettings> clientsSettings; + + public S3RepositoryPlugin(Settings settings) { + // eagerly load client settings so that secure settings are read + clientsSettings = S3ClientSettings.load(settings); + assert clientsSettings.isEmpty() == false : "always at least have 'default'"; + } + // overridable for tests protected AwsS3Service createStorageService(Settings settings) { - return new InternalAwsS3Service(settings); + return new InternalAwsS3Service(settings, clientsSettings); } @Override @@ -80,15 +88,15 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin { return Arrays.asList( // named s3 client configuration settings - S3Repository.ACCESS_KEY_SETTING, - S3Repository.SECRET_KEY_SETTING, - S3Repository.ENDPOINT_SETTING, - S3Repository.PROTOCOL_SETTING, - S3Repository.PROXY_HOST_SETTING, - S3Repository.PROXY_PORT_SETTING, - S3Repository.PROXY_USERNAME_SETTING, - S3Repository.PROXY_PASSWORD_SETTING, - S3Repository.READ_TIMEOUT_SETTING, + S3ClientSettings.ACCESS_KEY_SETTING, + S3ClientSettings.SECRET_KEY_SETTING, + S3ClientSettings.ENDPOINT_SETTING, + S3ClientSettings.PROTOCOL_SETTING, + S3ClientSettings.PROXY_HOST_SETTING, + S3ClientSettings.PROXY_PORT_SETTING, + S3ClientSettings.PROXY_USERNAME_SETTING, + S3ClientSettings.PROXY_PASSWORD_SETTING, + S3ClientSettings.READ_TIMEOUT_SETTING, // Register global cloud aws settings: cloud.aws (might have been registered in ec2 plugin) AwsS3Service.KEY_SETTING, diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AWSSignersTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AWSSignersTests.java index 51e0f5623a9..cc33fcc243e 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AWSSignersTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AWSSignersTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.repositories.s3; import com.amazonaws.ClientConfiguration; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.s3.AwsSigner; import org.elasticsearch.repositories.s3.S3RepositoryPlugin; import org.elasticsearch.test.ESTestCase; @@ -35,7 +36,7 @@ public class AWSSignersTests extends ESTestCase { */ @BeforeClass public static void instantiatePlugin() { - new S3RepositoryPlugin(); + new S3RepositoryPlugin(Settings.EMPTY); } public void testSigners() { diff --git
a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java index ea2274e7ccb..9b94744883a 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java @@ -197,7 +197,7 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase Settings settings = internalCluster().getInstance(Settings.class); Settings bucket = settings.getByPrefix("repositories.s3."); RepositoryMetaData metadata = new RepositoryMetaData("test-repo", "fs", Settings.EMPTY); - AmazonS3 s3Client = internalCluster().getInstance(AwsS3Service.class).client(metadata, repositorySettings); + AmazonS3 s3Client = internalCluster().getInstance(AwsS3Service.class).client(repositorySettings); String bucketName = bucket.get("bucket"); logger.info("--> verify encryption for bucket [{}], prefix [{}]", bucketName, basePath); @@ -464,9 +464,8 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase // We check that settings has been set in elasticsearch.yml integration test file // as described in README - assertThat("Your settings in elasticsearch.yml are incorrects. Check README file.", bucketName, notNullValue()); - RepositoryMetaData metadata = new RepositoryMetaData("test-repo", "fs", Settings.EMPTY); - AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(metadata, + assertThat("Your settings in elasticsearch.yml are incorrect. Check README file.", bucketName, notNullValue()); + AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client( Settings.builder().put(S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING.getKey(), randomBoolean()).build()); try { ObjectListing prevListing = null; diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java index 7d30ffcc1f5..77dbfd1dc5c 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AwsS3ServiceImplTests.java @@ -34,8 +34,8 @@ import static org.hamcrest.Matchers.is; public class AwsS3ServiceImplTests extends ESTestCase { public void testAWSCredentialsWithSystemProviders() { - AWSCredentialsProvider credentialsProvider = - InternalAwsS3Service.buildCredentials(logger, deprecationLogger, Settings.EMPTY, Settings.EMPTY, "default"); + S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(Settings.EMPTY, "default"); + AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, deprecationLogger, clientSettings, Settings.EMPTY); assertThat(credentialsProvider, instanceOf(InternalAwsS3Service.PrivilegedInstanceProfileCredentialsProvider.class)); } @@ -142,8 +142,12 @@ public class AwsS3ServiceImplTests extends ESTestCase { .put(S3Repository.Repositories.SECRET_SETTING.getKey(), "repositories_secret") .build(); launchAWSCredentialsWithElasticsearchSettingsTest(repositorySettings, settings, "repository_key", "repository_secret"); - assertSettingDeprecationsAndWarnings( - new Setting[]{S3Repository.Repository.KEY_SETTING, S3Repository.Repository.SECRET_SETTING}); + 
assertSettingDeprecationsAndWarnings(new Setting[]{ + S3Repository.Repositories.KEY_SETTING, + S3Repository.Repositories.SECRET_SETTING, + S3Repository.Repository.KEY_SETTING, + S3Repository.Repository.SECRET_SETTING}, + "Using s3 access/secret key from repository settings. Instead store these in named clients and the elasticsearch keystore for secure settings."); } public void testAWSCredentialsWithElasticsearchAwsAndRepositoriesSettingsAndRepositorySettingsBackcompat() { @@ -155,8 +159,14 @@ public class AwsS3ServiceImplTests extends ESTestCase { .put(S3Repository.Repositories.SECRET_SETTING.getKey(), "repositories_secret") .build(); launchAWSCredentialsWithElasticsearchSettingsTest(repositorySettings, settings, "repository_key", "repository_secret"); - assertSettingDeprecationsAndWarnings( - new Setting[]{S3Repository.Repository.KEY_SETTING, S3Repository.Repository.SECRET_SETTING}); + assertSettingDeprecationsAndWarnings(new Setting[]{ + AwsS3Service.KEY_SETTING, + AwsS3Service.SECRET_SETTING, + S3Repository.Repositories.KEY_SETTING, + S3Repository.Repositories.SECRET_SETTING, + S3Repository.Repository.KEY_SETTING, + S3Repository.Repository.SECRET_SETTING}, + "Using s3 access/secret key from repository settings. Instead store these in named clients and the elasticsearch keystore for secure settings."); } public void testAWSCredentialsWithElasticsearchAwsAndS3AndRepositoriesSettingsAndRepositorySettingsBackcompat() { @@ -170,15 +180,25 @@ public class AwsS3ServiceImplTests extends ESTestCase { .put(S3Repository.Repositories.SECRET_SETTING.getKey(), "repositories_secret") .build(); launchAWSCredentialsWithElasticsearchSettingsTest(repositorySettings, settings, "repository_key", "repository_secret"); - assertSettingDeprecationsAndWarnings( - new Setting[]{S3Repository.Repository.KEY_SETTING, S3Repository.Repository.SECRET_SETTING}); + assertSettingDeprecationsAndWarnings(new Setting[]{ + AwsS3Service.KEY_SETTING, + AwsS3Service.SECRET_SETTING, + AwsS3Service.CLOUD_S3.KEY_SETTING, + AwsS3Service.CLOUD_S3.SECRET_SETTING, + S3Repository.Repositories.KEY_SETTING, + S3Repository.Repositories.SECRET_SETTING, + S3Repository.Repository.KEY_SETTING, + S3Repository.Repository.SECRET_SETTING}, + "Using s3 access/secret key from repository settings. 
Instead store these in named clients and the elasticsearch keystore for secure settings."); } protected void launchAWSCredentialsWithElasticsearchSettingsTest(Settings singleRepositorySettings, Settings settings, String expectedKey, String expectedSecret) { String configName = InternalAwsS3Service.CLIENT_NAME.get(singleRepositorySettings); - AWSCredentials credentials = InternalAwsS3Service.buildCredentials(logger, deprecationLogger, settings, - singleRepositorySettings, configName).getCredentials(); + S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, configName); + AWSCredentials credentials = InternalAwsS3Service + .buildCredentials(logger, deprecationLogger, clientSettings, singleRepositorySettings) + .getCredentials(); assertThat(credentials.getAWSAccessKeyId(), is(expectedKey)); assertThat(credentials.getAWSSecretKey(), is(expectedSecret)); } @@ -287,8 +307,9 @@ public class AwsS3ServiceImplTests extends ESTestCase { Boolean useThrottleRetries = S3Repository.getValue(singleRepositorySettings, settings, S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING, S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING); - ClientConfiguration configuration = InternalAwsS3Service.buildConfiguration(logger, singleRepositorySettings, settings, - "default", maxRetries, null, useThrottleRetries); + S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, "default"); + ClientConfiguration configuration = InternalAwsS3Service.buildConfiguration(logger, clientSettings, + singleRepositorySettings, maxRetries, null, useThrottleRetries); assertThat(configuration.getResponseMetadataCacheSize(), is(0)); assertThat(configuration.getProtocol(), is(expectedProtocol)); @@ -344,7 +365,8 @@ public class AwsS3ServiceImplTests extends ESTestCase { private void assertEndpoint(Settings repositorySettings, Settings settings, String expectedEndpoint) { String configName = InternalAwsS3Service.CLIENT_NAME.get(repositorySettings); - String foundEndpoint = InternalAwsS3Service.findEndpoint(logger, repositorySettings, settings, configName); + S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(settings, configName); + String foundEndpoint = InternalAwsS3Service.findEndpoint(logger, clientSettings, repositorySettings); assertThat(foundEndpoint, is(expectedEndpoint)); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index 247d3c68b27..f1b3ceb28f1 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -60,7 +60,7 @@ public class S3RepositoryTests extends ESTestCase { @Override protected void doClose() {} @Override - public AmazonS3 client(RepositoryMetaData metadata, Settings settings) { + public AmazonS3 client(Settings settings) { return new DummyS3Client(); } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java index 17a1b989c99..522ca06614c 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/TestAwsS3Service.java @@ -28,6 +28,9 @@ import org.elasticsearch.common.settings.Settings; 
public class TestAwsS3Service extends InternalAwsS3Service { public static class TestPlugin extends S3RepositoryPlugin { + public TestPlugin(Settings settings) { + super(settings); + } @Override protected AwsS3Service createStorageService(Settings settings) { return new TestAwsS3Service(settings); @@ -37,13 +40,12 @@ public class TestAwsS3Service extends InternalAwsS3Service { IdentityHashMap<AmazonS3, AmazonS3> clients = new IdentityHashMap<>(); public TestAwsS3Service(Settings settings) { - super(settings); + super(settings, S3ClientSettings.load(settings)); } - @Override - public synchronized AmazonS3 client(RepositoryMetaData metadata, Settings repositorySettings) { - return cachedWrapper(super.client(metadata, repositorySettings)); + public synchronized AmazonS3 client(Settings repositorySettings) { + return cachedWrapper(super.client(repositorySettings)); } private AmazonS3 cachedWrapper(AmazonS3 client) { diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yaml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yaml index 6b0286ac81b..74cab3edcb7 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yaml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository.yaml @@ -1,12 +1,7 @@ # Integration tests for Repository S3 component # "S3 repository can be registered": - - skip: - features: warnings - do: - warnings: - - "[access_key] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version." - - "[secret_key] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version."
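
The hunks above migrate the S3 tests from the old per-repository client lookup (`client(RepositoryMetaData, Settings)`) to named client settings resolved from the node configuration. A minimal sketch of the new flow, stitched together from calls that appear in these tests; the `repositorySettings` value and the `logger`/`deprecationLogger` fields (supplied by `ESTestCase`) are assumptions, and the snippet is illustrative rather than part of the patch:

    Settings repositorySettings = Settings.EMPTY; // stands in for a concrete repository's settings
    // Resolve the client name from the repository settings ("default" when unset) ...
    String clientName = InternalAwsS3Service.CLIENT_NAME.get(repositorySettings);
    // ... load that named client's settings from the node settings ...
    S3ClientSettings clientSettings = S3ClientSettings.getClientSettings(Settings.EMPTY, clientName);
    // ... and build a credentials provider from them; the repository settings are only
    // consulted for the deprecated access_key/secret_key backcompat path.
    AWSCredentialsProvider credentialsProvider =
        InternalAwsS3Service.buildCredentials(logger, deprecationLogger, clientSettings, repositorySettings);
    // Repositories then obtain a client from the service with the repository settings alone:
    AmazonS3 s3Client = internalCluster().getInstance(AwsS3Service.class).client(repositorySettings);
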
snapshot.create_repository: repository: test_repo_s3_1 verify: false @@ -14,8 +9,6 @@ type: s3 settings: bucket: "my_bucket_name" - access_key: "AKVAIQBF2RECL7FJWGJQ" - secret_key: "vExyMThREXeRMm/b/LRzEB8jWwvzQeXgjqMX+6br" canned_acl: "public-read" storage_class: "standard" diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yaml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yaml index 28ff1e52b87..bca0703d457 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yaml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yaml @@ -118,6 +118,30 @@ - match: { hits.total: 6 } - match: { hits.hits.0._index: "test_remote_cluster:test_index" } +--- +"Test wildcard search": + - do: + cluster.get_settings: + include_defaults: true + + - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip } + + - do: + cluster.put_settings: + flat_settings: true + body: + transient: + search.remote.test_remote_cluster.seeds: $remote_ip + + - match: {transient: {search.remote.test_remote_cluster.seeds: $remote_ip}} + + - do: + search: + index: "*:test_index" + + - match: { _shards.total: 6 } + - match: { hits.total: 12 } + --- "Search a filtered alias on the remote cluster": diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yaml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yaml new file mode 100644 index 00000000000..7843e30561a --- /dev/null +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/20_info.yaml @@ -0,0 +1,58 @@ +--- +"Fetch remote cluster info for existing cluster": + + - do: + remote.info: {} + - match: { my_remote_cluster.connected: true } + - match: { my_remote_cluster.num_nodes_connected: 1} + - match: { my_remote_cluster.max_connections_per_cluster: 1} + - match: { my_remote_cluster.initial_connect_timeout: "30s" } + - is_true: my_remote_cluster.http_addresses.0 + +--- +"Add transient remote cluster based on the preset cluster and check remote info": + - do: + cluster.get_settings: + include_defaults: true + + - set: { defaults.search.remote.my_remote_cluster.seeds.0: remote_ip } + + - do: + cluster.put_settings: + flat_settings: true + body: + transient: + search.remote.test_remote_cluster.seeds: $remote_ip + + - match: {transient: {search.remote.test_remote_cluster.seeds: $remote_ip}} + + # we do another search here since this will force the connection to be established + # otherwise the cluster might not have been connected yet. 
+ - do: + search: + index: test_remote_cluster:test_index + + - match: { _shards.total: 3 } + - match: { hits.total: 6 } + - match: { hits.hits.0._index: "test_remote_cluster:test_index" } + + - do: + remote.info: {} + - set: { my_remote_cluster.http_addresses.0: remote_http } + - match: { test_remote_cluster.http_addresses.0: $remote_http } + + - match: { test_remote_cluster.connected: true } + - match: { my_remote_cluster.connected: true } + + - match: { test_remote_cluster.seeds.0: $remote_ip } + - match: { my_remote_cluster.seeds.0: $remote_ip } + + - match: { my_remote_cluster.num_nodes_connected: 1} + - match: { test_remote_cluster.num_nodes_connected: 1} + + - match: { my_remote_cluster.max_connections_per_cluster: 1} + - match: { test_remote_cluster.max_connections_per_cluster: 1} + + - match: { my_remote_cluster.initial_connect_timeout: "30s" } + - match: { test_remote_cluster.initial_connect_timeout: "30s" } + diff --git a/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats b/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats index 26c8c8082d1..64f0e977c7d 100644 --- a/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats +++ b/qa/vagrant/src/test/resources/packaging/tests/70_sysv_initd.bats @@ -124,7 +124,11 @@ setup() { # set DATA_DIR to DATA_DIR=/tmp/aoeu,/tmp/asdf sed -i 's/DATA_DIR=.*/DATA_DIR=\/tmp\/aoeu,\/tmp\/asdf/' /etc/init.d/elasticsearch cat /etc/init.d/elasticsearch | grep "DATA_DIR" - service elasticsearch start + run service elasticsearch start + if [ "$status" -ne 0 ]; then + cat /var/log/elasticsearch/* + fail + fi wait_for_elasticsearch_status assert_file_not_exist /tmp/aoeu,/tmp/asdf assert_file_not_exist /tmp/aoeu, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/remote.info.json b/rest-api-spec/src/main/resources/rest-api-spec/api/remote.info.json new file mode 100644 index 00000000000..a90d4ff6984 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/remote.info.json @@ -0,0 +1,12 @@ +{ + "remote.info": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/remote-info.html", + "methods": ["GET"], + "url": { + "path": "/_remote/info", + "paths": ["/_remote/info"], + "params": {} + }, + "body": null + } +} \ No newline at end of file diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml index 8d72d40b10b..dc097765c57 100755 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.shards/10_basic.yaml @@ -169,33 +169,6 @@ $body: | /^(index2 \s+ \d \s+ (p|r) \s+ ((STARTED|INITIALIZING|RELOCATING) \s+ (\d \s+ (\d+|\d+[.]\d+)(kb|b) \s+)? \d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} \s+ .+|UNASSIGNED \s+) \n?){5}$/ ---- -"Test cat shards with shadow replicas": - - skip: - version: " - 5.1.99" - reason: deprecation was added in 5.2.0 - features: "warnings" - - - do: - indices.create: - index: index3 - body: - settings: - number_of_shards: "1" - number_of_replicas: "1" - shadow_replicas: true - shared_filesystem: false - warnings: - - "[index.shadow_replicas] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version." - - "[index.shared_filesystem] setting was deprecated in Elasticsearch and will be removed in a future release! See the breaking changes documentation for the next major version." 
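
The remote.info spec added above exposes a single endpoint, GET /_remote/info, with no parameters or body. A minimal sketch of calling it through the low-level REST client; the host and port are placeholders, not taken from the patch:

    import org.apache.http.HttpHost;
    import org.apache.http.util.EntityUtils;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;

    public class RemoteInfoExample {
        public static void main(String[] args) throws Exception {
            try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
                // Returns one object per configured remote cluster: connected, seeds,
                // num_nodes_connected, max_connections_per_cluster, initial_connect_timeout, http_addresses.
                Response response = restClient.performRequest("GET", "/_remote/info");
                System.out.println(EntityUtils.toString(response.getEntity()));
            }
        }
    }
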
- - - do: - cat.shards: - index: index3 - - match: - $body: | - /^(index3 \s+ \d \s+ (p|s) \s+ ((STARTED|INITIALIZING|RELOCATING) \s+ (\d \s+ (\d+|\d+[.]\d+)(kb|b) \s+)? \d{1,3}.\d{1,3}.\d{1,3}.\d{1,3} \s+ .+|UNASSIGNED \s+) \n?){2}$/ - --- "Test cat shards using wildcards": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/remote.info/10_info.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/remote.info/10_info.yaml new file mode 100644 index 00000000000..34c46059419 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/remote.info/10_info.yaml @@ -0,0 +1,9 @@ +--- +"Get an empty remote info": + - skip: + version: " - 5.3.99" + reason: this API doesn't exist in 5.3.x yet + - do: + remote.info: {} + - is_true: '' + diff --git a/test/framework/src/main/java/org/elasticsearch/common/settings/MockSecureSettings.java b/test/framework/src/main/java/org/elasticsearch/common/settings/MockSecureSettings.java index 81c3b47bde4..21cd1961d7c 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/settings/MockSecureSettings.java +++ b/test/framework/src/main/java/org/elasticsearch/common/settings/MockSecureSettings.java @@ -19,8 +19,11 @@ package org.elasticsearch.common.settings; +import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; import java.util.Set; @@ -30,6 +33,8 @@ import java.util.Set; public class MockSecureSettings implements SecureSettings { private Map<String, SecureString> secureStrings = new HashMap<>(); + private Map<String, byte[]> files = new HashMap<>(); + private Set<String> settingNames = new HashSet<>(); @Override public boolean isLoaded() { @@ -38,7 +43,7 @@ public class MockSecureSettings { @Override public Set<String> getSettingNames() { - return secureStrings.keySet(); + return settingNames; } @Override @@ -46,8 +51,19 @@ public class MockSecureSettings implements SecureSettings { return secureStrings.get(setting); } + @Override + public InputStream getFile(String setting) { + return new ByteArrayInputStream(files.get(setting)); + } + public void setString(String setting, String value) { secureStrings.put(setting, new SecureString(value.toCharArray())); + settingNames.add(setting); + } + + public void setFile(String setting, byte[] value) { + files.put(setting, value); + settingNames.add(setting); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java index 9a6747d5301..2956e44d507 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java @@ -35,9 +35,4 @@ public final class MockEngineFactory implements EngineFactory { public Engine newReadWriteEngine(EngineConfig config) { return new MockInternalEngine(config, wrapper); } - - @Override - public Engine newReadOnlyEngine(EngineConfig config) { - return new MockShadowEngine(config, wrapper); - } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java index fbc4352b1e2..a7fc61e6913 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -47,7 +47,7 @@ import java.util.Random; import 
java.util.concurrent.atomic.AtomicBoolean; /** - * Support class to build MockEngines like {@link org.elasticsearch.test.engine.MockInternalEngine} or {@link org.elasticsearch.test.engine.MockShadowEngine} + * Support class to build MockEngines like {@link org.elasticsearch.test.engine.MockInternalEngine} * since they need to subclass the actual engine */ public final class MockEngineSupport { diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java index 603907cc03c..fe8c4daec8d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java +++ b/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java @@ -29,12 +29,10 @@ import java.io.IOException; final class MockInternalEngine extends InternalEngine { private MockEngineSupport support; - private final boolean randomizeFlushOnClose; private Class<? extends FilterDirectoryReader> wrapperClass; MockInternalEngine(EngineConfig config, Class<? extends FilterDirectoryReader> wrapper) throws EngineException { super(config); - randomizeFlushOnClose = config.getIndexSettings().isOnSharedFilesystem() == false; wrapperClass = wrapper; } @@ -61,17 +59,13 @@ final class MockInternalEngine extends InternalEngine { @Override public void flushAndClose() throws IOException { - if (randomizeFlushOnClose) { - switch (support().flushOrClose(MockEngineSupport.CloseAction.FLUSH_AND_CLOSE)) { - case FLUSH_AND_CLOSE: - flushAndCloseInternal(); - break; - case CLOSE: - super.close(); - break; - } - } else { - flushAndCloseInternal(); + switch (support().flushOrClose(MockEngineSupport.CloseAction.FLUSH_AND_CLOSE)) { + case FLUSH_AND_CLOSE: + flushAndCloseInternal(); + break; + case CLOSE: + super.close(); + break; } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java deleted file mode 100644 index 2116dcc390c..00000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.test.engine; - -import org.apache.lucene.index.FilterDirectoryReader; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.SearcherManager; -import org.elasticsearch.index.engine.EngineConfig; -import org.elasticsearch.index.engine.EngineException; -import org.elasticsearch.index.engine.ShadowEngine; - -final class MockShadowEngine extends ShadowEngine { - private final MockEngineSupport support; - - MockShadowEngine(EngineConfig config, Class<? extends FilterDirectoryReader> wrapper) { - super(config); - this.support = new MockEngineSupport(config, wrapper); - } - - @Override - protected Searcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) throws EngineException { - final Searcher engineSearcher = super.newSearcher(source, searcher, manager); - return support.wrapSearcher(source, engineSearcher, searcher, manager); - } - -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java index a2e03a063cf..c2c0f57c942 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -56,7 +56,7 @@ public class ClientYamlTestExecutionContext { private final boolean randomizeContentType; - public ClientYamlTestExecutionContext(ClientYamlTestClient clientYamlTestClient, boolean randomizeContentType) { + ClientYamlTestExecutionContext(ClientYamlTestClient clientYamlTestClient, boolean randomizeContentType) { this.clientYamlTestClient = clientYamlTestClient; this.randomizeContentType = randomizeContentType; } @@ -68,7 +68,7 @@ public class ClientYamlTestExecutionContext { public ClientYamlTestResponse callApi(String apiName, Map<String, String> params, List<Map<String, Object>> bodies, Map<String, String> headers) throws IOException { //makes a copy of the parameters before modifying them for this specific request - HashMap<String, String> requestParams = new HashMap<>(params); + Map<String, String> requestParams = new HashMap<>(params); requestParams.putIfAbsent("error_trace", "true"); // By default ask for error traces, this may be overridden by params for (Map.Entry<String, String> entry : requestParams.entrySet()) { if (stash.containsStashedValue(entry.getValue())) { @@ -76,9 +76,17 @@ public class ClientYamlTestExecutionContext { } } - HttpEntity entity = createEntity(bodies, headers); + //make a copy of the headers before modifying them for this specific request + Map<String, String> requestHeaders = new HashMap<>(headers); + for (Map.Entry<String, String> entry : requestHeaders.entrySet()) { + if (stash.containsStashedValue(entry.getValue())) { + entry.setValue(stash.getValue(entry.getValue()).toString()); + } + } + + HttpEntity entity = createEntity(bodies, requestHeaders); try { - response = callApiInternal(apiName, requestParams, entity, headers); + response = callApiInternal(apiName, requestParams, entity, requestHeaders); return response; } catch(ClientYamlTestResponseException e) { response = e.getRestTestResponse(); @@ -143,7 +151,8 @@ public class ClientYamlTestExecutionContext { } } - private ClientYamlTestResponse callApiInternal(String apiName, Map<String, String> params, + // pkg-private for testing + ClientYamlTestResponse callApiInternal(String apiName, Map<String, String> params, HttpEntity entity, Map<String, String> headers) throws IOException { return clientYamlTestClient.callApi(apiName, params, entity, headers); } diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java index 787e219b0ed..1efd210b110 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java +++ b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java @@ -103,7 +103,7 @@ public class MockFSIndexStore extends IndexStore { @Override public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState currentState, @Nullable String reason) { - if (currentState == IndexShardState.CLOSED && validCheckIndexStates.contains(previousState) && indexShard.indexSettings().isOnSharedFilesystem() == false) { + if (currentState == IndexShardState.CLOSED && validCheckIndexStates.contains(previousState)) { shardSet.put(indexShard, Boolean.TRUE); } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java new file mode 100644 index 00000000000..2150baf59ea --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.test.rest.yaml; + +import org.apache.http.HttpEntity; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; + +public class ClientYamlTestExecutionContextTests extends ESTestCase { + + public void testHeadersSupportStashedValueReplacement() throws IOException { + final AtomicReference<Map<String, String>> headersRef = new AtomicReference<>(); + final ClientYamlTestExecutionContext context = + new ClientYamlTestExecutionContext(null, randomBoolean()) { + @Override + ClientYamlTestResponse callApiInternal(String apiName, Map<String, String> params, + HttpEntity entity, + Map<String, String> headers) { + headersRef.set(headers); + return null; + } + }; + final Map<String, String> headers = new HashMap<>(); + headers.put("foo", "$bar"); + headers.put("foo1", "baz ${c}"); + + context.stash().stashValue("bar", "foo2"); + context.stash().stashValue("c", "bar1"); + + assertNull(headersRef.get()); + context.callApi("test", Collections.emptyMap(), Collections.emptyList(), headers); + assertNotNull(headersRef.get()); + assertNotEquals(headers, headersRef.get()); + + assertEquals("foo2", headersRef.get().get("foo")); + assertEquals("baz bar1", headersRef.get().get("foo1")); + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java index 7ad1cc8377f..dd6a2979344 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSectionTests.java @@ -123,7 +123,7 @@ public class ClientYamlTestSectionTests extends AbstractClientYamlTestFragmentPa parser = createParser(YamlXContent.yamlXContent, "\"First test section\": \n" + " - skip:\n" + - " version: \"2.0.0 - 2.2.0\"\n" + + " version: \"5.0.0 - 5.2.0\"\n" + " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" + " - do :\n" + " catch: missing\n" + @@ -138,8 +138,9 @@ public class ClientYamlTestSectionTests extends AbstractClientYamlTestFragmentPa assertThat(testSection, notNullValue()); assertThat(testSection.getName(), equalTo("First test section")); assertThat(testSection.getSkipSection(), notNullValue()); - assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.V_2_0_0)); - assertThat(testSection.getSkipSection().getUpperVersion(), equalTo(Version.V_2_2_0)); + assertThat(testSection.getSkipSection().getLowerVersion(), equalTo(Version.V_5_0_0)); + assertThat(testSection.getSkipSection().getUpperVersion(), + equalTo(Version.V_5_2_0_UNRELEASED)); assertThat(testSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(testSection.getExecutableSections().size(), equalTo(2)); DoSection doSection = (DoSection)testSection.getExecutableSections().get(0); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index 4c96986146b..4c97eb45361 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -66,10 +66,10 @@ public 
class ClientYamlTestSuiteTests extends AbstractClientYamlTestFragmentPars " - match: {test_index.test_type.properties.text.analyzer: whitespace}\n" + "\n" + "---\n" + - "\"Get type mapping - pre 1.0\":\n" + + "\"Get type mapping - pre 5.0\":\n" + "\n" + " - skip:\n" + - " version: \"2.0.0 - \"\n" + + " version: \"5.0.0 - \"\n" + " reason: \"for newer versions the index name is always returned\"\n" + "\n" + " - do:\n" + @@ -130,11 +130,13 @@ public class ClientYamlTestSuiteTests extends AbstractClientYamlTestFragmentPars assertThat(matchAssertion.getField(), equalTo("test_index.test_type.properties.text.analyzer")); assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace")); - assertThat(restTestSuite.getTestSections().get(1).getName(), equalTo("Get type mapping - pre 1.0")); + assertThat(restTestSuite.getTestSections().get(1).getName(), + equalTo("Get type mapping - pre 5.0")); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(false)); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getReason(), equalTo("for newer versions the index name is always returned")); - assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getLowerVersion(), equalTo(Version.V_2_0_0)); + assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getLowerVersion(), + equalTo(Version.V_5_0_0)); assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getUpperVersion(), equalTo(Version.CURRENT)); assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(3)); assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class)); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java index f6174cf0be2..7b3022dd937 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SetupSectionTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.test.rest.yaml.section; import org.elasticsearch.Version; import org.elasticsearch.common.xcontent.yaml.YamlXContent; -import org.elasticsearch.test.rest.yaml.section.SetupSection; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -54,7 +53,7 @@ public class SetupSectionTests extends AbstractClientYamlTestFragmentParserTestC public void testParseSetupAndSkipSectionNoSkip() throws Exception { parser = createParser(YamlXContent.yamlXContent, " - skip:\n" + - " version: \"2.0.0 - 2.3.0\"\n" + + " version: \"5.0.0 - 5.3.0\"\n" + " reason: \"Update doesn't return metadata fields, waiting for #3259\"\n" + " - do:\n" + " index1:\n" + @@ -75,8 +74,9 @@ public class SetupSectionTests extends AbstractClientYamlTestFragmentParserTestC assertThat(setupSection, notNullValue()); assertThat(setupSection.getSkipSection().isEmpty(), equalTo(false)); assertThat(setupSection.getSkipSection(), notNullValue()); - assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.V_2_0_0)); - assertThat(setupSection.getSkipSection().getUpperVersion(), equalTo(Version.V_2_3_0)); + assertThat(setupSection.getSkipSection().getLowerVersion(), equalTo(Version.V_5_0_0)); + assertThat(setupSection.getSkipSection().getUpperVersion(), + equalTo(Version.V_5_3_0_UNRELEASED)); 
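
The skip bounds asserted above and below follow SkipSection's closed-interval semantics. A small sketch, reusing the constructor exercised in SkipSectionTests further down (illustrative only):

    // A range like "5.0.0 - 5.3.0" skips both endpoints and every version between them.
    SkipSection section = new SkipSection("5.0.0 - 5.3.0",
        Collections.emptyList(), "Update doesn't return metadata fields, waiting for #3259");
    assertTrue(section.skip(Version.V_5_0_0));            // lower bound is inclusive
    assertTrue(section.skip(Version.V_5_3_0_UNRELEASED)); // upper bound is inclusive
    assertFalse(section.skip(Version.CURRENT));           // outside the range, so the test runs
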
assertThat(setupSection.getSkipSection().getReason(), equalTo("Update doesn't return metadata fields, waiting for #3259")); assertThat(setupSection.getDoSections().size(), equalTo(2)); assertThat(setupSection.getDoSections().get(0).getApiCallSection().getApi(), equalTo("index1")); diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java index 85304be1711..1ba31ed288d 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/SkipSectionTests.java @@ -34,16 +34,18 @@ import static org.hamcrest.Matchers.nullValue; public class SkipSectionTests extends AbstractClientYamlTestFragmentParserTestCase { public void testSkip() { - SkipSection section = new SkipSection("2.0.0 - 2.1.0", + SkipSection section = new SkipSection("5.0.0 - 5.1.0", randomBoolean() ? Collections.emptyList() : Collections.singletonList("warnings"), "foobar"); assertFalse(section.skip(Version.CURRENT)); - assertTrue(section.skip(Version.V_2_0_0)); - section = new SkipSection(randomBoolean() ? null : "2.0.0 - 2.1.0", Collections.singletonList("boom"), "foobar"); + assertTrue(section.skip(Version.V_5_0_0)); + section = new SkipSection(randomBoolean() ? null : "5.0.0 - 5.1.0", + Collections.singletonList("boom"), "foobar"); assertTrue(section.skip(Version.CURRENT)); } public void testMessage() { - SkipSection section = new SkipSection("2.0.0 - 2.1.0", Collections.singletonList("warnings"), "foobar"); + SkipSection section = new SkipSection("5.0.0 - 5.1.0", + Collections.singletonList("warnings"), "foobar"); assertEquals("[FOOBAR] skipped, reason: [foobar] unsupported features [warnings]", section.getSkipMessage("FOOBAR")); section = new SkipSection(null, Collections.singletonList("warnings"), "foobar"); assertEquals("[FOOBAR] skipped, reason: [foobar] unsupported features [warnings]", section.getSkipMessage("FOOBAR")); @@ -53,14 +55,14 @@ public class SkipSectionTests extends AbstractClientYamlTestFragmentParserTestCa public void testParseSkipSectionVersionNoFeature() throws Exception { parser = createParser(YamlXContent.yamlXContent, - "version: \" - 2.1.0\"\n" + + "version: \" - 5.1.1\"\n" + "reason: Delete ignores the parent param" ); SkipSection skipSection = SkipSection.parse(parser); assertThat(skipSection, notNullValue()); assertThat(skipSection.getLowerVersion(), equalTo(VersionUtils.getFirstVersion())); - assertThat(skipSection.getUpperVersion(), equalTo(Version.V_2_1_0)); + assertThat(skipSection.getUpperVersion(), equalTo(Version.V_5_1_1_UNRELEASED)); assertThat(skipSection.getFeatures().size(), equalTo(0)); assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param")); } diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java index f057d0d370d..de8e83692b8 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/TeardownSectionTests.java @@ -56,7 +56,7 @@ public class TeardownSectionTests extends AbstractClientYamlTestFragmentParserTe public void testParseWithSkip() throws Exception { parser = createParser(YamlXContent.yamlXContent, " - skip:\n" + - " version: 
\"2.0.0 - 2.3.0\"\n" + + " version: \"5.0.0 - 5.3.0\"\n" + " reason: \"there is a reason\"\n" + " - do:\n" + " delete:\n" + @@ -75,8 +75,8 @@ public class TeardownSectionTests extends AbstractClientYamlTestFragmentParserTe TeardownSection section = TeardownSection.parse(parser); assertThat(section, notNullValue()); assertThat(section.getSkipSection().isEmpty(), equalTo(false)); - assertThat(section.getSkipSection().getLowerVersion(), equalTo(Version.V_2_0_0)); - assertThat(section.getSkipSection().getUpperVersion(), equalTo(Version.V_2_3_0)); + assertThat(section.getSkipSection().getLowerVersion(), equalTo(Version.V_5_0_0)); + assertThat(section.getSkipSection().getUpperVersion(), equalTo(Version.V_5_3_0_UNRELEASED)); assertThat(section.getSkipSection().getReason(), equalTo("there is a reason")); assertThat(section.getDoSections().size(), equalTo(2)); assertThat(section.getDoSections().get(0).getApiCallSection().getApi(), equalTo("delete")); diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java index c97188e7016..eee31dd09b4 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java @@ -46,21 +46,22 @@ public class VersionUtilsTests extends ESTestCase { assertTrue(got.onOrBefore(Version.CURRENT)); // sub range - got = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_5_0_0_alpha1); - assertTrue(got.onOrAfter(Version.V_2_0_0)); - assertTrue(got.onOrBefore(Version.V_5_0_0_alpha1)); + got = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + Version.V_6_0_0_alpha1_UNRELEASED); + assertTrue(got.onOrAfter(Version.V_5_0_0)); + assertTrue(got.onOrBefore(Version.V_6_0_0_alpha1_UNRELEASED)); // unbounded lower - got = VersionUtils.randomVersionBetween(random(), null, Version.V_5_0_0_alpha1); + got = VersionUtils.randomVersionBetween(random(), null, Version.V_6_0_0_alpha1_UNRELEASED); assertTrue(got.onOrAfter(VersionUtils.getFirstVersion())); - assertTrue(got.onOrBefore(Version.V_5_0_0_alpha1)); + assertTrue(got.onOrBefore(Version.V_6_0_0_alpha1_UNRELEASED)); got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.allReleasedVersions().get(0)); assertTrue(got.onOrAfter(VersionUtils.getFirstVersion())); assertTrue(got.onOrBefore(VersionUtils.allReleasedVersions().get(0))); // unbounded upper - got = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, null); - assertTrue(got.onOrAfter(Version.V_2_0_0)); + got = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, null); + assertTrue(got.onOrAfter(Version.V_5_0_0)); assertTrue(got.onOrBefore(Version.CURRENT)); got = VersionUtils.randomVersionBetween(random(), VersionUtils.getPreviousVersion(), null); assertTrue(got.onOrAfter(VersionUtils.getPreviousVersion())); @@ -71,8 +72,9 @@ public class VersionUtilsTests extends ESTestCase { assertEquals(got, VersionUtils.getFirstVersion()); got = VersionUtils.randomVersionBetween(random(), Version.CURRENT, Version.CURRENT); assertEquals(got, Version.CURRENT); - got = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0_alpha1, Version.V_5_0_0_alpha1); - assertEquals(got, Version.V_5_0_0_alpha1); + got = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha1_UNRELEASED, + Version.V_6_0_0_alpha1_UNRELEASED); + assertEquals(got, Version.V_6_0_0_alpha1_UNRELEASED); // implicit range of one got = 
VersionUtils.randomVersionBetween(random(), null, VersionUtils.getFirstVersion());
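
Taken together, these assertions pin down the randomVersionBetween contract: a null bound is unbounded on that side and both bounds are inclusive. A compact restatement using only names that appear above (illustrative only):

    // [null, v] -> from the first known version up to v (inclusive)
    // [v, null] -> from v up to Version.CURRENT (inclusive)
    // [v, v]    -> exactly v
    Version picked = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, null);
    assertTrue(picked.onOrAfter(Version.V_5_0_0));
    assertTrue(picked.onOrBefore(Version.CURRENT));
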