Merge branch 'master' into feature/client_aggs_parsing
commit cf89fb86b5

@@ -117,7 +117,7 @@ For Eclipse, go to `Preferences->Java->Installed JREs` and add `-ea` to

Please follow these formatting guidelines:

* Java indent is 4 spaces
* Line width is 100 characters
* Line width is 140 characters
* The rest is left to Java coding standards
* Disable “auto-format on save” to prevent unnecessary format changes, which make reviews much harder. If your IDE supports formatting only modified chunks, that is fine to do.
* Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail. Please configure your IDE so it does not create them, and if this document lacks instructions for your IDE, please send a PR adding them.

@ -311,16 +311,9 @@ class BuildPlugin implements Plugin<Project> {
|
|||
/**
|
||||
* Returns a closure which can be used with a MavenPom for fixing problems with gradle generated poms.
|
||||
*
|
||||
* <ul>
|
||||
* <li>Remove transitive dependencies. We currently exclude all artifacts explicitly instead of using wildcards
|
||||
* as Ivy incorrectly translates POMs with * excludes to Ivy XML with * excludes which results in the main artifact
|
||||
* being excluded as well (see https://issues.apache.org/jira/browse/IVY-1531). Note that Gradle 2.14+ automatically
|
||||
* translates non-transitive dependencies to * excludes. We should revisit this when upgrading Gradle.</li>
|
||||
* <li>Set compile time deps back to compile from runtime (known issue with maven-publish plugin)</li>
|
||||
* </ul>
|
||||
* The current fixup is to set compile time deps back to compile from runtime (known issue with maven-publish plugin).
|
||||
*/
|
||||
private static Closure fixupDependencies(Project project) {
|
||||
// TODO: revisit this when upgrading to Gradle 2.14+, see Javadoc comment above
|
||||
return { XmlProvider xml ->
|
||||
// first find if we have dependencies at all, and grab the node
|
||||
NodeList depsNodes = xml.asNode().get('dependencies')
@ -22,7 +22,7 @@
|
|||
suppress the check there but enforce it everywhere else. This prevents the list from getting longer even if it is
|
||||
unfair. -->
|
||||
<module name="LineLength">
|
||||
<property name="max" value="100"/>
|
||||
<property name="max" value="140"/>
|
||||
</module>
|
||||
|
||||
<module name="AvoidStarImport" />
@ -16,6 +16,6 @@ eclipse.preferences.version=1
|
|||
# org.eclipse.jdt.core.compiler.problem.potentialNullReference=warning
|
||||
|
||||
org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning
|
||||
org.eclipse.jdt.core.formatter.lineSplit=100
|
||||
org.eclipse.jdt.core.formatter.lineSplit=140
|
||||
org.eclipse.jdt.core.formatter.tabulation.char=space
|
||||
org.eclipse.jdt.core.formatter.tabulation.size=4
@ -35,48 +35,6 @@ public class Version implements Comparable<Version> {
|
|||
* values below 25 are for alpha builds (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99
* indicating a release. The (internal) format of the id is there so we can easily do after/before checks on the id.
*/
|
||||
public static final int V_2_0_0_ID = 2000099;
|
||||
public static final Version V_2_0_0 = new Version(V_2_0_0_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
public static final int V_2_0_1_ID = 2000199;
|
||||
public static final Version V_2_0_1 = new Version(V_2_0_1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
public static final int V_2_0_2_ID = 2000299;
|
||||
public static final Version V_2_0_2 = new Version(V_2_0_2_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
|
||||
public static final int V_2_1_0_ID = 2010099;
|
||||
public static final Version V_2_1_0 = new Version(V_2_1_0_ID, org.apache.lucene.util.Version.LUCENE_5_3_1);
|
||||
public static final int V_2_1_1_ID = 2010199;
|
||||
public static final Version V_2_1_1 = new Version(V_2_1_1_ID, org.apache.lucene.util.Version.LUCENE_5_3_1);
|
||||
public static final int V_2_1_2_ID = 2010299;
|
||||
public static final Version V_2_1_2 = new Version(V_2_1_2_ID, org.apache.lucene.util.Version.LUCENE_5_3_1);
|
||||
public static final int V_2_2_0_ID = 2020099;
|
||||
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
|
||||
public static final int V_2_2_1_ID = 2020199;
|
||||
public static final Version V_2_2_1 = new Version(V_2_2_1_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
|
||||
public static final int V_2_2_2_ID = 2020299;
|
||||
public static final Version V_2_2_2 = new Version(V_2_2_2_ID, org.apache.lucene.util.Version.LUCENE_5_4_1);
|
||||
public static final int V_2_3_0_ID = 2030099;
|
||||
public static final Version V_2_3_0 = new Version(V_2_3_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
|
||||
public static final int V_2_3_1_ID = 2030199;
|
||||
public static final Version V_2_3_1 = new Version(V_2_3_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
|
||||
public static final int V_2_3_2_ID = 2030299;
|
||||
public static final Version V_2_3_2 = new Version(V_2_3_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
|
||||
public static final int V_2_3_3_ID = 2030399;
|
||||
public static final Version V_2_3_3 = new Version(V_2_3_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
|
||||
public static final int V_2_3_4_ID = 2030499;
|
||||
public static final Version V_2_3_4 = new Version(V_2_3_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
|
||||
public static final int V_2_3_5_ID = 2030599;
|
||||
public static final Version V_2_3_5 = new Version(V_2_3_5_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
|
||||
public static final int V_2_4_0_ID = 2040099;
|
||||
public static final Version V_2_4_0 = new Version(V_2_4_0_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
|
||||
public static final int V_2_4_1_ID = 2040199;
|
||||
public static final Version V_2_4_1 = new Version(V_2_4_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
|
||||
public static final int V_2_4_2_ID = 2040299;
|
||||
public static final Version V_2_4_2 = new Version(V_2_4_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
|
||||
public static final int V_2_4_3_ID = 2040399;
|
||||
public static final Version V_2_4_3 = new Version(V_2_4_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
|
||||
public static final int V_2_4_4_ID = 2040499;
|
||||
public static final Version V_2_4_4 = new Version(V_2_4_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
|
||||
public static final int V_2_4_5_ID = 2040599;
|
||||
public static final Version V_2_4_5 = new Version(V_2_4_5_ID, org.apache.lucene.util.Version.LUCENE_5_5_2);
|
||||
public static final int V_5_0_0_alpha1_ID = 5000001;
|
||||
public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
|
||||
public static final int V_5_0_0_alpha2_ID = 5000002;
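As a rough illustration of the packed id format described in the comment at the top of this hunk (a sketch, not part of the diff; the helper names are made up):

    // e.g. V_2_3_4_ID == 2030499 -> 2.3.4 with build marker 99 (a release),
    // and V_5_0_0_alpha1_ID == 5000001 -> 5.0.0 with build marker 1 (an alpha build)
    static int toVersionId(int major, int minor, int revision, int build) {
        return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
    }

    static String describeVersionId(int id) {
        int build = id % 100;             // 99 = release, < 25 alpha, 25-49 beta, 50-98 RC
        int revision = (id / 100) % 100;
        int minor = (id / 10_000) % 100;
        int major = id / 1_000_000;
        return major + "." + minor + "." + revision + " (build marker " + build + ")";
    }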
@ -182,48 +140,6 @@ public class Version implements Comparable<Version> {
|
|||
return V_5_0_0_alpha2;
|
||||
case V_5_0_0_alpha1_ID:
|
||||
return V_5_0_0_alpha1;
|
||||
case V_2_4_5_ID:
|
||||
return V_2_4_5;
|
||||
case V_2_4_4_ID:
|
||||
return V_2_4_4;
|
||||
case V_2_4_3_ID:
|
||||
return V_2_4_3;
|
||||
case V_2_4_2_ID:
|
||||
return V_2_4_2;
|
||||
case V_2_4_1_ID:
|
||||
return V_2_4_1;
|
||||
case V_2_4_0_ID:
|
||||
return V_2_4_0;
|
||||
case V_2_3_5_ID:
|
||||
return V_2_3_5;
|
||||
case V_2_3_4_ID:
|
||||
return V_2_3_4;
|
||||
case V_2_3_3_ID:
|
||||
return V_2_3_3;
|
||||
case V_2_3_2_ID:
|
||||
return V_2_3_2;
|
||||
case V_2_3_1_ID:
|
||||
return V_2_3_1;
|
||||
case V_2_3_0_ID:
|
||||
return V_2_3_0;
|
||||
case V_2_2_2_ID:
|
||||
return V_2_2_2;
|
||||
case V_2_2_1_ID:
|
||||
return V_2_2_1;
|
||||
case V_2_2_0_ID:
|
||||
return V_2_2_0;
|
||||
case V_2_1_2_ID:
|
||||
return V_2_1_2;
|
||||
case V_2_1_1_ID:
|
||||
return V_2_1_1;
|
||||
case V_2_1_0_ID:
|
||||
return V_2_1_0;
|
||||
case V_2_0_2_ID:
|
||||
return V_2_0_2;
|
||||
case V_2_0_1_ID:
|
||||
return V_2_0_1;
|
||||
case V_2_0_0_ID:
|
||||
return V_2_0_0;
|
||||
default:
|
||||
return new Version(id, org.apache.lucene.util.Version.LATEST);
|
||||
}
@ -37,6 +37,8 @@ import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction;
|
|||
import org.elasticsearch.action.admin.cluster.node.tasks.get.TransportGetTaskAction;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
|
||||
import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction;
|
||||
import org.elasticsearch.action.admin.cluster.remote.RemoteInfoAction;
|
||||
import org.elasticsearch.action.admin.cluster.remote.TransportRemoteInfoAction;
|
||||
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction;
|
||||
import org.elasticsearch.action.admin.cluster.repositories.delete.TransportDeleteRepositoryAction;
|
||||
import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesAction;
|
||||
|
@ -235,6 +237,7 @@ import org.elasticsearch.rest.action.admin.cluster.RestNodesStatsAction;
|
|||
import org.elasticsearch.rest.action.admin.cluster.RestPendingClusterTasksAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.RestPutRepositoryAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.RestPutStoredScriptAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.RestRemoteClusterInfoAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.RestRestoreSnapshotAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.RestSnapshotsStatusAction;
|
||||
import org.elasticsearch.rest.action.admin.cluster.RestVerifyRepositoryAction;
|
||||
|
@ -400,6 +403,7 @@ public class ActionModule extends AbstractModule {
|
|||
|
||||
actions.register(MainAction.INSTANCE, TransportMainAction.class);
|
||||
actions.register(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class);
|
||||
actions.register(RemoteInfoAction.INSTANCE, TransportRemoteInfoAction.class);
|
||||
actions.register(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
|
||||
actions.register(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
|
||||
actions.register(ListTasksAction.INSTANCE, TransportListTasksAction.class);
|
||||
|
@ -509,6 +513,7 @@ public class ActionModule extends AbstractModule {
|
|||
};
|
||||
registerHandler.accept(new RestMainAction(settings, restController));
|
||||
registerHandler.accept(new RestNodesInfoAction(settings, restController, settingsFilter));
|
||||
registerHandler.accept(new RestRemoteClusterInfoAction(settings, restController));
|
||||
registerHandler.accept(new RestNodesStatsAction(settings, restController));
|
||||
registerHandler.accept(new RestNodesHotThreadsAction(settings, restController));
|
||||
registerHandler.accept(new RestClusterAllocationExplainAction(settings, restController));
@ -0,0 +1,43 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.remote;
|
||||
|
||||
import org.elasticsearch.action.Action;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public final class RemoteInfoAction extends Action<RemoteInfoRequest, RemoteInfoResponse, RemoteInfoRequestBuilder> {
|
||||
|
||||
public static final String NAME = "cluster:monitor/remote/info";
|
||||
public static final RemoteInfoAction INSTANCE = new RemoteInfoAction();
|
||||
|
||||
public RemoteInfoAction() {
|
||||
super(NAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
public RemoteInfoRequestBuilder newRequestBuilder(ElasticsearchClient client) {
|
||||
return new RemoteInfoRequestBuilder(client, INSTANCE);
|
||||
}
|
||||
|
||||
@Override
|
||||
public RemoteInfoResponse newResponse() {
|
||||
return new RemoteInfoResponse();
|
||||
}
|
||||
}
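A hypothetical usage sketch (not part of this diff) of the new action from client code, where `client` is assumed to be an `ElasticsearchClient`:

    RemoteInfoAction.INSTANCE.newRequestBuilder(client).execute(new ActionListener<RemoteInfoResponse>() {
        @Override
        public void onResponse(RemoteInfoResponse response) {
            // render the per-cluster connection info, e.g. via response.toXContent(...)
        }

        @Override
        public void onFailure(Exception e) {
            // handle the failure
        }
    });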
@ -0,0 +1,32 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.remote;
|
||||
|
||||
import org.elasticsearch.action.ActionRequest;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
|
||||
public final class RemoteInfoRequest extends ActionRequest {
|
||||
|
||||
@Override
|
||||
public ActionRequestValidationException validate() {
|
||||
return null;
|
||||
}
|
||||
|
||||
}
@ -0,0 +1,30 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.remote;
|
||||
|
||||
import org.elasticsearch.action.ActionRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
|
||||
public final class RemoteInfoRequestBuilder extends ActionRequestBuilder<RemoteInfoRequest, RemoteInfoResponse, RemoteInfoRequestBuilder> {
|
||||
|
||||
public RemoteInfoRequestBuilder(ElasticsearchClient client, RemoteInfoAction action) {
|
||||
super(client, action, new RemoteInfoRequest());
|
||||
}
|
||||
}
@ -0,0 +1,67 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.remote;
|
||||
|
||||
import org.elasticsearch.action.ActionResponse;
|
||||
import org.elasticsearch.action.search.RemoteConnectionInfo;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.xcontent.ToXContentObject;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
public final class RemoteInfoResponse extends ActionResponse implements ToXContentObject {
|
||||
|
||||
private List<RemoteConnectionInfo> infos;
|
||||
|
||||
RemoteInfoResponse() {
|
||||
}
|
||||
|
||||
RemoteInfoResponse(Collection<RemoteConnectionInfo> infos) {
|
||||
this.infos = Collections.unmodifiableList(new ArrayList<>(infos));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
super.writeTo(out);
|
||||
out.writeList(infos);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
infos = in.readList(RemoteConnectionInfo::new);
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
for (RemoteConnectionInfo info : infos) {
|
||||
info.toXContent(builder, params);
|
||||
}
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
}
@ -0,0 +1,53 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.cluster.remote;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.search.RemoteClusterService;
|
||||
import org.elasticsearch.action.search.SearchTransportService;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.HandledTransportAction;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
||||
public final class TransportRemoteInfoAction extends HandledTransportAction<RemoteInfoRequest, RemoteInfoResponse> {
|
||||
|
||||
private final RemoteClusterService remoteClusterService;
|
||||
|
||||
@Inject
|
||||
public TransportRemoteInfoAction(Settings settings, ThreadPool threadPool, TransportService transportService,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
SearchTransportService searchTransportService) {
|
||||
super(settings, RemoteInfoAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
|
||||
RemoteInfoRequest::new);
|
||||
this.remoteClusterService = searchTransportService.getRemoteClusterService();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doExecute(RemoteInfoRequest remoteInfoRequest, ActionListener<RemoteInfoResponse> listener) {
|
||||
remoteClusterService.getRemoteConnectionInfos(ActionListener.wrap(remoteConnectionInfos
|
||||
-> listener.onResponse(new RemoteInfoResponse(remoteConnectionInfos)), listener::onFailure));
|
||||
}
|
||||
}
@ -126,9 +126,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
|
|||
}
|
||||
}
|
||||
type = in.readOptionalString();
|
||||
if (in.getVersion().onOrAfter(Version.V_2_2_0)) {
|
||||
attributes = (Map<String, Object>) in.readGenericValue();
|
||||
}
|
||||
attributes = (Map<String, Object>) in.readGenericValue();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -141,9 +139,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
|
|||
out.writeOptionalVInt(positionLength > 1 ? positionLength : null);
|
||||
}
|
||||
out.writeOptionalString(type);
|
||||
if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
|
||||
out.writeGenericValue(attributes);
|
||||
}
|
||||
out.writeGenericValue(attributes);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -200,9 +196,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
|
|||
for (int i = 0; i < size; i++) {
|
||||
tokens.add(AnalyzeToken.readAnalyzeToken(in));
|
||||
}
|
||||
if (in.getVersion().onOrAfter(Version.V_2_2_0)) {
|
||||
detail = in.readOptionalStreamable(DetailAnalyzeResponse::new);
|
||||
}
|
||||
detail = in.readOptionalStreamable(DetailAnalyzeResponse::new);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -216,9 +210,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
|
|||
} else {
|
||||
out.writeVInt(0);
|
||||
}
|
||||
if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
|
||||
out.writeOptionalStreamable(detail);
|
||||
}
|
||||
out.writeOptionalStreamable(detail);
|
||||
}
|
||||
|
||||
static final class Fields {
@ -63,9 +63,4 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
|
|||
logger.trace("{} flush request executed on replica", replica.shardId());
|
||||
return new ReplicaResult();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean shouldExecuteReplication(IndexMetaData indexMetaData) {
|
||||
return true;
|
||||
}
|
||||
}
@ -66,9 +66,4 @@ public class TransportShardRefreshAction
|
|||
logger.trace("{} refresh request executed on replica", replica.shardId());
|
||||
return new ReplicaResult();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected boolean shouldExecuteReplication(IndexMetaData indexMetaData) {
|
||||
return true;
|
||||
}
|
||||
}
@ -317,7 +317,7 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
|
|||
/*
|
||||
* If we noop-ed the entire batch then just skip to the next batch or the BulkRequest would fail validation.
|
||||
*/
|
||||
startNextScroll(thisBatchStartTime, 0);
|
||||
startNextScroll(thisBatchStartTime, timeValueNanos(System.nanoTime()), 0);
|
||||
return;
|
||||
}
|
||||
request.timeout(mainRequest.getTimeout());
|
||||
|
@ -400,7 +400,7 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
|
|||
return;
|
||||
}
|
||||
|
||||
startNextScroll(thisBatchStartTime, response.getItems().length);
|
||||
startNextScroll(thisBatchStartTime, timeValueNanos(System.nanoTime()), response.getItems().length);
|
||||
} catch (Exception t) {
|
||||
finishHim(t);
|
||||
}
|
||||
|
@ -412,12 +412,12 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
|
|||
* @param lastBatchSize the number of requests sent in the last batch. This is used to calculate the throttling values which are applied
|
||||
* when the scroll returns
|
||||
*/
|
||||
void startNextScroll(TimeValue lastBatchStartTime, int lastBatchSize) {
|
||||
void startNextScroll(TimeValue lastBatchStartTime, TimeValue now, int lastBatchSize) {
|
||||
if (task.isCancelled()) {
|
||||
finishHim(null);
|
||||
return;
|
||||
}
|
||||
TimeValue extraKeepAlive = task.throttleWaitTime(lastBatchStartTime, lastBatchSize);
|
||||
TimeValue extraKeepAlive = task.throttleWaitTime(lastBatchStartTime, now, lastBatchSize);
|
||||
scrollSource.startNextScroll(extraKeepAlive, response -> {
|
||||
onScrollResponse(lastBatchStartTime, lastBatchSize, response);
|
||||
});
@ -178,14 +178,14 @@ public class WorkingBulkByScrollTask extends BulkByScrollTask implements Success
|
|||
AbstractRunnable prepareBulkRequestRunnable) {
|
||||
// Synchronize so we are less likely to schedule the same request twice.
|
||||
synchronized (delayedPrepareBulkRequestReference) {
|
||||
TimeValue delay = throttleWaitTime(lastBatchStartTime, lastBatchSize);
|
||||
TimeValue delay = throttleWaitTime(lastBatchStartTime, timeValueNanos(System.nanoTime()), lastBatchSize);
|
||||
delayedPrepareBulkRequestReference.set(new DelayedPrepareBulkRequest(threadPool, getRequestsPerSecond(),
|
||||
delay, new RunOnce(prepareBulkRequestRunnable)));
|
||||
}
|
||||
}
|
||||
|
||||
TimeValue throttleWaitTime(TimeValue lastBatchStartTime, int lastBatchSize) {
|
||||
long earliestNextBatchStartTime = lastBatchStartTime.nanos() + (long) perfectlyThrottledBatchTime(lastBatchSize);
|
||||
TimeValue throttleWaitTime(TimeValue lastBatchStartTime, TimeValue now, int lastBatchSize) {
|
||||
long earliestNextBatchStartTime = now.nanos() + (long) perfectlyThrottledBatchTime(lastBatchSize);
|
||||
return timeValueNanos(max(0, earliestNextBatchStartTime - System.nanoTime()));
|
||||
}
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.fieldstats;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.ValidateActions;
|
||||
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
|
||||
|
@ -200,9 +199,7 @@ public class FieldStatsRequest extends BroadcastRequest<FieldStatsRequest> {
|
|||
out.writeByte(indexConstraint.getProperty().getId());
|
||||
out.writeByte(indexConstraint.getComparison().getId());
|
||||
out.writeString(indexConstraint.getValue());
|
||||
if (out.getVersion().onOrAfter(Version.V_2_0_1)) {
|
||||
out.writeOptionalString(indexConstraint.getOptionalFormat());
|
||||
}
|
||||
out.writeOptionalString(indexConstraint.getOptionalFormat());
|
||||
}
|
||||
out.writeString(level);
|
||||
out.writeBoolean(useCache);
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.action.fieldstats;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -39,11 +38,7 @@ public class IndexConstraint {
|
|||
this.property = Property.read(input.readByte());
|
||||
this.comparison = Comparison.read(input.readByte());
|
||||
this.value = input.readString();
|
||||
if (input.getVersion().onOrAfter(Version.V_2_0_1)) {
|
||||
this.optionalFormat = input.readOptionalString();
|
||||
} else {
|
||||
this.optionalFormat = null;
|
||||
}
|
||||
this.optionalFormat = input.readOptionalString();
|
||||
}
|
||||
|
||||
public IndexConstraint(String field, Property property, Comparison comparison, String value) {
@ -68,13 +68,6 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
|
|||
@Override
|
||||
protected void resolveRequest(ClusterState state, InternalRequest request) {
|
||||
IndexMetaData indexMeta = state.getMetaData().index(request.concreteIndex());
|
||||
if (request.request().realtime && // if the realtime flag is set
|
||||
request.request().preference() == null && // the preference flag is not already set
|
||||
indexMeta != null && // and we have the index
|
||||
indexMeta.isIndexUsingShadowReplicas()) { // and the index uses shadow replicas
|
||||
// set the preference for the request to use "_primary" automatically
|
||||
request.request().preference(Preference.PRIMARY.type());
|
||||
}
|
||||
// update the routing (request#index here is possibly an alias)
|
||||
request.request().routing(state.metaData().resolveIndexRouting(request.request().parent(), request.request().routing(), request.request().index()));
|
||||
// Fail fast on the node that received the request.
@ -23,6 +23,10 @@ import org.apache.logging.log4j.util.Supplier;
|
|||
import org.apache.lucene.store.AlreadyClosedException;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
|
||||
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction;
|
||||
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
|
||||
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
|
||||
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction;
|
||||
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest;
|
||||
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
|
||||
|
@ -33,6 +37,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
|
|||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.util.CancellableThreads;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
|
@ -54,8 +59,10 @@ import java.io.IOException;
|
|||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ArrayBlockingQueue;
|
||||
import java.util.concurrent.BlockingQueue;
|
||||
|
@ -65,6 +72,7 @@ import java.util.concurrent.RejectedExecutionException;
|
|||
import java.util.concurrent.Semaphore;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* Represents a connection to a single remote cluster. In contrast to a local cluster a remote cluster is not joined such that the
|
||||
|
@ -521,4 +529,71 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
|
|||
return connectedNodes.contains(node);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Fetches connection info for this connection
|
||||
*/
|
||||
public void getConnectionInfo(ActionListener<RemoteConnectionInfo> listener) {
|
||||
final Optional<DiscoveryNode> anyNode = connectedNodes.stream().findAny();
|
||||
if (anyNode.isPresent() == false) {
|
||||
// not connected, return immediately
|
||||
RemoteConnectionInfo remoteConnectionStats = new RemoteConnectionInfo(clusterAlias,
|
||||
Collections.emptyList(), Collections.emptyList(), maxNumRemoteConnections, 0,
|
||||
RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings));
|
||||
listener.onResponse(remoteConnectionStats);
|
||||
} else {
|
||||
NodesInfoRequest request = new NodesInfoRequest();
|
||||
request.clear();
|
||||
request.http(true);
|
||||
|
||||
transportService.sendRequest(anyNode.get(), NodesInfoAction.NAME, request, new TransportResponseHandler<NodesInfoResponse>() {
|
||||
@Override
|
||||
public NodesInfoResponse newInstance() {
|
||||
return new NodesInfoResponse();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handleResponse(NodesInfoResponse response) {
|
||||
Collection<TransportAddress> httpAddresses = new HashSet<>();
|
||||
for (NodeInfo info : response.getNodes()) {
|
||||
if (connectedNodes.contains(info.getNode()) && info.getHttp() != null) {
|
||||
httpAddresses.add(info.getHttp().getAddress().publishAddress());
|
||||
}
|
||||
}
|
||||
|
||||
if (httpAddresses.size() < maxNumRemoteConnections) {
|
||||
// just in case none of the connected nodes have http enabled, we get other http-enabled nodes instead.
|
||||
for (NodeInfo info : response.getNodes()) {
|
||||
if (nodePredicate.test(info.getNode()) && info.getHttp() != null) {
|
||||
httpAddresses.add(info.getHttp().getAddress().publishAddress());
|
||||
}
|
||||
if (httpAddresses.size() == maxNumRemoteConnections) {
|
||||
break; // once we have enough return...
|
||||
}
|
||||
}
|
||||
}
|
||||
RemoteConnectionInfo remoteConnectionInfo = new RemoteConnectionInfo(clusterAlias,
|
||||
seedNodes.stream().map(n -> n.getAddress()).collect(Collectors.toList()), new ArrayList<>(httpAddresses),
|
||||
maxNumRemoteConnections, connectedNodes.size(),
|
||||
RemoteClusterService.REMOTE_INITIAL_CONNECTION_TIMEOUT_SETTING.get(settings));
|
||||
listener.onResponse(remoteConnectionInfo);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void handleException(TransportException exp) {
|
||||
listener.onFailure(exp);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String executor() {
|
||||
return ThreadPool.Names.SAME;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
int getNumNodesConnected() {
|
||||
return connectedNodes.size();
|
||||
}
|
||||
}
@ -24,9 +24,10 @@ import org.elasticsearch.Version;
|
|||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
|
||||
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
|
||||
import org.elasticsearch.action.support.GroupedActionListener;
|
||||
import org.elasticsearch.action.support.PlainActionFuture;
|
||||
import org.elasticsearch.cluster.metadata.ClusterNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.routing.PlainShardIterator;
|
||||
import org.elasticsearch.cluster.routing.ShardIterator;
|
||||
import org.elasticsearch.common.Booleans;
|
||||
|
@ -51,10 +52,12 @@ import java.net.InetSocketAddress;
|
|||
import java.net.UnknownHostException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
|
@ -111,11 +114,13 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
|
|||
|
||||
private final TransportService transportService;
|
||||
private final int numRemoteConnections;
|
||||
private final ClusterNameExpressionResolver clusterNameResolver;
|
||||
private volatile Map<String, RemoteClusterConnection> remoteClusters = Collections.emptyMap();
|
||||
|
||||
RemoteClusterService(Settings settings, TransportService transportService) {
|
||||
super(settings);
|
||||
this.transportService = transportService;
|
||||
this.clusterNameResolver = new ClusterNameExpressionResolver(settings);
|
||||
numRemoteConnections = REMOTE_CONNECTIONS_PER_CLUSTER.get(settings);
|
||||
}
|
||||
|
||||
|
@ -203,25 +208,30 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
|
|||
*/
|
||||
Map<String, List<String>> groupClusterIndices(String[] requestIndices, Predicate<String> indexExists) {
|
||||
Map<String, List<String>> perClusterIndices = new HashMap<>();
|
||||
Set<String> remoteClusterNames = this.remoteClusters.keySet();
|
||||
for (String index : requestIndices) {
|
||||
int i = index.indexOf(REMOTE_CLUSTER_INDEX_SEPARATOR);
|
||||
String indexName = index;
|
||||
String clusterName = LOCAL_CLUSTER_GROUP_KEY;
|
||||
if (i >= 0) {
|
||||
String remoteClusterName = index.substring(0, i);
|
||||
if (isRemoteClusterRegistered(remoteClusterName)) {
|
||||
List<String> clusters = clusterNameResolver.resolveClusterNames(remoteClusterNames, remoteClusterName);
|
||||
if (clusters.isEmpty() == false) {
|
||||
if (indexExists.test(index)) {
|
||||
// we use ':' as a separator for remote clusters; this might conflict with an index that is actually named
// remote_cluster_alias:index_name. For that case we fail the request; the user can easily change the cluster alias
// if that happens
|
||||
throw new IllegalArgumentException("Can not filter indices; index " + index +
|
||||
" exists but there is also a remote cluster named: " + remoteClusterName);
|
||||
}
|
||||
String indexName = index.substring(i + 1);
|
||||
for (String clusterName : clusters) {
|
||||
perClusterIndices.computeIfAbsent(clusterName, k -> new ArrayList<>()).add(indexName);
|
||||
}
|
||||
indexName = index.substring(i + 1);
|
||||
clusterName = remoteClusterName;
|
||||
} else {
|
||||
perClusterIndices.computeIfAbsent(LOCAL_CLUSTER_GROUP_KEY, k -> new ArrayList<>()).add(index);
|
||||
}
|
||||
} else {
|
||||
perClusterIndices.computeIfAbsent(LOCAL_CLUSTER_GROUP_KEY, k -> new ArrayList<>()).add(index);
|
||||
}
|
||||
perClusterIndices.computeIfAbsent(clusterName, k -> new ArrayList<String>()).add(indexName);
|
||||
}
|
||||
return perClusterIndices;
|
||||
}
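To illustrate the grouping above (a hypothetical example; the alias and index names are made up): with a remote cluster registered under the alias "cluster_one", an expression array splits roughly as follows.

    // assumes no local index literally named "cluster_one:logs" exists,
    // otherwise the guard above throws IllegalArgumentException
    Map<String, List<String>> grouped = remoteClusterService.groupClusterIndices(
            new String[] { "cluster_one:logs", "local-index" }, index -> false);
    // grouped maps "cluster_one" -> [logs] and LOCAL_CLUSTER_GROUP_KEY -> [local-index]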
@ -413,4 +423,17 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
|
|||
public void close() throws IOException {
|
||||
IOUtils.close(remoteClusters.values());
|
||||
}
|
||||
|
||||
public void getRemoteConnectionInfos(ActionListener<Collection<RemoteConnectionInfo>> listener) {
|
||||
final Map<String, RemoteClusterConnection> remoteClusters = this.remoteClusters;
|
||||
if (remoteClusters.isEmpty()) {
|
||||
listener.onResponse(Collections.emptyList());
|
||||
} else {
|
||||
final GroupedActionListener<RemoteConnectionInfo> actionListener = new GroupedActionListener<>(listener,
|
||||
remoteClusters.size(), Collections.emptyList());
|
||||
for (RemoteClusterConnection connection : remoteClusters.values()) {
|
||||
connection.getConnectionInfo(actionListener);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
@ -0,0 +1,116 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.action.search;
|
||||
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* This class encapsulates all remote cluster information to be rendered on
|
||||
* <tt>_remote/info</tt> requests.
|
||||
*/
|
||||
public final class RemoteConnectionInfo implements ToXContent, Writeable {
|
||||
final List<TransportAddress> seedNodes;
|
||||
final List<TransportAddress> httpAddresses;
|
||||
final int connectionsPerCluster;
|
||||
final TimeValue initialConnectionTimeout;
|
||||
final int numNodesConnected;
|
||||
final String clusterAlias;
|
||||
|
||||
RemoteConnectionInfo(String clusterAlias, List<TransportAddress> seedNodes,
|
||||
List<TransportAddress> httpAddresses,
|
||||
int connectionsPerCluster, int numNodesConnected,
|
||||
TimeValue initialConnectionTimeout) {
|
||||
this.clusterAlias = clusterAlias;
|
||||
this.seedNodes = seedNodes;
|
||||
this.httpAddresses = httpAddresses;
|
||||
this.connectionsPerCluster = connectionsPerCluster;
|
||||
this.numNodesConnected = numNodesConnected;
|
||||
this.initialConnectionTimeout = initialConnectionTimeout;
|
||||
}
|
||||
|
||||
public RemoteConnectionInfo(StreamInput input) throws IOException {
|
||||
seedNodes = input.readList(TransportAddress::new);
|
||||
httpAddresses = input.readList(TransportAddress::new);
|
||||
connectionsPerCluster = input.readVInt();
|
||||
initialConnectionTimeout = new TimeValue(input);
|
||||
numNodesConnected = input.readVInt();
|
||||
clusterAlias = input.readString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject(clusterAlias);
|
||||
{
|
||||
builder.startArray("seeds");
|
||||
for (TransportAddress addr : seedNodes) {
|
||||
builder.value(addr.toString());
|
||||
}
|
||||
builder.endArray();
|
||||
builder.startArray("http_addresses");
|
||||
for (TransportAddress addr : httpAddresses) {
|
||||
builder.value(addr.toString());
|
||||
}
|
||||
builder.endArray();
|
||||
builder.field("connected", numNodesConnected > 0);
|
||||
builder.field("num_nodes_connected", numNodesConnected);
|
||||
builder.field("max_connections_per_cluster", connectionsPerCluster);
|
||||
builder.field("initial_connect_timeout", initialConnectionTimeout);
|
||||
}
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeList(seedNodes);
|
||||
out.writeList(httpAddresses);
|
||||
out.writeVInt(connectionsPerCluster);
|
||||
initialConnectionTimeout.writeTo(out);
|
||||
out.writeVInt(numNodesConnected);
|
||||
out.writeString(clusterAlias);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
RemoteConnectionInfo that = (RemoteConnectionInfo) o;
|
||||
return connectionsPerCluster == that.connectionsPerCluster &&
|
||||
numNodesConnected == that.numNodesConnected &&
|
||||
Objects.equals(seedNodes, that.seedNodes) &&
|
||||
Objects.equals(httpAddresses, that.httpAddresses) &&
|
||||
Objects.equals(initialConnectionTimeout, that.initialConnectionTimeout) &&
|
||||
Objects.equals(clusterAlias, that.clusterAlias);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(seedNodes, httpAddresses, connectionsPerCluster, initialConnectionTimeout, numNodesConnected, clusterAlias);
|
||||
}
|
||||
}
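For orientation, the toXContent implementation above renders one object per cluster alias; a response for a single remote cluster would look roughly like this (all values are made up):

    {
      "cluster_one": {
        "seeds": [ "127.0.0.1:9300" ],
        "http_addresses": [ "127.0.0.1:9200" ],
        "connected": true,
        "num_nodes_connected": 2,
        "max_connections_per_cluster": 3,
        "initial_connect_timeout": "30s"
      }
    }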
@ -60,7 +60,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
|
|||
|
||||
/** The maximum number of shards for a single search request. */
|
||||
public static final Setting<Long> SHARD_COUNT_LIMIT_SETTING = Setting.longSetting(
|
||||
"action.search.shard_count.limit", 1000L, 1L, Property.Dynamic, Property.NodeScope);
|
||||
"action.search.shard_count.limit", Long.MAX_VALUE, 1L, Property.Dynamic, Property.NodeScope);
|
||||
|
||||
private final ClusterService clusterService;
|
||||
private final SearchTransportService searchTransportService;
@ -0,0 +1,81 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.action.support;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.common.util.concurrent.CountDown;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
/**
|
||||
* An action listener that delegates its results to another listener once
* it has received one or more failures or N results. This allows synchronous
* tasks to be forked off in a loop with the same listener and to respond to a
* higher-level listener once all tasks have responded.
|
||||
*/
|
||||
public final class GroupedActionListener<T> implements ActionListener<T> {
|
||||
private final CountDown countDown;
|
||||
private final AtomicInteger pos = new AtomicInteger();
|
||||
private final AtomicArray<T> roles;
|
||||
private final ActionListener<Collection<T>> delegate;
|
||||
private final Collection<T> defaults;
|
||||
private final AtomicReference<Exception> failure = new AtomicReference<>();
|
||||
|
||||
/**
|
||||
* Creates a new listener
|
||||
* @param delegate the delegate listener
|
||||
* @param groupSize the group size
|
||||
*/
|
||||
public GroupedActionListener(ActionListener<Collection<T>> delegate, int groupSize,
|
||||
Collection<T> defaults) {
|
||||
roles = new AtomicArray<>(groupSize);
|
||||
countDown = new CountDown(groupSize);
|
||||
this.delegate = delegate;
|
||||
this.defaults = defaults;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onResponse(T element) {
|
||||
roles.set(pos.incrementAndGet() - 1, element);
|
||||
if (countDown.countDown()) {
|
||||
if (failure.get() != null) {
|
||||
delegate.onFailure(failure.get());
|
||||
} else {
|
||||
List<T> collect = this.roles.asList();
|
||||
collect.addAll(defaults);
|
||||
delegate.onResponse(Collections.unmodifiableList(collect));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
if (failure.compareAndSet(null, e) == false) {
|
||||
failure.get().addSuppressed(e);
|
||||
}
|
||||
if (countDown.countDown()) {
|
||||
delegate.onFailure(failure.get());
|
||||
}
|
||||
}
|
||||
}
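A hypothetical usage sketch (not part of the diff): fork several tasks against one grouped listener and get exactly one callback with all results, mirroring how RemoteClusterService#getRemoteConnectionInfos uses it earlier in this diff.

    GroupedActionListener<String> grouped = new GroupedActionListener<>(
            ActionListener.wrap(
                    results -> System.out.println("all results: " + results),
                    e -> System.err.println("at least one task failed: " + e)),
            3, Collections.emptyList());
    grouped.onResponse("a");
    grouped.onResponse("b");
    grouped.onResponse("c"); // the wrapped listener fires here, once, with all three results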
@ -74,7 +74,6 @@ public class ReplicationOperation<
|
|||
*/
|
||||
private final AtomicInteger pendingActions = new AtomicInteger();
|
||||
private final AtomicInteger successfulShards = new AtomicInteger();
|
||||
private final boolean executeOnReplicas;
|
||||
private final Primary<Request, ReplicaRequest, PrimaryResultT> primary;
|
||||
private final Replicas<ReplicaRequest> replicasProxy;
|
||||
private final AtomicBoolean finished = new AtomicBoolean();
|
||||
|
@ -86,9 +85,8 @@ public class ReplicationOperation<
|
|||
|
||||
public ReplicationOperation(Request request, Primary<Request, ReplicaRequest, PrimaryResultT> primary,
|
||||
ActionListener<PrimaryResultT> listener,
|
||||
boolean executeOnReplicas, Replicas<ReplicaRequest> replicas,
|
||||
Replicas<ReplicaRequest> replicas,
|
||||
Supplier<ClusterState> clusterStateSupplier, Logger logger, String opType) {
|
||||
this.executeOnReplicas = executeOnReplicas;
|
||||
this.replicasProxy = replicas;
|
||||
this.primary = primary;
|
||||
this.resultListener = listener;
|
||||
|
@ -160,7 +158,7 @@ public class ReplicationOperation<
|
|||
final String localNodeId = primary.routingEntry().currentNodeId();
|
||||
// If the index gets deleted after primary operation, we skip replication
|
||||
for (final ShardRouting shard : shards) {
|
||||
if (executeOnReplicas == false || shard.unassigned()) {
|
||||
if (shard.unassigned()) {
|
||||
if (shard.primary() == false) {
|
||||
totalShards.incrementAndGet();
|
||||
}
@ -319,11 +319,10 @@ public abstract class TransportReplicationAction<
|
|||
} else {
|
||||
setPhase(replicationTask, "primary");
|
||||
final IndexMetaData indexMetaData = clusterService.state().getMetaData().index(request.shardId().getIndex());
|
||||
final boolean executeOnReplicas = (indexMetaData == null) || shouldExecuteReplication(indexMetaData);
|
||||
final ActionListener<Response> listener = createResponseListener(primaryShardReference);
|
||||
createReplicatedOperation(request,
|
||||
ActionListener.wrap(result -> result.respond(listener), listener::onFailure),
|
||||
primaryShardReference, executeOnReplicas)
|
||||
primaryShardReference)
|
||||
.execute();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
|
@ -371,9 +370,9 @@ public abstract class TransportReplicationAction<
|
|||
|
||||
protected ReplicationOperation<Request, ReplicaRequest, PrimaryResult<ReplicaRequest, Response>> createReplicatedOperation(
|
||||
Request request, ActionListener<PrimaryResult<ReplicaRequest, Response>> listener,
|
||||
PrimaryShardReference primaryShardReference, boolean executeOnReplicas) {
|
||||
PrimaryShardReference primaryShardReference) {
|
||||
return new ReplicationOperation<>(request, primaryShardReference, listener,
|
||||
executeOnReplicas, replicasProxy, clusterService::state, logger, actionName);
|
||||
replicasProxy, clusterService::state, logger, actionName);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -909,14 +908,6 @@ public abstract class TransportReplicationAction<
|
|||
indexShard.acquirePrimaryOperationLock(onAcquired, executor);
|
||||
}
|
||||
|
||||
/**
|
||||
* Indicates whether this operation should be replicated to shadow replicas or not. If this method returns false, the replication phase
|
||||
* will be skipped. For example writes such as index and delete don't need to be replicated on shadow replicas but refresh and flush do.
|
||||
*/
|
||||
protected boolean shouldExecuteReplication(IndexMetaData indexMetaData) {
|
||||
return indexMetaData.isIndexUsingShadowReplicas() == false;
|
||||
}
|
||||
|
||||
class ShardReference implements Releasable {
|
||||
|
||||
protected final IndexShard indexShard;
@ -338,13 +338,12 @@ final class Bootstrap {
|
|||
|
||||
INSTANCE.setup(true, environment);
|
||||
|
||||
/* TODO: close this once s3 repository doesn't try to read during repository construction
|
||||
try {
|
||||
// any secure settings must be read during node construction
|
||||
IOUtils.close(keystore);
|
||||
} catch (IOException e) {
|
||||
throw new BootstrapException(e);
|
||||
}*/
|
||||
}
|
||||
|
||||
INSTANCE.start();
|
||||
|
||||
|
|
|
@ -45,7 +45,16 @@ public abstract class EnvironmentAwareCommand extends Command {
|
|||
final Map<String, String> settings = new HashMap<>();
|
||||
for (final KeyValuePair kvp : settingOption.values(options)) {
|
||||
if (kvp.value.isEmpty()) {
|
||||
throw new UserException(ExitCodes.USAGE, "Setting [" + kvp.key + "] must not be empty");
|
||||
throw new UserException(ExitCodes.USAGE, "setting [" + kvp.key + "] must not be empty");
|
||||
}
|
||||
if (settings.containsKey(kvp.key)) {
|
||||
final String message = String.format(
|
||||
Locale.ROOT,
|
||||
"setting [%s] already set, saw [%s] and [%s]",
|
||||
kvp.key,
|
||||
settings.get(kvp.key),
|
||||
kvp.value);
|
||||
throw new UserException(ExitCodes.USAGE, message);
|
||||
}
|
||||
settings.put(kvp.key, kvp.value);
|
||||
}
@ -383,13 +383,6 @@ public class InternalClusterInfoService extends AbstractComponent
|
|||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("shard: {} size: {}", sid, size);
|
||||
}
|
||||
if (indexMeta != null && indexMeta.isIndexUsingShadowReplicas()) {
|
||||
// Shards on a shared filesystem should be considered of size 0
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("shard: {} is using shadow replicas and will be treated as size 0", sid);
|
||||
}
|
||||
size = 0;
|
||||
}
|
||||
newShardSizes.put(sid, size);
|
||||
}
|
||||
}
@ -0,0 +1,100 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.cluster.metadata;
|
||||
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.regex.Regex;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* Resolves cluster names from an expression. The expression must be the exact match of a cluster
|
||||
* name or must be a wildcard expression.
|
||||
*/
|
||||
public final class ClusterNameExpressionResolver extends AbstractComponent {
|
||||
|
||||
private final WildcardExpressionResolver wildcardResolver = new WildcardExpressionResolver();
|
||||
|
||||
public ClusterNameExpressionResolver(Settings settings) {
|
||||
super(settings);
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolves the provided cluster expression to matching cluster names. This method only
|
||||
* supports exact or wildcard matches.
|
||||
*
|
||||
* @param remoteClusters the aliases for remote clusters
|
||||
* @param clusterExpression the expressions that can be resolved to cluster names.
|
||||
* @return the resolved cluster aliases.
|
||||
*/
|
||||
public List<String> resolveClusterNames(Set<String> remoteClusters, String clusterExpression) {
|
||||
if (remoteClusters.contains(clusterExpression)) {
|
||||
return Collections.singletonList(clusterExpression);
|
||||
} else if (Regex.isSimpleMatchPattern(clusterExpression)) {
|
||||
return wildcardResolver.resolve(remoteClusters, clusterExpression);
|
||||
} else {
|
||||
return Collections.emptyList();
|
||||
}
|
||||
}
|
||||
|
||||
private static class WildcardExpressionResolver {
|
||||
|
||||
private List<String> resolve(Set<String> remoteClusters, String clusterExpression) {
|
||||
if (isTrivialWildcard(clusterExpression)) {
|
||||
return resolveTrivialWildcard(remoteClusters);
|
||||
}
|
||||
|
||||
Set<String> matches = matches(remoteClusters, clusterExpression);
|
||||
if (matches.isEmpty()) {
|
||||
return Collections.emptyList();
|
||||
} else {
|
||||
return new ArrayList<>(matches);
|
||||
}
|
||||
}
|
||||
|
||||
private boolean isTrivialWildcard(String clusterExpression) {
|
||||
return Regex.isMatchAllPattern(clusterExpression);
|
||||
}
|
||||
|
||||
private List<String> resolveTrivialWildcard(Set<String> remoteClusters) {
|
||||
return new ArrayList<>(remoteClusters);
|
||||
}
|
||||
|
||||
private static Set<String> matches(Set<String> remoteClusters, String expression) {
|
||||
if (expression.indexOf("*") == expression.length() - 1) {
|
||||
return otherWildcard(remoteClusters, expression);
|
||||
} else {
|
||||
return otherWildcard(remoteClusters, expression);
|
||||
}
|
||||
}
|
||||
|
||||
private static Set<String> otherWildcard(Set<String> remoteClusters, String expression) {
|
||||
final String pattern = expression;
|
||||
return remoteClusters.stream()
|
||||
.filter(n -> Regex.simpleMatch(pattern, n))
|
||||
.collect(Collectors.toSet());
|
||||
}
|
||||
}
|
||||
}
|
|
@ -192,18 +192,11 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
|
|||
public static final String SETTING_NUMBER_OF_REPLICAS = "index.number_of_replicas";
|
||||
public static final Setting<Integer> INDEX_NUMBER_OF_REPLICAS_SETTING =
|
||||
Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, Property.Dynamic, Property.IndexScope);
|
||||
public static final String SETTING_SHADOW_REPLICAS = "index.shadow_replicas";
|
||||
public static final Setting<Boolean> INDEX_SHADOW_REPLICAS_SETTING =
|
||||
Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, Property.IndexScope, Property.Deprecated);
|
||||
|
||||
public static final String SETTING_ROUTING_PARTITION_SIZE = "index.routing_partition_size";
|
||||
public static final Setting<Integer> INDEX_ROUTING_PARTITION_SIZE_SETTING =
|
||||
Setting.intSetting(SETTING_ROUTING_PARTITION_SIZE, 1, 1, Property.IndexScope);
|
||||
|
||||
public static final String SETTING_SHARED_FILESYSTEM = "index.shared_filesystem";
|
||||
public static final Setting<Boolean> INDEX_SHARED_FILESYSTEM_SETTING =
|
||||
Setting.boolSetting(SETTING_SHARED_FILESYSTEM, INDEX_SHADOW_REPLICAS_SETTING, Property.IndexScope, Property.Deprecated);
|
||||
|
||||
public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas";
|
||||
public static final Setting<AutoExpandReplicas> INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING;
|
||||
public static final String SETTING_READ_ONLY = "index.blocks.read_only";
|
||||
|
@ -240,10 +233,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
|
|||
public static final String SETTING_DATA_PATH = "index.data_path";
|
||||
public static final Setting<String> INDEX_DATA_PATH_SETTING =
|
||||
new Setting<>(SETTING_DATA_PATH, "", Function.identity(), Property.IndexScope);
|
||||
public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node";
|
||||
public static final Setting<Boolean> INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING =
|
||||
Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false,
|
||||
Property.Dynamic, Property.IndexScope, Property.Deprecated);
|
||||
public static final String INDEX_UUID_NA_VALUE = "_na_";
|
||||
|
||||
public static final String INDEX_ROUTING_REQUIRE_GROUP_PREFIX = "index.routing.allocation.require";
|
||||
|
@ -1237,35 +1226,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContent {
|
|||
}
|
||||
}
|
||||
|
||||
private static final DeprecationLogger deprecationLogger = new DeprecationLogger(ESLoggerFactory.getLogger(IndexMetaData.class));
|
||||
|
||||
/**
|
||||
* Returns <code>true</code> iff the given settings indicate that the index
|
||||
* associated with these settings allocates it's shards on a shared
|
||||
* filesystem. Otherwise <code>false</code>. The default setting for this
|
||||
* is the returned value from
|
||||
* {@link #isIndexUsingShadowReplicas(org.elasticsearch.common.settings.Settings)}.
|
||||
*/
|
||||
public boolean isOnSharedFilesystem(Settings settings) {
|
||||
// don't use the setting directly, not to trigger verbose deprecation logging
|
||||
return settings.getAsBooleanLenientForPreEs6Indices(
|
||||
this.indexCreatedVersion, SETTING_SHARED_FILESYSTEM, isIndexUsingShadowReplicas(settings), deprecationLogger);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns <code>true</code> iff the given settings indicate that the index associated
|
||||
* with these settings uses shadow replicas. Otherwise <code>false</code>. The default
|
||||
* setting for this is <code>false</code>.
|
||||
*/
|
||||
public boolean isIndexUsingShadowReplicas() {
|
||||
return isIndexUsingShadowReplicas(this.settings);
|
||||
}
|
||||
|
||||
public boolean isIndexUsingShadowReplicas(Settings settings) {
|
||||
// don't use the setting directly, not to trigger verbose deprecation logging
|
||||
return settings.getAsBooleanLenientForPreEs6Indices(this.indexCreatedVersion, SETTING_SHADOW_REPLICAS, false, deprecationLogger);
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds human readable version and creation date settings.
|
||||
* This method is used to display the settings in a human readable format in REST API
|
||||
|
|
|
@ -433,10 +433,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
|
|||
.put(indexMetaData, false)
|
||||
.build();
|
||||
|
||||
String maybeShadowIndicator = indexMetaData.isIndexUsingShadowReplicas() ? "s" : "";
|
||||
logger.info("[{}] creating index, cause [{}], templates {}, shards [{}]/[{}{}], mappings {}",
|
||||
logger.info("[{}] creating index, cause [{}], templates {}, shards [{}]/[{}], mappings {}",
|
||||
request.index(), request.cause(), templateNames, indexMetaData.getNumberOfShards(),
|
||||
indexMetaData.getNumberOfReplicas(), maybeShadowIndicator, mappings.keySet());
|
||||
indexMetaData.getNumberOfReplicas(), mappings.keySet());
|
||||
|
||||
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
|
||||
if (!request.blocks().isEmpty()) {
|
||||
|
|
|
@ -139,8 +139,7 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
|
|||
"allocation set " + inSyncAllocationIds);
|
||||
}
|
||||
|
||||
if (indexMetaData.isIndexUsingShadowReplicas() == false && // see #20650
|
||||
shardRouting.primary() && shardRouting.initializing() && shardRouting.relocating() == false &&
|
||||
if (shardRouting.primary() && shardRouting.initializing() && shardRouting.relocating() == false &&
|
||||
RecoverySource.isInitialRecovery(shardRouting.recoverySource().getType()) == false &&
|
||||
inSyncAllocationIds.contains(shardRouting.allocationId().getId()) == false)
|
||||
throw new IllegalStateException("a primary shard routing " + shardRouting + " is a primary that is recovering from " +
|
||||
|
|
|
@ -69,6 +69,12 @@ public interface RoutingChangesObserver {
|
|||
*/
|
||||
void replicaPromoted(ShardRouting replicaShard);
|
||||
|
||||
/**
|
||||
* Called when an initializing replica is reinitialized. This happens when a primary relocation completes, which
|
||||
* reinitializes all currently initializing replicas as their recovery source node changes
|
||||
*/
|
||||
void initializedReplicaReinitialized(ShardRouting oldReplica, ShardRouting reinitializedReplica);
|
||||
|
||||
|
||||
/**
|
||||
* Abstract implementation of {@link RoutingChangesObserver} that does not take any action. Useful for subclasses that only override
|
||||
|
@ -120,6 +126,11 @@ public interface RoutingChangesObserver {
|
|||
public void replicaPromoted(ShardRouting replicaShard) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void initializedReplicaReinitialized(ShardRouting oldReplica, ShardRouting reinitializedReplica) {
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
class DelegatingRoutingChangesObserver implements RoutingChangesObserver {
|
||||
|
@ -192,5 +203,12 @@ public interface RoutingChangesObserver {
|
|||
routingChangesObserver.replicaPromoted(replicaShard);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void initializedReplicaReinitialized(ShardRouting oldReplica, ShardRouting reinitializedReplica) {
|
||||
for (RoutingChangesObserver routingChangesObserver : routingChangesObservers) {
|
||||
routingChangesObserver.initializedReplicaReinitialized(oldReplica, reinitializedReplica);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -451,6 +451,9 @@ public class RoutingNodes implements Iterable<RoutingNode> {
|
|||
*
|
||||
* Moves the initializing shard to started. If the shard is a relocation target, also removes the relocation source.
|
||||
*
|
||||
* If the started shard is a primary relocation target, this also reinitializes currently initializing replicas as their
|
||||
* recovery source changes
|
||||
*
|
||||
* @return the started shard
|
||||
*/
|
||||
public ShardRouting startShard(Logger logger, ShardRouting initializingShard, RoutingChangesObserver routingChangesObserver) {
|
||||
|
@ -468,6 +471,30 @@ public class RoutingNodes implements Iterable<RoutingNode> {
|
|||
+ initializingShard + " but was: " + relocationSourceShard.getTargetRelocatingShard();
|
||||
remove(relocationSourceShard);
|
||||
routingChangesObserver.relocationCompleted(relocationSourceShard);
|
||||
|
||||
// if this is a primary shard with ongoing replica recoveries, reinitialize them as their recovery source changed
|
||||
if (startedShard.primary()) {
|
||||
List<ShardRouting> assignedShards = assignedShards(startedShard.shardId());
|
||||
// copy list to prevent ConcurrentModificationException
|
||||
for (ShardRouting routing : new ArrayList<>(assignedShards)) {
|
||||
if (routing.initializing() && routing.primary() == false) {
|
||||
if (routing.isRelocationTarget()) {
|
||||
// find the relocation source
|
||||
ShardRouting sourceShard = getByAllocationId(routing.shardId(), routing.allocationId().getRelocationId());
|
||||
// cancel relocation and start relocation to same node again
|
||||
ShardRouting startedReplica = cancelRelocation(sourceShard);
|
||||
remove(routing);
|
||||
routingChangesObserver.shardFailed(routing,
|
||||
new UnassignedInfo(UnassignedInfo.Reason.REINITIALIZED, "primary changed"));
|
||||
relocateShard(startedReplica, sourceShard.relocatingNodeId(),
|
||||
sourceShard.getExpectedShardSize(), routingChangesObserver);
|
||||
} else {
|
||||
ShardRouting reinitializedReplica = reinitReplica(routing);
|
||||
routingChangesObserver.initializedReplicaReinitialized(routing, reinitializedReplica);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return startedShard;
|
||||
}
|
||||
|
@ -540,9 +567,6 @@ public class RoutingNodes implements Iterable<RoutingNode> {
|
|||
if (failedShard.primary()) {
|
||||
// promote active replica to primary if active replica exists (only the case for shadow replicas)
|
||||
ShardRouting activeReplica = activeReplica(failedShard.shardId());
|
||||
assert activeReplica == null || indexMetaData.isIndexUsingShadowReplicas() :
|
||||
"initializing primary [" + failedShard + "] with active replicas [" + activeReplica + "] only expected when " +
|
||||
"using shadow replicas";
|
||||
if (activeReplica == null) {
|
||||
moveToUnassigned(failedShard, unassignedInfo);
|
||||
} else {
|
||||
|
@ -599,10 +623,6 @@ public class RoutingNodes implements Iterable<RoutingNode> {
|
|||
assert activeReplica.started() : "replica relocation should have been cancelled: " + activeReplica;
|
||||
ShardRouting primarySwappedCandidate = promoteActiveReplicaShardToPrimary(activeReplica);
|
||||
routingChangesObserver.replicaPromoted(activeReplica);
|
||||
if (indexMetaData.isIndexUsingShadowReplicas()) {
|
||||
ShardRouting initializedShard = reinitShadowPrimary(primarySwappedCandidate);
|
||||
routingChangesObserver.startedPrimaryReinitialized(primarySwappedCandidate, initializedShard);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -730,6 +750,15 @@ public class RoutingNodes implements Iterable<RoutingNode> {
|
|||
return reinitializedShard;
|
||||
}
|
||||
|
||||
private ShardRouting reinitReplica(ShardRouting shard) {
|
||||
assert shard.primary() == false : "shard must be a replica: " + shard;
|
||||
assert shard.initializing() : "can only reinitialize an initializing replica: " + shard;
|
||||
assert shard.isRelocationTarget() == false : "replication target cannot be reinitialized: " + shard;
|
||||
ShardRouting reinitializedShard = shard.reinitializeReplicaShard();
|
||||
updateAssigned(shard, reinitializedShard);
|
||||
return reinitializedShard;
|
||||
}
|
||||
|
||||
private void updateAssigned(ShardRouting oldShard, ShardRouting newShard) {
|
||||
assert oldShard.shardId().equals(newShard.shardId()) :
|
||||
"can only update " + oldShard + " by shard with same shard id but was " + newShard;
|
||||
|
|
|
@ -393,6 +393,17 @@ public final class ShardRouting implements Writeable, ToXContent {
|
|||
allocationId, UNAVAILABLE_EXPECTED_SHARD_SIZE);
|
||||
}
|
||||
|
||||
/**
|
||||
* Reinitializes a replica shard, giving it a fresh allocation id
|
||||
*/
|
||||
public ShardRouting reinitializeReplicaShard() {
|
||||
assert state == ShardRoutingState.INITIALIZING : this;
|
||||
assert primary == false : this;
|
||||
assert isRelocationTarget() == false : this;
|
||||
return new ShardRouting(shardId, currentNodeId, null, primary, ShardRoutingState.INITIALIZING,
|
||||
recoverySource, unassignedInfo, AllocationId.newInitializing(), expectedShardSize);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the shards state to <code>STARTED</code>. The shards state must be
|
||||
* <code>INITIALIZING</code> or <code>RELOCATING</code>. Any relocation will be
|
||||
|
|
|
@ -41,7 +41,9 @@ import org.elasticsearch.common.inject.Inject;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.gateway.GatewayAllocator;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.function.Function;
|
||||
|
@ -88,6 +90,9 @@ public class AllocationService extends AbstractComponent {
|
|||
routingNodes.unassigned().shuffle();
|
||||
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
|
||||
clusterInfoService.getClusterInfo(), currentNanoTime(), false);
|
||||
// as starting a primary relocation target can reinitialize replica shards, start replicas first
|
||||
startedShards = new ArrayList<>(startedShards);
|
||||
Collections.sort(startedShards, Comparator.comparing(ShardRouting::primary));
|
||||
applyStartedShards(allocation, startedShards);
|
||||
gatewayAllocator.applyStartedShards(allocation, startedShards);
|
||||
reroute(allocation);
|
||||
|
|
|
@ -96,6 +96,17 @@ public class RoutingNodesChangedObserver implements RoutingChangesObserver {
|
|||
setChanged();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void initializedReplicaReinitialized(ShardRouting oldReplica, ShardRouting reinitializedReplica) {
|
||||
assert oldReplica.initializing() && oldReplica.primary() == false :
|
||||
"expected initializing replica shard " + oldReplica;
|
||||
assert reinitializedReplica.initializing() && reinitializedReplica.primary() == false :
|
||||
"expected reinitialized replica shard " + reinitializedReplica;
|
||||
assert oldReplica.allocationId().getId().equals(reinitializedReplica.allocationId().getId()) == false :
|
||||
"expected allocation id to change for reinitialized replica shard (old: " + oldReplica + " new: " + reinitializedReplica + ")";
|
||||
setChanged();
|
||||
}
|
||||
|
||||
/**
|
||||
* Marks the allocation as changed.
|
||||
*/
|
||||
|
|
|
@ -0,0 +1,100 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.settings;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.File;
|
||||
import java.io.InputStream;
|
||||
import java.io.InputStreamReader;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
import joptsimple.OptionSet;
|
||||
import joptsimple.OptionSpec;
|
||||
import org.elasticsearch.cli.EnvironmentAwareCommand;
|
||||
import org.elasticsearch.cli.ExitCodes;
|
||||
import org.elasticsearch.cli.Terminal;
|
||||
import org.elasticsearch.cli.UserException;
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.io.PathUtils;
|
||||
import org.elasticsearch.env.Environment;
|
||||
|
||||
/**
|
||||
* A subcommand for the keystore cli which adds a file setting.
|
||||
*/
|
||||
class AddFileKeyStoreCommand extends EnvironmentAwareCommand {
|
||||
|
||||
private final OptionSpec<Void> forceOption;
|
||||
private final OptionSpec<String> arguments;
|
||||
|
||||
AddFileKeyStoreCommand() {
|
||||
super("Add a file setting to the keystore");
|
||||
this.forceOption = parser.acceptsAll(Arrays.asList("f", "force"), "Overwrite existing setting without prompting");
|
||||
// jopt simple has issue with multiple non options, so we just get one set of them here
|
||||
// and convert to File when necessary
|
||||
// see https://github.com/jopt-simple/jopt-simple/issues/103
|
||||
this.arguments = parser.nonOptions("setting [filepath]");
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
|
||||
KeyStoreWrapper keystore = KeyStoreWrapper.load(env.configFile());
|
||||
if (keystore == null) {
|
||||
throw new UserException(ExitCodes.DATA_ERROR, "Elasticsearch keystore not found. Use 'create' command to create one.");
|
||||
}
|
||||
|
||||
keystore.decrypt(new char[0] /* TODO: prompt for password when they are supported */);
|
||||
|
||||
List<String> argumentValues = arguments.values(options);
|
||||
if (argumentValues.size() == 0) {
|
||||
throw new UserException(ExitCodes.USAGE, "Missing setting name");
|
||||
}
|
||||
String setting = argumentValues.get(0);
|
||||
if (keystore.getSettingNames().contains(setting) && options.has(forceOption) == false) {
|
||||
if (terminal.promptYesNo("Setting " + setting + " already exists. Overwrite?", false) == false) {
|
||||
terminal.println("Exiting without modifying keystore.");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (argumentValues.size() == 1) {
|
||||
throw new UserException(ExitCodes.USAGE, "Missing file name");
|
||||
}
|
||||
Path file = getPath(argumentValues.get(1));
|
||||
if (Files.exists(file) == false) {
|
||||
throw new UserException(ExitCodes.IO_ERROR, "File [" + file.toString() + "] does not exist");
|
||||
}
|
||||
if (argumentValues.size() > 2) {
|
||||
throw new UserException(ExitCodes.USAGE, "Unrecognized extra arguments [" +
|
||||
String.join(", ", argumentValues.subList(2, argumentValues.size())) + "] after filepath");
|
||||
}
|
||||
keystore.setFile(setting, Files.readAllBytes(file));
|
||||
keystore.save(env.configFile());
|
||||
}
|
||||
|
||||
@SuppressForbidden(reason="file arg for cli")
|
||||
private Path getPath(String file) {
|
||||
return PathUtils.get(file);
|
||||
}
|
||||
}
|
|
@ -368,7 +368,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
|
|||
TribeService.TRIBE_NAME_SETTING,
|
||||
NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING,
|
||||
NodeEnvironment.ENABLE_LUCENE_SEGMENT_INFOS_TRACE_SETTING,
|
||||
NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH,
|
||||
OsService.REFRESH_INTERVAL_SETTING,
|
||||
ProcessService.REFRESH_INTERVAL_SETTING,
|
||||
JvmService.REFRESH_INTERVAL_SETTING,
|
||||
|
|
|
@ -70,13 +70,10 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
|
|||
IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING,
|
||||
IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING,
|
||||
IndexMetaData.INDEX_ROUTING_PARTITION_SIZE_SETTING,
|
||||
IndexMetaData.INDEX_SHADOW_REPLICAS_SETTING,
|
||||
IndexMetaData.INDEX_SHARED_FILESYSTEM_SETTING,
|
||||
IndexMetaData.INDEX_READ_ONLY_SETTING,
|
||||
IndexMetaData.INDEX_BLOCKS_READ_SETTING,
|
||||
IndexMetaData.INDEX_BLOCKS_WRITE_SETTING,
|
||||
IndexMetaData.INDEX_BLOCKS_METADATA_SETTING,
|
||||
IndexMetaData.INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING,
|
||||
IndexMetaData.INDEX_PRIORITY_SETTING,
|
||||
IndexMetaData.INDEX_DATA_PATH_SETTING,
|
||||
SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING,
|
||||
|
|
|
@ -32,6 +32,7 @@ public class KeyStoreCli extends MultiCommand {
|
|||
subcommands.put("create", new CreateKeyStoreCommand());
|
||||
subcommands.put("list", new ListKeyStoreCommand());
|
||||
subcommands.put("add", new AddStringKeyStoreCommand());
|
||||
subcommands.put("add-file", new AddStringKeyStoreCommand());
|
||||
subcommands.put("remove", new RemoveSettingKeyStoreCommand());
|
||||
}
|
||||
|
||||
|
|
|
@ -25,7 +25,6 @@ import javax.crypto.spec.PBEKeySpec;
|
|||
import javax.security.auth.DestroyFailedException;
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.nio.CharBuffer;
|
||||
|
@ -41,10 +40,14 @@ import java.security.KeyStore;
|
|||
import java.security.KeyStoreException;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Base64;
|
||||
import java.util.Enumeration;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import org.apache.lucene.codecs.CodecUtil;
|
||||
import org.apache.lucene.store.BufferedChecksumIndexInput;
|
||||
|
@ -54,7 +57,6 @@ import org.apache.lucene.store.IndexInput;
|
|||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.apache.lucene.store.SimpleFSDirectory;
|
||||
import org.apache.lucene.util.SetOnce;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
|
||||
/**
|
||||
* A wrapper around a Java KeyStore which provides supplements the keystore with extra metadata.
|
||||
|
@ -67,29 +69,52 @@ import org.elasticsearch.ElasticsearchException;
|
|||
*/
|
||||
public class KeyStoreWrapper implements SecureSettings {
|
||||
|
||||
/** An identifier for the type of data that may be stored in a keystore entry. */
|
||||
private enum KeyType {
|
||||
STRING,
|
||||
FILE
|
||||
}
|
||||
|
||||
/** The name of the keystore file to read and write. */
|
||||
private static final String KEYSTORE_FILENAME = "elasticsearch.keystore";
|
||||
|
||||
/** The version of the metadata written before the keystore data. */
|
||||
private static final int FORMAT_VERSION = 1;
|
||||
private static final int FORMAT_VERSION = 2;
|
||||
|
||||
/** The oldest metadata format version that can be read. */
|
||||
private static final int MIN_FORMAT_VERSION = 1;
|
||||
|
||||
/** The keystore type for a newly created keystore. */
|
||||
private static final String NEW_KEYSTORE_TYPE = "PKCS12";
|
||||
|
||||
/** The algorithm used to store password for a newly created keystore. */
|
||||
private static final String NEW_KEYSTORE_SECRET_KEY_ALGO = "PBE";//"PBEWithHmacSHA256AndAES_128";
|
||||
/** The algorithm used to store string setting contents. */
|
||||
private static final String NEW_KEYSTORE_STRING_KEY_ALGO = "PBE";
|
||||
|
||||
/** The algorithm used to store file setting contents. */
|
||||
private static final String NEW_KEYSTORE_FILE_KEY_ALGO = "PBE";
|
||||
|
||||
/** An encoder to check whether string values are ascii. */
|
||||
private static final CharsetEncoder ASCII_ENCODER = StandardCharsets.US_ASCII.newEncoder();
|
||||
|
||||
/** The metadata format version used to read the current keystore wrapper. */
|
||||
private final int formatVersion;
|
||||
|
||||
/** True iff the keystore has a password needed to read. */
|
||||
private final boolean hasPassword;
|
||||
|
||||
/** The type of the keystore, as passed to {@link java.security.KeyStore#getInstance(String)} */
|
||||
private final String type;
|
||||
|
||||
/** A factory necessary for constructing instances of secrets in a {@link KeyStore}. */
|
||||
private final SecretKeyFactory secretFactory;
|
||||
/** A factory necessary for constructing instances of string secrets in a {@link KeyStore}. */
|
||||
private final SecretKeyFactory stringFactory;
|
||||
|
||||
/** A factory necessary for constructing instances of file secrets in a {@link KeyStore}. */
|
||||
private final SecretKeyFactory fileFactory;
|
||||
|
||||
/**
|
||||
* The settings that exist in the keystore, mapped to their type of data.
|
||||
*/
|
||||
private final Map<String, KeyType> settingTypes;
|
||||
|
||||
/** The raw bytes of the encrypted keystore. */
|
||||
private final byte[] keystoreBytes;
|
||||
|
@ -100,17 +125,19 @@ public class KeyStoreWrapper implements SecureSettings {
|
|||
/** The password for the keystore. See {@link #decrypt(char[])}. */
|
||||
private final SetOnce<KeyStore.PasswordProtection> keystorePassword = new SetOnce<>();
|
||||
|
||||
/** The setting names contained in the loaded keystore. */
|
||||
private final Set<String> settingNames = new HashSet<>();
|
||||
|
||||
private KeyStoreWrapper(boolean hasPassword, String type, String secretKeyAlgo, byte[] keystoreBytes) {
|
||||
private KeyStoreWrapper(int formatVersion, boolean hasPassword, String type,
|
||||
String stringKeyAlgo, String fileKeyAlgo,
|
||||
Map<String, KeyType> settingTypes, byte[] keystoreBytes) {
|
||||
this.formatVersion = formatVersion;
|
||||
this.hasPassword = hasPassword;
|
||||
this.type = type;
|
||||
try {
|
||||
secretFactory = SecretKeyFactory.getInstance(secretKeyAlgo);
|
||||
stringFactory = SecretKeyFactory.getInstance(stringKeyAlgo);
|
||||
fileFactory = SecretKeyFactory.getInstance(fileKeyAlgo);
|
||||
} catch (NoSuchAlgorithmException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
this.settingTypes = settingTypes;
|
||||
this.keystoreBytes = keystoreBytes;
|
||||
}
|
||||
|
||||
|
@ -121,7 +148,8 @@ public class KeyStoreWrapper implements SecureSettings {
|
|||
|
||||
/** Constructs a new keystore with the given password. */
|
||||
static KeyStoreWrapper create(char[] password) throws Exception {
|
||||
KeyStoreWrapper wrapper = new KeyStoreWrapper(password.length != 0, NEW_KEYSTORE_TYPE, NEW_KEYSTORE_SECRET_KEY_ALGO, null);
|
||||
KeyStoreWrapper wrapper = new KeyStoreWrapper(FORMAT_VERSION, password.length != 0, NEW_KEYSTORE_TYPE,
|
||||
NEW_KEYSTORE_STRING_KEY_ALGO, NEW_KEYSTORE_FILE_KEY_ALGO, new HashMap<>(), null);
|
||||
KeyStore keyStore = KeyStore.getInstance(NEW_KEYSTORE_TYPE);
|
||||
keyStore.load(null, null);
|
||||
wrapper.keystore.set(keyStore);
|
||||
|
@ -144,7 +172,7 @@ public class KeyStoreWrapper implements SecureSettings {
|
|||
SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
|
||||
try (IndexInput indexInput = directory.openInput(KEYSTORE_FILENAME, IOContext.READONCE)) {
|
||||
ChecksumIndexInput input = new BufferedChecksumIndexInput(indexInput);
|
||||
CodecUtil.checkHeader(input, KEYSTORE_FILENAME, FORMAT_VERSION, FORMAT_VERSION);
|
||||
int formatVersion = CodecUtil.checkHeader(input, KEYSTORE_FILENAME, MIN_FORMAT_VERSION, FORMAT_VERSION);
|
||||
byte hasPasswordByte = input.readByte();
|
||||
boolean hasPassword = hasPasswordByte == 1;
|
||||
if (hasPassword == false && hasPasswordByte != 0) {
|
||||
|
@ -152,11 +180,25 @@ public class KeyStoreWrapper implements SecureSettings {
|
|||
+ String.format(Locale.ROOT, "%02x", hasPasswordByte));
|
||||
}
|
||||
String type = input.readString();
|
||||
String secretKeyAlgo = input.readString();
|
||||
String stringKeyAlgo = input.readString();
|
||||
final String fileKeyAlgo;
|
||||
if (formatVersion >= 2) {
|
||||
fileKeyAlgo = input.readString();
|
||||
} else {
|
||||
fileKeyAlgo = NEW_KEYSTORE_FILE_KEY_ALGO;
|
||||
}
|
||||
final Map<String, KeyType> settingTypes;
|
||||
if (formatVersion >= 2) {
|
||||
settingTypes = input.readMapOfStrings().entrySet().stream().collect(Collectors.toMap(
|
||||
Map.Entry::getKey,
|
||||
e -> KeyType.valueOf(e.getValue())));
|
||||
} else {
|
||||
settingTypes = new HashMap<>();
|
||||
}
|
||||
byte[] keystoreBytes = new byte[input.readInt()];
|
||||
input.readBytes(keystoreBytes, 0, keystoreBytes.length);
|
||||
CodecUtil.checkFooter(input);
|
||||
return new KeyStoreWrapper(hasPassword, type, secretKeyAlgo, keystoreBytes);
|
||||
return new KeyStoreWrapper(formatVersion, hasPassword, type, stringKeyAlgo, fileKeyAlgo, settingTypes, keystoreBytes);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -189,10 +231,24 @@ public class KeyStoreWrapper implements SecureSettings {
|
|||
keystorePassword.set(new KeyStore.PasswordProtection(password));
|
||||
Arrays.fill(password, '\0');
|
||||
|
||||
// convert keystore aliases enum into a set for easy lookup
|
||||
|
||||
Enumeration<String> aliases = keystore.get().aliases();
|
||||
while (aliases.hasMoreElements()) {
|
||||
settingNames.add(aliases.nextElement());
|
||||
if (formatVersion == 1) {
|
||||
while (aliases.hasMoreElements()) {
|
||||
settingTypes.put(aliases.nextElement(), KeyType.STRING);
|
||||
}
|
||||
} else {
|
||||
// verify integrity: keys in keystore match what the metadata thinks exist
|
||||
Set<String> expectedSettings = new HashSet<>(settingTypes.keySet());
|
||||
while (aliases.hasMoreElements()) {
|
||||
String settingName = aliases.nextElement();
|
||||
if (expectedSettings.remove(settingName) == false) {
|
||||
throw new SecurityException("Keystore has been corrupted or tampered with");
|
||||
}
|
||||
}
|
||||
if (expectedSettings.isEmpty() == false) {
|
||||
throw new SecurityException("Keystore has been corrupted or tampered with");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -206,8 +262,19 @@ public class KeyStoreWrapper implements SecureSettings {
|
|||
try (IndexOutput output = directory.createOutput(tmpFile, IOContext.DEFAULT)) {
|
||||
CodecUtil.writeHeader(output, KEYSTORE_FILENAME, FORMAT_VERSION);
|
||||
output.writeByte(password.length == 0 ? (byte)0 : (byte)1);
|
||||
output.writeString(type);
|
||||
output.writeString(secretFactory.getAlgorithm());
|
||||
output.writeString(NEW_KEYSTORE_TYPE);
|
||||
output.writeString(NEW_KEYSTORE_STRING_KEY_ALGO);
|
||||
output.writeString(NEW_KEYSTORE_FILE_KEY_ALGO);
|
||||
output.writeMapOfStrings(settingTypes.entrySet().stream().collect(Collectors.toMap(
|
||||
Map.Entry::getKey,
|
||||
e -> e.getValue().name())));
|
||||
|
||||
// TODO: in the future if we ever change any algorithms used above, we need
|
||||
// to create a new KeyStore here instead of using the existing one, so that
|
||||
// the encoded material inside the keystore is updated
|
||||
assert type.equals(NEW_KEYSTORE_TYPE) : "keystore type changed";
|
||||
assert stringFactory.getAlgorithm().equals(NEW_KEYSTORE_STRING_KEY_ALGO) : "string pbe algo changed";
|
||||
assert fileFactory.getAlgorithm().equals(NEW_KEYSTORE_FILE_KEY_ALGO) : "file pbe algo changed";
|
||||
|
||||
ByteArrayOutputStream keystoreBytesStream = new ByteArrayOutputStream();
|
||||
keystore.get().store(keystoreBytesStream, password);
|
||||
|
@ -228,25 +295,51 @@ public class KeyStoreWrapper implements SecureSettings {
|
|||
|
||||
@Override
|
||||
public Set<String> getSettingNames() {
|
||||
return settingNames;
|
||||
return settingTypes.keySet();
|
||||
}
|
||||
|
||||
// TODO: make settings accessible only to code that registered the setting
|
||||
/** Retrieve a string setting. The {@link SecureString} should be closed once it is used. */
|
||||
@Override
|
||||
public SecureString getString(String setting) throws GeneralSecurityException {
|
||||
KeyStore.Entry entry = keystore.get().getEntry(setting, keystorePassword.get());
|
||||
if (entry instanceof KeyStore.SecretKeyEntry == false) {
|
||||
if (settingTypes.get(setting) != KeyType.STRING ||
|
||||
entry instanceof KeyStore.SecretKeyEntry == false) {
|
||||
throw new IllegalStateException("Secret setting " + setting + " is not a string");
|
||||
}
|
||||
// TODO: only allow getting a setting once?
|
||||
KeyStore.SecretKeyEntry secretKeyEntry = (KeyStore.SecretKeyEntry) entry;
|
||||
PBEKeySpec keySpec = (PBEKeySpec) secretFactory.getKeySpec(secretKeyEntry.getSecretKey(), PBEKeySpec.class);
|
||||
PBEKeySpec keySpec = (PBEKeySpec) stringFactory.getKeySpec(secretKeyEntry.getSecretKey(), PBEKeySpec.class);
|
||||
SecureString value = new SecureString(keySpec.getPassword());
|
||||
keySpec.clearPassword();
|
||||
return value;
|
||||
}
|
||||
|
||||
@Override
|
||||
public InputStream getFile(String setting) throws GeneralSecurityException {
|
||||
KeyStore.Entry entry = keystore.get().getEntry(setting, keystorePassword.get());
|
||||
if (settingTypes.get(setting) != KeyType.FILE ||
|
||||
entry instanceof KeyStore.SecretKeyEntry == false) {
|
||||
throw new IllegalStateException("Secret setting " + setting + " is not a file");
|
||||
}
|
||||
KeyStore.SecretKeyEntry secretKeyEntry = (KeyStore.SecretKeyEntry) entry;
|
||||
PBEKeySpec keySpec = (PBEKeySpec) fileFactory.getKeySpec(secretKeyEntry.getSecretKey(), PBEKeySpec.class);
|
||||
// The PBE keyspec gives us chars, we first convert to bytes, then decode base64 inline.
|
||||
char[] chars = keySpec.getPassword();
|
||||
byte[] bytes = new byte[chars.length];
|
||||
for (int i = 0; i < bytes.length; ++i) {
|
||||
bytes[i] = (byte)chars[i]; // PBE only stores the lower 8 bits, so this narrowing is ok
|
||||
}
|
||||
keySpec.clearPassword(); // wipe the original copy
|
||||
InputStream bytesStream = new ByteArrayInputStream(bytes) {
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
super.close();
|
||||
Arrays.fill(bytes, (byte)0); // wipe our second copy when the stream is exhausted
|
||||
}
|
||||
};
|
||||
return Base64.getDecoder().wrap(bytesStream);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set a string setting.
|
||||
*
|
||||
|
@ -256,15 +349,27 @@ public class KeyStoreWrapper implements SecureSettings {
|
|||
if (ASCII_ENCODER.canEncode(CharBuffer.wrap(value)) == false) {
|
||||
throw new IllegalArgumentException("Value must be ascii");
|
||||
}
|
||||
SecretKey secretKey = secretFactory.generateSecret(new PBEKeySpec(value));
|
||||
SecretKey secretKey = stringFactory.generateSecret(new PBEKeySpec(value));
|
||||
keystore.get().setEntry(setting, new KeyStore.SecretKeyEntry(secretKey), keystorePassword.get());
|
||||
settingNames.add(setting);
|
||||
settingTypes.put(setting, KeyType.STRING);
|
||||
}
|
||||
|
||||
/** Set a file setting. */
|
||||
void setFile(String setting, byte[] bytes) throws GeneralSecurityException {
|
||||
bytes = Base64.getEncoder().encode(bytes);
|
||||
char[] chars = new char[bytes.length];
|
||||
for (int i = 0; i < chars.length; ++i) {
|
||||
chars[i] = (char)bytes[i]; // PBE only stores the lower 8 bits, so this narrowing is ok
|
||||
}
|
||||
SecretKey secretKey = stringFactory.generateSecret(new PBEKeySpec(chars));
|
||||
keystore.get().setEntry(setting, new KeyStore.SecretKeyEntry(secretKey), keystorePassword.get());
|
||||
settingTypes.put(setting, KeyType.FILE);
|
||||
}
|
||||
|
||||
/** Remove the given setting from the keystore. */
|
||||
void remove(String setting) throws KeyStoreException {
|
||||
keystore.get().deleteEntry(setting);
|
||||
settingNames.remove(setting);
|
||||
settingTypes.remove(setting);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
package org.elasticsearch.common.settings;
|
||||
|
||||
import java.io.InputStream;
|
||||
import java.security.GeneralSecurityException;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashSet;
|
||||
|
@ -137,5 +138,26 @@ public abstract class SecureSetting<T> extends Setting<T> {
|
|||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* A setting which contains a file. Reading the setting opens an input stream to the file.
|
||||
*
|
||||
* This may be any sensitive file, e.g. a set of credentials normally in plaintext.
|
||||
*/
|
||||
public static Setting<InputStream> secureFile(String name, Setting<InputStream> fallback,
|
||||
Property... properties) {
|
||||
return new SecureSetting<InputStream>(name, properties) {
|
||||
@Override
|
||||
protected InputStream getSecret(SecureSettings secureSettings) throws GeneralSecurityException {
|
||||
return secureSettings.getFile(getKey());
|
||||
}
|
||||
@Override
|
||||
InputStream getFallback(Settings settings) {
|
||||
if (fallback != null) {
|
||||
return fallback.get(settings);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.common.settings;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.InputStream;
|
||||
import java.security.GeneralSecurityException;
|
||||
import java.util.Set;
|
||||
|
||||
|
@ -36,4 +37,7 @@ public interface SecureSettings extends Closeable {
|
|||
|
||||
/** Return a string setting. The {@link SecureString} should be closed once it is used. */
|
||||
SecureString getString(String setting) throws GeneralSecurityException;
|
||||
|
||||
/** Return a file setting. The {@link InputStream} should be closed once it is used. */
|
||||
InputStream getFile(String setting) throws GeneralSecurityException;
|
||||
}
|
||||
|
|
|
@ -1294,6 +1294,11 @@ public final class Settings implements ToXContent {
|
|||
return delegate.getString(keyTransform.apply(setting));
|
||||
}
|
||||
|
||||
@Override
|
||||
public InputStream getFile(String setting) throws GeneralSecurityException{
|
||||
return delegate.getFile(keyTransform.apply(setting));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
delegate.close();
|
||||
|
|
|
@ -157,13 +157,6 @@ public final class NodeEnvironment implements Closeable {
|
|||
public static final Setting<Integer> MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 1, 1,
|
||||
Property.NodeScope);
|
||||
|
||||
/**
|
||||
* If true automatically append node lock id to custom data paths.
|
||||
*/
|
||||
public static final Setting<Boolean> ADD_NODE_LOCK_ID_TO_CUSTOM_PATH =
|
||||
Setting.boolSetting("node.add_lock_id_to_custom_path", true, Property.NodeScope);
|
||||
|
||||
|
||||
/**
|
||||
* Seed for determining a persisted unique uuid of this node. If the node has already a persisted uuid on disk,
|
||||
* this seed will be ignored and the uuid from disk will be reused.
|
||||
|
@ -922,11 +915,7 @@ public final class NodeEnvironment implements Closeable {
|
|||
if (customDataDir != null) {
|
||||
// This assert is because this should be caught by MetaDataCreateIndexService
|
||||
assert sharedDataPath != null;
|
||||
if (ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.get(indexSettings.getNodeSettings())) {
|
||||
return sharedDataPath.resolve(customDataDir).resolve(Integer.toString(this.nodeLockId));
|
||||
} else {
|
||||
return sharedDataPath.resolve(customDataDir);
|
||||
}
|
||||
return sharedDataPath.resolve(customDataDir).resolve(Integer.toString(this.nodeLockId));
|
||||
} else {
|
||||
throw new IllegalArgumentException("no custom " + IndexMetaData.SETTING_DATA_PATH + " setting available");
|
||||
}
|
||||
|
|
|
@ -106,11 +106,10 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
|
|||
final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(unassignedShard.index());
|
||||
final Set<String> inSyncAllocationIds = indexMetaData.inSyncAllocationIds(unassignedShard.id());
|
||||
final boolean snapshotRestore = unassignedShard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT;
|
||||
final boolean recoverOnAnyNode = recoverOnAnyNode(indexMetaData);
|
||||
|
||||
assert inSyncAllocationIds.isEmpty() == false;
|
||||
// use in-sync allocation ids to select nodes
|
||||
final NodeShardsResult nodeShardsResult = buildNodeShardsResult(unassignedShard, snapshotRestore || recoverOnAnyNode,
|
||||
final NodeShardsResult nodeShardsResult = buildNodeShardsResult(unassignedShard, snapshotRestore,
|
||||
allocation.getIgnoreNodes(unassignedShard.shardId()), inSyncAllocationIds, shardState, logger);
|
||||
final boolean enoughAllocationsFound = nodeShardsResult.orderedAllocationCandidates.size() > 0;
|
||||
logger.debug("[{}][{}]: found {} allocation candidates of {} based on allocation ids: [{}]", unassignedShard.index(),
|
||||
|
@ -122,10 +121,6 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
|
|||
logger.debug("[{}][{}]: missing local data, will restore from [{}]",
|
||||
unassignedShard.index(), unassignedShard.id(), unassignedShard.recoverySource());
|
||||
return AllocateUnassignedDecision.NOT_TAKEN;
|
||||
} else if (recoverOnAnyNode) {
|
||||
// let BalancedShardsAllocator take care of allocating this shard
|
||||
logger.debug("[{}][{}]: missing local data, recover from any node", unassignedShard.index(), unassignedShard.id());
|
||||
return AllocateUnassignedDecision.NOT_TAKEN;
|
||||
} else {
|
||||
// We have a shard that was previously allocated, but we could not find a valid shard copy to allocate the primary.
|
||||
// We could just be waiting for the node that holds the primary to start back up, in which case the allocation for
|
||||
|
@ -331,19 +326,6 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
|
|||
Collections.unmodifiableList(noNodeShards));
|
||||
}
|
||||
|
||||
/**
|
||||
* Return {@code true} if the index is configured to allow shards to be
|
||||
* recovered on any node
|
||||
*/
|
||||
private boolean recoverOnAnyNode(IndexMetaData metaData) {
|
||||
// don't use the setting directly, not to trigger verbose deprecation logging
|
||||
return (metaData.isOnSharedFilesystem(metaData.getSettings()) || metaData.isOnSharedFilesystem(this.settings))
|
||||
&& (metaData.getSettings().getAsBooleanLenientForPreEs6Indices(
|
||||
metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, deprecationLogger) ||
|
||||
this.settings.getAsBooleanLenientForPreEs6Indices
|
||||
(metaData.getCreationVersion(), IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, deprecationLogger));
|
||||
}
|
||||
|
||||
protected abstract FetchResult<NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);
|
||||
|
||||
private static class NodeShardsResult {
|
||||
|
|
|
@ -55,7 +55,6 @@ import org.elasticsearch.index.shard.IndexShard;
|
|||
import org.elasticsearch.index.shard.IndexShardClosedException;
|
||||
import org.elasticsearch.index.shard.IndexingOperationListener;
|
||||
import org.elasticsearch.index.shard.SearchOperationListener;
|
||||
import org.elasticsearch.index.shard.ShadowIndexShard;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.shard.ShardNotFoundException;
|
||||
import org.elasticsearch.index.shard.ShardPath;
|
||||
|
@ -343,8 +342,6 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
|
|||
|
||||
logger.debug("creating shard_id {}", shardId);
|
||||
// if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary.
|
||||
final boolean canDeleteShardContent = this.indexSettings.isOnSharedFilesystem() == false ||
|
||||
(primary && this.indexSettings.isOnSharedFilesystem());
|
||||
final Engine.Warmer engineWarmer = (searcher) -> {
|
||||
IndexShard shard = getShardOrNull(shardId.getId());
|
||||
if (shard != null) {
|
||||
|
@ -352,18 +349,11 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
|
|||
}
|
||||
};
|
||||
store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock,
|
||||
new StoreCloseListener(shardId, canDeleteShardContent, () -> eventListener.onStoreClosed(shardId)));
|
||||
if (useShadowEngine(primary, this.indexSettings)) {
|
||||
indexShard = new ShadowIndexShard(routing, this.indexSettings, path, store, indexCache, mapperService, similarityService,
|
||||
indexFieldData, engineFactory, eventListener, searcherWrapper, threadPool, bigArrays, engineWarmer,
|
||||
searchOperationListeners);
|
||||
// no indexing listeners - shadow engines don't index
|
||||
} else {
|
||||
indexShard = new IndexShard(routing, this.indexSettings, path, store, indexCache, mapperService, similarityService,
|
||||
new StoreCloseListener(shardId, () -> eventListener.onStoreClosed(shardId)));
|
||||
indexShard = new IndexShard(routing, this.indexSettings, path, store, indexCache, mapperService, similarityService,
|
||||
indexFieldData, engineFactory, eventListener, searcherWrapper, threadPool, bigArrays, engineWarmer,
|
||||
() -> globalCheckpointSyncer.accept(shardId),
|
||||
searchOperationListeners, indexingOperationListeners);
|
||||
}
|
||||
eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created");
|
||||
eventListener.afterIndexShardCreated(indexShard);
|
||||
shards = newMapBuilder(shards).put(shardId.id(), indexShard).immutableMap();
|
||||
|
@ -381,10 +371,6 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
|
|||
}
|
||||
}
|
||||
|
||||
static boolean useShadowEngine(boolean primary, IndexSettings indexSettings) {
|
||||
return primary == false && indexSettings.isShadowReplicaIndex();
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void removeShard(int shardId, String reason) {
|
||||
final ShardId sId = new ShardId(index(), shardId);
|
||||
|
@ -438,16 +424,14 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
|
|||
}
|
||||
|
||||
|
||||
private void onShardClose(ShardLock lock, boolean ownsShard) {
|
||||
private void onShardClose(ShardLock lock) {
|
||||
if (deleted.get()) { // we remove that shards content if this index has been deleted
|
||||
try {
|
||||
if (ownsShard) {
|
||||
try {
|
||||
eventListener.beforeIndexShardDeleted(lock.getShardId(), indexSettings.getSettings());
|
||||
} finally {
|
||||
shardStoreDeleter.deleteShardStore("delete index", lock, indexSettings);
|
||||
eventListener.afterIndexShardDeleted(lock.getShardId(), indexSettings.getSettings());
|
||||
}
|
||||
try {
|
||||
eventListener.beforeIndexShardDeleted(lock.getShardId(), indexSettings.getSettings());
|
||||
} finally {
|
||||
shardStoreDeleter.deleteShardStore("delete index", lock, indexSettings);
|
||||
eventListener.afterIndexShardDeleted(lock.getShardId(), indexSettings.getSettings());
|
||||
}
|
||||
} catch (IOException e) {
|
||||
shardStoreDeleter.addPendingDelete(lock.getShardId(), indexSettings);
|
||||
|
@ -514,12 +498,10 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
|
|||
|
||||
private class StoreCloseListener implements Store.OnClose {
|
||||
private final ShardId shardId;
|
||||
private final boolean ownsShard;
|
||||
private final Closeable[] toClose;
|
||||
|
||||
StoreCloseListener(ShardId shardId, boolean ownsShard, Closeable... toClose) {
|
||||
StoreCloseListener(ShardId shardId, Closeable... toClose) {
|
||||
this.shardId = shardId;
|
||||
this.ownsShard = ownsShard;
|
||||
this.toClose = toClose;
|
||||
}
|
||||
|
||||
|
@ -527,7 +509,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
|
|||
public void handle(ShardLock lock) {
|
||||
try {
|
||||
assert lock.getShardId().equals(shardId) : "shard id mismatch, expected: " + shardId + " but got: " + lock.getShardId();
|
||||
onShardClose(lock, ownsShard);
|
||||
onShardClose(lock);
|
||||
} finally {
|
||||
try {
|
||||
IOUtils.close(toClose);
|
||||
|
|
|
@ -160,7 +160,6 @@ public final class IndexSettings {
|
|||
private final String nodeName;
|
||||
private final Settings nodeSettings;
|
||||
private final int numberOfShards;
|
||||
private final boolean isShadowReplicaIndex;
|
||||
// volatile fields are updated via #updateIndexMetaData(IndexMetaData) under lock
|
||||
private volatile Settings settings;
|
||||
private volatile IndexMetaData indexMetaData;
|
||||
|
@ -257,7 +256,6 @@ public final class IndexSettings {
|
|||
nodeName = Node.NODE_NAME_SETTING.get(settings);
|
||||
this.indexMetaData = indexMetaData;
|
||||
numberOfShards = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
|
||||
isShadowReplicaIndex = indexMetaData.isIndexUsingShadowReplicas(settings);
|
||||
|
||||
this.defaultField = DEFAULT_FIELD_SETTING.get(settings);
|
||||
this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings);
|
||||
|
@ -359,15 +357,6 @@ public final class IndexSettings {
|
|||
return settings.get(IndexMetaData.SETTING_DATA_PATH);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns <code>true</code> iff the given settings indicate that the index
|
||||
* associated with these settings allocates it's shards on a shared
|
||||
* filesystem.
|
||||
*/
|
||||
public boolean isOnSharedFilesystem() {
|
||||
return indexMetaData.isOnSharedFilesystem(getSettings());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the version the index was created on.
|
||||
* @see Version#indexCreated(Settings)
|
||||
|
@ -400,12 +389,6 @@ public final class IndexSettings {
|
|||
*/
|
||||
public int getNumberOfReplicas() { return settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null); }
|
||||
|
||||
/**
|
||||
* Returns <code>true</code> iff this index uses shadow replicas.
|
||||
* @see IndexMetaData#isIndexUsingShadowReplicas(Settings)
|
||||
*/
|
||||
public boolean isShadowReplicaIndex() { return isShadowReplicaIndex; }
|
||||
|
||||
/**
|
||||
* Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the
|
||||
* index settings and the node settings where node settings are overwritten by index settings.
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.index.analysis;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.mapper.TextFieldMapper;
|
||||
|
@ -78,19 +77,6 @@ public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<Custom
|
|||
|
||||
int positionIncrementGap = TextFieldMapper.Defaults.POSITION_INCREMENT_GAP;
|
||||
|
||||
if (analyzerSettings.getAsMap().containsKey("position_offset_gap")){
|
||||
if (indexSettings.getIndexVersionCreated().before(Version.V_2_0_0)){
|
||||
if (analyzerSettings.getAsMap().containsKey("position_increment_gap")){
|
||||
throw new IllegalArgumentException("Custom Analyzer [" + name() +
|
||||
"] defined both [position_offset_gap] and [position_increment_gap], use only [position_increment_gap]");
|
||||
}
|
||||
positionIncrementGap = analyzerSettings.getAsInt("position_offset_gap", positionIncrementGap);
|
||||
}else {
|
||||
throw new IllegalArgumentException("Option [position_offset_gap] in Custom Analyzer [" + name() +
|
||||
"] has been renamed, please use [position_increment_gap] instead.");
|
||||
}
|
||||
}
|
||||
|
||||
positionIncrementGap = analyzerSettings.getAsInt("position_increment_gap", positionIncrementGap);
|
||||
|
||||
int offsetGap = analyzerSettings.getAsInt("offset_gap", -1);;
|
||||
|
|
|
@ -25,5 +25,4 @@ public interface EngineFactory {
|
|||
|
||||
Engine newReadWriteEngine(EngineConfig config);
|
||||
|
||||
Engine newReadOnlyEngine(EngineConfig config);
|
||||
}
|
||||
|
|
|
@ -24,9 +24,4 @@ public class InternalEngineFactory implements EngineFactory {
|
|||
public Engine newReadWriteEngine(EngineConfig config) {
|
||||
return new InternalEngine(config);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Engine newReadOnlyEngine(EngineConfig config) {
|
||||
return new ShadowEngine(config);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,282 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.engine;
|
||||
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.IndexCommit;
|
||||
import org.apache.lucene.index.SegmentInfos;
|
||||
import org.apache.lucene.search.SearcherFactory;
|
||||
import org.apache.lucene.search.SearcherManager;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ReleasableLock;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.translog.Translog;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;

/**
 * ShadowEngine is a specialized engine that only allows read-only operations
 * on the underlying Lucene index. An {@code IndexReader} is opened instead of
 * an {@code IndexWriter}. All methods that would usually perform write
 * operations are no-ops, which means:
 *
 * - No operations are written to or read from the translog
 * - Create, Index, and Delete do nothing
 * - Flush does not fsync any files, or make any on-disk changes
 *
 * In order for new segments to become visible, the ShadowEngine may perform
 * stage1 of the traditional recovery process (copying segment files) from a
 * regular primary (which uses {@link org.elasticsearch.index.engine.InternalEngine})
 *
 * Notice that since this Engine does not deal with the translog, any
 * {@link #get(Get get)} request goes directly to the searcher, meaning it is
 * non-realtime.
 */
public class ShadowEngine extends Engine {

    /** how long to wait for an index to exist */
    public static final String NONEXISTENT_INDEX_RETRY_WAIT = "index.shadow.wait_for_initial_commit";
    public static final TimeValue DEFAULT_NONEXISTENT_INDEX_RETRY_WAIT = TimeValue.timeValueSeconds(5);

    private volatile SearcherManager searcherManager;

    private volatile SegmentInfos lastCommittedSegmentInfos;

    public ShadowEngine(EngineConfig engineConfig) {
        super(engineConfig);
        if (engineConfig.getRefreshListeners() != null) {
            throw new IllegalArgumentException("ShadowEngine doesn't support RefreshListeners");
        }
        SearcherFactory searcherFactory = new EngineSearcherFactory(engineConfig);
        final long nonexistentRetryTime = engineConfig.getIndexSettings().getSettings()
                .getAsTime(NONEXISTENT_INDEX_RETRY_WAIT, DEFAULT_NONEXISTENT_INDEX_RETRY_WAIT)
                .getMillis();
        try {
            DirectoryReader reader = null;
            store.incRef();
            boolean success = false;
            try {
                if (Lucene.waitForIndex(store.directory(), nonexistentRetryTime)) {
                    reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(store.directory()), shardId);
                    this.searcherManager = new SearcherManager(reader, searcherFactory);
                    this.lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store);
                    success = true;
                } else {
                    throw new IllegalStateException("failed to open a shadow engine after " +
                            nonexistentRetryTime + "ms, " +
                            "directory is not an index");
                }
            } catch (Exception e) {
                logger.warn("failed to create new reader", e);
                throw e;
            } finally {
                if (success == false) {
                    IOUtils.closeWhileHandlingException(reader);
                    store.decRef();
                }
            }
        } catch (IOException ex) {
            throw new EngineCreationFailureException(shardId, "failed to open index reader", ex);
        }
        logger.trace("created new ShadowEngine");
    }

    @Override
    public IndexResult index(Index index) {
        throw new UnsupportedOperationException(shardId + " index operation not allowed on shadow engine");
    }

    @Override
    public DeleteResult delete(Delete delete) {
        throw new UnsupportedOperationException(shardId + " delete operation not allowed on shadow engine");
    }

    @Override
    public NoOpResult noOp(NoOp noOp) {
        throw new UnsupportedOperationException(shardId + " no-op operation not allowed on shadow engine");
    }

    @Override
    public SyncedFlushResult syncFlush(String syncId, CommitId expectedCommitId) {
        throw new UnsupportedOperationException(shardId + " sync commit operation not allowed on shadow engine");
    }

    @Override
    public CommitId flush() throws EngineException {
        return flush(false, false);
    }

    @Override
    public CommitId flush(boolean force, boolean waitIfOngoing) throws EngineException {
        logger.trace("skipping FLUSH on shadow engine");
        // reread the last committed segment infos
        refresh("flush");
        /*
         * we have to inc-ref the store here since if the engine is closed by a tragic event
         * we don't acquire the write lock and wait until we have exclusive access. This might also
         * dec the store reference which can essentially close the store and unless we can inc the reference
         * we can't use it.
         */
        store.incRef();
        try (ReleasableLock lock = readLock.acquire()) {
            // reread the last committed segment infos
            lastCommittedSegmentInfos = readLastCommittedSegmentInfos(searcherManager, store);
        } catch (Exception e) {
            if (isClosed.get() == false) {
                logger.warn("failed to read latest segment infos on flush", e);
                if (Lucene.isCorruptionException(e)) {
                    throw new FlushFailedEngineException(shardId, e);
                }
            }
        } finally {
            store.decRef();
        }
        return new CommitId(lastCommittedSegmentInfos.getId());
    }

    @Override
    public void forceMerge(boolean flush, int maxNumSegments, boolean onlyExpungeDeletes, boolean upgrade, boolean upgradeOnlyAncientSegments) throws EngineException {
        // no-op
        logger.trace("skipping FORCE-MERGE on shadow engine");
    }

    @Override
    public GetResult get(Get get, Function<String, Searcher> searcherFactory) throws EngineException {
        // There is no translog, so we can get it directly from the searcher
        return getFromSearcher(get, searcherFactory);
    }

    @Override
    public Translog getTranslog() {
        throw new UnsupportedOperationException("shadow engines don't have translogs");
    }

    @Override
    public List<Segment> segments(boolean verbose) {
        try (ReleasableLock lock = readLock.acquire()) {
            Segment[] segmentsArr = getSegmentInfo(lastCommittedSegmentInfos, verbose);
            for (int i = 0; i < segmentsArr.length; i++) {
                // hard code all segments as committed, because they are in
                // order for the shadow replica to see them
                segmentsArr[i].committed = true;
            }
            return Arrays.asList(segmentsArr);
        }
    }

    @Override
    public void refresh(String source) throws EngineException {
        // we obtain a read lock here, since we don't want a flush to happen while we are refreshing
        // since it flushes the index as well (though, in terms of concurrency, we are allowed to do it)
        try (ReleasableLock lock = readLock.acquire()) {
            ensureOpen();
            searcherManager.maybeRefreshBlocking();
        } catch (AlreadyClosedException e) {
            throw e;
        } catch (Exception e) {
            try {
                failEngine("refresh failed", e);
            } catch (Exception inner) {
                e.addSuppressed(inner);
            }
            throw new RefreshFailedEngineException(shardId, e);
        }
    }

    @Override
    public IndexCommit acquireIndexCommit(boolean flushFirst) throws EngineException {
        throw new UnsupportedOperationException("Can not take snapshot from a shadow engine");
    }

    @Override
    protected SearcherManager getSearcherManager() {
        return searcherManager;
    }

    @Override
    protected void closeNoLock(String reason) {
        if (isClosed.compareAndSet(false, true)) {
            try {
                logger.debug("shadow replica close searcher manager refCount: {}", store.refCount());
                IOUtils.close(searcherManager);
            } catch (Exception e) {
                logger.warn("shadow replica failed to close searcher manager", e);
            } finally {
                store.decRef();
            }
        }
    }

    @Override
    protected SegmentInfos getLastCommittedSegmentInfos() {
        return lastCommittedSegmentInfos;
    }

    @Override
    public long getIndexBufferRAMBytesUsed() {
        // No IndexWriter nor version map
        throw new UnsupportedOperationException("ShadowEngine has no IndexWriter");
    }

    @Override
    public void writeIndexingBuffer() {
        // No indexing buffer
        throw new UnsupportedOperationException("ShadowEngine has no IndexWriter");
    }

    @Override
    public void activateThrottling() {
        throw new UnsupportedOperationException("ShadowEngine has no IndexWriter");
    }

    @Override
    public void deactivateThrottling() {
        throw new UnsupportedOperationException("ShadowEngine has no IndexWriter");
    }

    @Override
    public SequenceNumbersService seqNoService() {
        throw new UnsupportedOperationException("ShadowEngine doesn't track sequence numbers");
    }

    @Override
    public boolean isThrottled() {
        return false;
    }

    @Override
    public long getIndexThrottleTimeInMillis() {
        return 0L;
    }

    @Override
    public Engine recoverFromTranslog() throws IOException {
        throw new UnsupportedOperationException("can't recover on a shadow engine");
    }

}
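The inc-ref/dec-ref dance in flush() above is easy to get wrong, so here is a minimal, self-contained sketch of the same guard pattern. SimpleRefStore is a stand-in written only for illustration; it is not part of this change.

import java.util.concurrent.atomic.AtomicInteger;

// Stand-in for a reference-counted store: the last decRef() releases the underlying resources.
class SimpleRefStore {
    private final AtomicInteger refCount = new AtomicInteger(1);

    void incRef() {
        if (refCount.incrementAndGet() <= 1) {       // reaching 1 here means we resurrected a closed store
            refCount.decrementAndGet();
            throw new IllegalStateException("store is already closed");
        }
    }

    void decRef() {
        if (refCount.decrementAndGet() == 0) {
            System.out.println("store released");    // real code would close files here
        }
    }
}

public class RefGuardSketch {
    public static void main(String[] args) {
        SimpleRefStore store = new SimpleRefStore();
        store.incRef();                               // pin the store so a concurrent close can't release it
        try {
            // ... read the last committed segment infos while the reference is held ...
        } finally {
            store.decRef();                           // always release, even if the read fails
        }
        store.decRef();                               // drop the initial reference; the store is now released
    }
}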
@@ -26,7 +26,6 @@ import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.common.Numbers;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;

@@ -286,9 +285,8 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
return ignoreUnmapped;
}

QueryValidationException checkLatLon(boolean indexCreatedBeforeV2_0) {
// validation was not available prior to 2.x, so to support bwc percolation queries we only ignore_malformed on 2.x created indexes
if (GeoValidationMethod.isIgnoreMalformed(validationMethod) || indexCreatedBeforeV2_0) {
QueryValidationException checkLatLon() {
if (GeoValidationMethod.isIgnoreMalformed(validationMethod)) {
return null;
}

@@ -327,15 +325,14 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
throw new QueryShardException(context, "field [" + fieldName + "] is not a geo_point field");
}

QueryValidationException exception = checkLatLon(context.indexVersionCreated().before(Version.V_2_0_0));
QueryValidationException exception = checkLatLon();
if (exception != null) {
throw new QueryShardException(context, "couldn't validate latitude/ longitude values", exception);
}

GeoPoint luceneTopLeft = new GeoPoint(topLeft);
GeoPoint luceneBottomRight = new GeoPoint(bottomRight);
final Version indexVersionCreated = context.indexVersionCreated();
if (indexVersionCreated.onOrAfter(Version.V_2_2_0) || GeoValidationMethod.isCoerce(validationMethod)) {
if (GeoValidationMethod.isCoerce(validationMethod)) {
// Special case: if the difference between the left and right is 360 and the right is greater than the left, we are asking for
// the complete longitude range so need to set longitude to the complete longitude range
double right = luceneBottomRight.getLon();

@@ -24,7 +24,6 @@ import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.search.IndexOrDocValuesQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.Version;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;

@@ -241,13 +240,12 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
throw new QueryShardException(shardContext, "field [" + fieldName + "] is not a geo_point field");
}

final Version indexVersionCreated = shardContext.indexVersionCreated();
QueryValidationException exception = checkLatLon(shardContext.indexVersionCreated().before(Version.V_2_0_0));
QueryValidationException exception = checkLatLon();
if (exception != null) {
throw new QueryShardException(shardContext, "couldn't validate latitude/ longitude values", exception);
}

if (indexVersionCreated.onOrAfter(Version.V_2_2_0) || GeoValidationMethod.isCoerce(validationMethod)) {
if (GeoValidationMethod.isCoerce(validationMethod)) {
GeoUtils.normalizePoint(center, true, true);
}

@@ -389,9 +387,8 @@ public class GeoDistanceQueryBuilder extends AbstractQueryBuilder<GeoDistanceQue
Objects.equals(ignoreUnmapped, other.ignoreUnmapped);
}

private QueryValidationException checkLatLon(boolean indexCreatedBeforeV2_0) {
// validation was not available prior to 2.x, so to support bwc percolation queries we only ignore_malformed on 2.x created indexes
if (GeoValidationMethod.isIgnoreMalformed(validationMethod) || indexCreatedBeforeV2_0) {
private QueryValidationException checkLatLon() {
if (GeoValidationMethod.isIgnoreMalformed(validationMethod)) {
return null;
}

@@ -221,10 +221,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
*/
private final AtomicBoolean active = new AtomicBoolean();
/**
* Allows for the registration of listeners that are called when a change becomes visible for search. This is nullable because
* {@linkplain ShadowIndexShard} doesn't support this.
* Allows for the registration of listeners that are called when a change becomes visible for search.
*/
@Nullable
private final RefreshListeners refreshListeners;

public IndexShard(ShardRouting shardRouting, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache,

@@ -416,6 +414,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
// active primaries.
throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state());
}
assert newRouting.active() == false || state == IndexShardState.STARTED || state == IndexShardState.RELOCATED ||
state == IndexShardState.CLOSED :
"routing is active, but local shard state isn't. routing: " + newRouting + ", local state: " + state;
this.shardRouting = newRouting;
persistMetadata(newRouting, currentRouting);
}

@@ -498,6 +499,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
* @return the previous shard state
*/
private IndexShardState changeState(IndexShardState newState, String reason) {
assert Thread.holdsLock(mutex);
logger.debug("state: [{}]->[{}], reason [{}]", state, newState, reason);
IndexShardState previousState = state;
state = newState;

@@ -1921,9 +1923,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}

/**
* Build {@linkplain RefreshListeners} for this shard. Protected so {@linkplain ShadowIndexShard} can override it to return null.
* Build {@linkplain RefreshListeners} for this shard.
*/
protected RefreshListeners buildRefreshListeners() {
private RefreshListeners buildRefreshListeners() {
return new RefreshListeners(
indexSettings::getMaxRefreshListeners,
() -> refresh("too_many_listeners"),

@ -1,144 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.index.shard;
|
||||
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.cache.IndexCache;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
import org.elasticsearch.index.engine.EngineConfig;
|
||||
import org.elasticsearch.index.engine.EngineFactory;
|
||||
import org.elasticsearch.index.fielddata.IndexFieldDataService;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.merge.MergeStats;
|
||||
import org.elasticsearch.index.seqno.SeqNoStats;
|
||||
import org.elasticsearch.index.similarity.SimilarityService;
|
||||
import org.elasticsearch.index.store.Store;
|
||||
import org.elasticsearch.index.translog.Translog;
|
||||
import org.elasticsearch.index.translog.TranslogStats;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
/**
|
||||
* ShadowIndexShard extends {@link IndexShard} to add file synchronization
|
||||
* from the primary when a flush happens. It also ensures that a replica being
|
||||
* promoted to a primary causes the shard to fail, kicking off a re-allocation
|
||||
* of the primary shard.
|
||||
*/
|
||||
public final class ShadowIndexShard extends IndexShard {
|
||||
|
||||
public ShadowIndexShard(ShardRouting shardRouting, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache,
|
||||
MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService,
|
||||
@Nullable EngineFactory engineFactory, IndexEventListener indexEventListener, IndexSearcherWrapper wrapper,
|
||||
ThreadPool threadPool, BigArrays bigArrays, Engine.Warmer engineWarmer,
|
||||
List<SearchOperationListener> searchOperationListeners) throws IOException {
|
||||
super(shardRouting, indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldDataService, engineFactory,
|
||||
indexEventListener, wrapper, threadPool, bigArrays, engineWarmer, () -> {
|
||||
}, searchOperationListeners, Collections.emptyList());
|
||||
}
|
||||
|
||||
/**
|
||||
* In addition to the regular accounting done in
|
||||
* {@link IndexShard#updateRoutingEntry(ShardRouting)},
|
||||
* if this shadow replica needs to be promoted to a primary, the shard is
|
||||
* failed in order to allow a new primary to be re-allocated.
|
||||
*/
|
||||
@Override
|
||||
public void updateRoutingEntry(ShardRouting newRouting) throws IOException {
|
||||
if (newRouting.primary()) {// becoming a primary
|
||||
throw new IllegalStateException("can't promote shard to primary");
|
||||
}
|
||||
super.updateRoutingEntry(newRouting);
|
||||
}
|
||||
|
||||
@Override
|
||||
public MergeStats mergeStats() {
|
||||
return new MergeStats();
|
||||
}
|
||||
|
||||
@Override
|
||||
public SeqNoStats seqNoStats() {
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean canIndex() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Engine newEngine(EngineConfig config) {
|
||||
assert this.shardRouting.primary() == false;
|
||||
assert config.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG;
|
||||
return engineFactory.newReadOnlyEngine(config);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected RefreshListeners buildRefreshListeners() {
|
||||
// ShadowEngine doesn't have a translog so it shouldn't try to support RefreshListeners.
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean shouldFlush() {
|
||||
// we don't need to flush since we don't write - all dominated by the primary
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public TranslogStats translogStats() {
|
||||
return null; // shadow engine has no translog
|
||||
}
|
||||
|
||||
@Override
|
||||
public void updateGlobalCheckpointOnReplica(long checkpoint) {
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getLocalCheckpoint() {
|
||||
return -1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getGlobalCheckpoint() {
|
||||
return -1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addRefreshListener(Translog.Location location, Consumer<Boolean> listener) {
|
||||
throw new UnsupportedOperationException("Can't listen for a refresh on a shadow engine because it doesn't have a translog");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Store.MetadataSnapshot snapshotStoreMetadata() throws IOException {
|
||||
throw new UnsupportedOperationException("can't snapshot the directory as the primary may change it underneath us");
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void onNewEngine(Engine newEngine) {
|
||||
// nothing to do here - the superclass sets the translog on some listeners but we don't have such a thing
|
||||
}
|
||||
|
||||
}
|
|
@@ -331,14 +331,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent {
} else if (writtenBy == null) {
throw new ElasticsearchParseException("missing or invalid written_by [" + writtenByStr + "]");
} else if (checksum == null) {
if (physicalName.startsWith("segments_")
&& writtenBy.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION) == false) {
// it's possible the checksum is null for segments_N files that belong to a shard with no data,
// so we will assign it _na_ for now and try to get the checksum from the file itself later
checksum = UNKNOWN_CHECKSUM;
} else {
throw new ElasticsearchParseException("missing checksum for name [" + name + "]");
}
throw new ElasticsearchParseException("missing checksum for name [" + name + "]");
}
return new FileInfo(name, new StoreFileMetaData(physicalName, length, checksum, writtenBy, metaHash), partSize);
}

@@ -28,7 +28,6 @@ import org.apache.lucene.store.NIOFSDirectory;
import org.apache.lucene.store.NativeFSLockFactory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.store.SimpleFSLockFactory;
import org.apache.lucene.store.SleepingLockWrapper;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

@@ -74,9 +73,6 @@ public class FsDirectoryService extends DirectoryService {
Set<String> preLoadExtensions = new HashSet<>(
indexSettings.getValue(IndexModule.INDEX_STORE_PRE_LOAD_SETTING));
wrapped = setPreload(wrapped, location, lockFactory, preLoadExtensions);
if (indexSettings.isOnSharedFilesystem()) {
wrapped = new SleepingLockWrapper(wrapped, 5000);
}
return wrapped;
}

@ -449,7 +449,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
|
|||
boolean success = false;
|
||||
try {
|
||||
assert metadata.writtenBy() != null;
|
||||
assert metadata.writtenBy().onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION);
|
||||
output = new LuceneVerifyingIndexOutput(metadata, output);
|
||||
success = true;
|
||||
} finally {
|
||||
|
@ -468,7 +467,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
|
|||
|
||||
public IndexInput openVerifyingInput(String filename, IOContext context, StoreFileMetaData metadata) throws IOException {
|
||||
assert metadata.writtenBy() != null;
|
||||
assert metadata.writtenBy().onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION);
|
||||
return new VerifyingIndexInput(directory().openInput(filename, context));
|
||||
}
|
||||
|
||||
|
@ -813,22 +811,14 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
|
|||
maxVersion = version;
|
||||
}
|
||||
for (String file : info.files()) {
|
||||
if (version.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION)) {
|
||||
checksumFromLuceneFile(directory, file, builder, logger, version, SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file)));
|
||||
} else {
|
||||
throw new IllegalStateException("version must be onOrAfter: " + StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION + " but was: " + version);
|
||||
}
|
||||
checksumFromLuceneFile(directory, file, builder, logger, version, SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file)));
|
||||
}
|
||||
}
|
||||
if (maxVersion == null) {
|
||||
maxVersion = StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION;
|
||||
maxVersion = org.elasticsearch.Version.CURRENT.minimumIndexCompatibilityVersion().luceneVersion;
|
||||
}
|
||||
final String segmentsFile = segmentCommitInfos.getSegmentsFileName();
|
||||
if (maxVersion.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION)) {
|
||||
checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true);
|
||||
} else {
|
||||
throw new IllegalStateException("version must be onOrAfter: " + StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION + " but was: " + maxVersion);
|
||||
}
|
||||
checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true);
|
||||
} catch (CorruptIndexException | IndexNotFoundException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
|
||||
// we either know the index is corrupted or it's just not there
|
||||
throw ex;
|
||||
|
|
|
@@ -27,12 +27,11 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.lucene.Lucene;

import java.io.IOException;
import java.text.ParseException;
import java.util.Objects;

public class StoreFileMetaData implements Writeable {

public static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_5_0_0;

private final String name;

// the actual file size on "disk", if compressed, the compressed size

@@ -44,20 +43,11 @@ public class StoreFileMetaData implements Writeable {

private final BytesRef hash;

public StoreFileMetaData(String name, long length, String checksum) {
this(name, length, checksum, FIRST_LUCENE_CHECKSUM_VERSION);
}

public StoreFileMetaData(String name, long length, String checksum, Version writtenBy) {
this(name, length, checksum, writtenBy, null);
}

public StoreFileMetaData(String name, long length, String checksum, Version writtenBy, BytesRef hash) {
// it's possible here to have a _na_ checksum or an unsupported writtenBy version, if the
// file is a segments_N file, but that is fine in the case of a segments_N file because
// we handle that case upstream
assert name.startsWith("segments_") || (writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) :
"index versions less than " + FIRST_LUCENE_CHECKSUM_VERSION + " are not supported but got: " + writtenBy;
this.name = Objects.requireNonNull(name, "name must not be null");
this.length = length;
this.checksum = Objects.requireNonNull(checksum, "checksum must not be null");

@@ -72,8 +62,11 @@ public class StoreFileMetaData implements Writeable {
name = in.readString();
length = in.readVLong();
checksum = in.readString();
// TODO Why not Version.parse?
writtenBy = Lucene.parseVersionLenient(in.readString(), FIRST_LUCENE_CHECKSUM_VERSION);
try {
writtenBy = Version.parse(in.readString());
} catch (ParseException e) {
throw new AssertionError(e);
}
hash = in.readBytesRef();
}

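For context on the new deserialization path above: Lucene's org.apache.lucene.util.Version.parse throws a checked ParseException for strings that do not follow its x.y.z scheme, and the change converts that into an AssertionError because the value was written by a compatible node and should always parse. A minimal sketch of that wrapping, assuming only the standard Lucene API:

import org.apache.lucene.util.Version;

import java.text.ParseException;

public class ParseWrittenBySketch {
    // Parse a "writtenBy" Lucene version string, treating an unparsable value as a programming error.
    static Version parseWrittenBy(String raw) {
        try {
            return Version.parse(raw);     // e.g. "6.5.0"
        } catch (ParseException e) {
            throw new AssertionError("unreadable writtenBy version: " + raw, e);
        }
    }

    public static void main(String[] args) {
        System.out.println(parseWrittenBy("6.5.0"));
    }
}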
@@ -424,11 +424,11 @@ public class IndicesService extends AbstractLifecycleComponent
IndexingOperationListener... indexingOperationListeners) throws IOException {
final Index index = indexMetaData.getIndex();
final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexScopeSetting);
logger.debug("creating Index [{}], shards [{}]/[{}{}] - reason [{}]",
logger.debug("creating Index [{}], shards [{}]/[{}] - reason [{}]",
indexMetaData.getIndex(),
idxSettings.getNumberOfShards(),
idxSettings.getNumberOfReplicas(),
idxSettings.isShadowReplicaIndex() ? "s" : "", reason);
reason);

final IndexModule indexModule = new IndexModule(idxSettings, analysisRegistry);
for (IndexingOperationListener operationListener : indexingOperationListeners) {

@@ -732,16 +732,11 @@ public class IndicesService extends AbstractLifecycleComponent
* @return true if the index can be deleted on this node
*/
public boolean canDeleteIndexContents(Index index, IndexSettings indexSettings) {
// index contents can be deleted if the index is not on a shared file system,
// or if it's on a shared file system but it's an already closed index (so all
// its resources have already been relinquished)
if (indexSettings.isOnSharedFilesystem() == false || indexSettings.getIndexMetaData().getState() == IndexMetaData.State.CLOSE) {
final IndexService indexService = indexService(index);
if (indexService == null && nodeEnv.hasNodeFile()) {
return true;
}
} else {
logger.trace("{} skipping index directory deletion due to shadow replicas", index);
// index contents can be deleted if it's an already closed index (so all its resources have
// already been relinquished)
final IndexService indexService = indexService(index);
if (indexService == null && nodeEnv.hasNodeFile()) {
return true;
}
return false;
}

@@ -789,7 +784,6 @@ public class IndicesService extends AbstractLifecycleComponent
FOLDER_FOUND_CAN_DELETE, // shard data exists and can be deleted
STILL_ALLOCATED, // the shard is still allocated / active on this node
NO_FOLDER_FOUND, // the shard's data locations do not exist
SHARED_FILE_SYSTEM, // the shard is located on a shared file system and should not be deleted
NO_LOCAL_STORAGE // node does not have local storage (see DiscoveryNode.nodeRequiresLocalStorage)
}

@@ -802,30 +796,25 @@ public class IndicesService extends AbstractLifecycleComponent
public ShardDeletionCheckResult canDeleteShardContent(ShardId shardId, IndexSettings indexSettings) {
assert shardId.getIndex().equals(indexSettings.getIndex());
final IndexService indexService = indexService(shardId.getIndex());
if (indexSettings.isOnSharedFilesystem() == false) {
if (nodeEnv.hasNodeFile()) {
final boolean isAllocated = indexService != null && indexService.hasShard(shardId.id());
if (isAllocated) {
return ShardDeletionCheckResult.STILL_ALLOCATED; // we are allocated - can't delete the shard
} else if (indexSettings.hasCustomDataPath()) {
// let's see if it's on a custom path (return false if the shard path doesn't exist)
// we don't need to delete anything that is not there
return Files.exists(nodeEnv.resolveCustomLocation(indexSettings, shardId)) ?
if (nodeEnv.hasNodeFile()) {
final boolean isAllocated = indexService != null && indexService.hasShard(shardId.id());
if (isAllocated) {
return ShardDeletionCheckResult.STILL_ALLOCATED; // we are allocated - can't delete the shard
} else if (indexSettings.hasCustomDataPath()) {
// let's see if it's on a custom path (return false if the shard path doesn't exist)
// we don't need to delete anything that is not there
return Files.exists(nodeEnv.resolveCustomLocation(indexSettings, shardId)) ?
ShardDeletionCheckResult.FOLDER_FOUND_CAN_DELETE :
ShardDeletionCheckResult.NO_FOLDER_FOUND;
} else {
// let's see if its path is available (return false if the shard path doesn't exist)
// we don't need to delete anything that is not there
return FileSystemUtils.exists(nodeEnv.availableShardPaths(shardId)) ?
ShardDeletionCheckResult.FOLDER_FOUND_CAN_DELETE :
ShardDeletionCheckResult.NO_FOLDER_FOUND;
}
} else {
return ShardDeletionCheckResult.NO_LOCAL_STORAGE;
}
// let's see if its path is available (return false if the shard path doesn't exist)
// we don't need to delete anything that is not there
return FileSystemUtils.exists(nodeEnv.availableShardPaths(shardId)) ?
ShardDeletionCheckResult.FOLDER_FOUND_CAN_DELETE :
ShardDeletionCheckResult.NO_FOLDER_FOUND;
}
} else {
logger.trace("{} skipping shard directory deletion due to shadow replicas", shardId);
return ShardDeletionCheckResult.SHARED_FILE_SYSTEM;
return ShardDeletionCheckResult.NO_LOCAL_STORAGE;
}
}

@ -403,20 +403,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
|
|||
// state may result in a new shard being initialized while having the same allocation id as the currently started shard.
|
||||
logger.debug("{} removing shard (not active, current {}, new {})", shardId, currentRoutingEntry, newShardRouting);
|
||||
indexService.removeShard(shardId.id(), "removing shard (stale copy)");
|
||||
} else {
|
||||
// remove shards where recovery source has changed. This re-initializes shards later in createOrUpdateShards
|
||||
if (newShardRouting.recoverySource() != null && newShardRouting.recoverySource().getType() == Type.PEER) {
|
||||
RecoveryState recoveryState = shard.recoveryState();
|
||||
final DiscoveryNode sourceNode = findSourceNodeForPeerRecovery(logger, routingTable, nodes, newShardRouting);
|
||||
if (recoveryState.getSourceNode().equals(sourceNode) == false) {
|
||||
if (recoveryTargetService.cancelRecoveriesForShard(shardId, "recovery source node changed")) {
|
||||
// getting here means that the shard was still recovering
|
||||
logger.debug("{} removing shard (recovery source changed), current [{}], global [{}], shard [{}])",
|
||||
shardId, recoveryState.getSourceNode(), sourceNode, newShardRouting);
|
||||
indexService.removeShard(shardId.id(), "removing shard (recovery source node changed)");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -197,13 +197,8 @@ public class PeerRecoverySourceService extends AbstractComponent implements Inde
|
|||
new RemoteRecoveryTargetHandler(request.recoveryId(), request.shardId(), targetAllocationId, transportService,
|
||||
request.targetNode(), recoverySettings, throttleTime -> shard.recoveryStats().addThrottleTime(throttleTime));
|
||||
Supplier<Long> currentClusterStateVersionSupplier = () -> clusterService.state().getVersion();
|
||||
if (shard.indexSettings().isOnSharedFilesystem()) {
|
||||
handler = new SharedFSRecoverySourceHandler(shard, recoveryTarget, request, currentClusterStateVersionSupplier,
|
||||
this::delayNewRecoveries, settings);
|
||||
} else {
|
||||
handler = new RecoverySourceHandler(shard, recoveryTarget, request, currentClusterStateVersionSupplier,
|
||||
handler = new RecoverySourceHandler(shard, recoveryTarget, request, currentClusterStateVersionSupplier,
|
||||
this::delayNewRecoveries, recoverySettings.getChunkSize().bytesAsInt(), settings);
|
||||
}
|
||||
return handler;
|
||||
}
|
||||
|
||||
|
|
|
@ -126,17 +126,6 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Cancel all ongoing recoveries for the given shard.
|
||||
*
|
||||
* @param reason reason for cancellation
|
||||
* @param shardId shard ID for which to cancel recoveries
|
||||
* @return {@code true} if a recovery was cancelled
|
||||
*/
|
||||
public boolean cancelRecoveriesForShard(ShardId shardId, String reason) {
|
||||
return onGoingRecoveries.cancelRecoveriesForShard(shardId, reason);
|
||||
}
|
||||
|
||||
public void startRecovery(final IndexShard indexShard, final DiscoveryNode sourceNode, final RecoveryListener listener) {
|
||||
// create a new recovery status, and process...
|
||||
final long recoveryId = onGoingRecoveries.startRecovery(indexShard, sourceNode, listener, recoverySettings.activityTimeout());
|
||||
|
@ -297,13 +286,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde
|
|||
*/
|
||||
private Store.MetadataSnapshot getStoreMetadataSnapshot(final RecoveryTarget recoveryTarget) {
|
||||
try {
|
||||
if (recoveryTarget.indexShard().indexSettings().isOnSharedFilesystem()) {
|
||||
// we are not going to copy any files, so don't bother listing files, potentially running into concurrency issues with the
|
||||
// primary changing files underneath us
|
||||
return Store.MetadataSnapshot.EMPTY;
|
||||
} else {
|
||||
return recoveryTarget.indexShard().snapshotStoreMetadata();
|
||||
}
|
||||
return recoveryTarget.indexShard().snapshotStoreMetadata();
|
||||
} catch (final org.apache.lucene.index.IndexNotFoundException e) {
|
||||
// happens on an empty folder. no need to log
|
||||
logger.trace("{} shard folder empty, recovering all files", recoveryTarget);
|
||||
|
|
|
@ -1,90 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.indices.recovery;
|
||||
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.translog.Translog;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
* A recovery handler that skips phase one as well as sending the translog snapshot.
|
||||
*/
|
||||
public class SharedFSRecoverySourceHandler extends RecoverySourceHandler {
|
||||
|
||||
private final IndexShard shard;
|
||||
private final StartRecoveryRequest request;
|
||||
|
||||
SharedFSRecoverySourceHandler(IndexShard shard, RecoveryTargetHandler recoveryTarget, StartRecoveryRequest request,
|
||||
Supplier<Long> currentClusterStateVersionSupplier,
|
||||
Function<String, Releasable> delayNewRecoveries, Settings nodeSettings) {
|
||||
super(shard, recoveryTarget, request, currentClusterStateVersionSupplier, delayNewRecoveries, -1, nodeSettings);
|
||||
this.shard = shard;
|
||||
this.request = request;
|
||||
}
|
||||
|
||||
@Override
|
||||
public RecoveryResponse recoverToTarget() throws IOException {
|
||||
boolean engineClosed = false;
|
||||
try {
|
||||
logger.trace("recovery [phase1]: skipping phase1 for shared filesystem");
|
||||
final long maxUnsafeAutoIdTimestamp = shard.segmentStats(false).getMaxUnsafeAutoIdTimestamp();
|
||||
if (request.isPrimaryRelocation()) {
|
||||
logger.debug("[phase1] closing engine on primary for shared filesystem recovery");
|
||||
try {
|
||||
// if we relocate we need to close the engine in order to open a new
|
||||
// IndexWriter on the other end of the relocation
|
||||
engineClosed = true;
|
||||
shard.flushAndCloseEngine();
|
||||
} catch (IOException e) {
|
||||
logger.warn("close engine failed", e);
|
||||
shard.failShard("failed to close engine (phase1)", e);
|
||||
}
|
||||
}
|
||||
prepareTargetForTranslog(0, maxUnsafeAutoIdTimestamp);
|
||||
finalizeRecovery();
|
||||
return response;
|
||||
} catch (Exception e) {
|
||||
if (engineClosed) {
|
||||
// If the relocation fails then the primary is closed and can't be
|
||||
// used anymore... (because it's closed) that's a problem, so in
|
||||
// that case, fail the shard to reallocate a new IndexShard and
|
||||
// create a new IndexWriter
|
||||
logger.info("recovery failed for primary shadow shard, failing shard");
|
||||
// pass the failure as null, as we want to ensure the store is not marked as corrupted
|
||||
shard.failShard("primary relocation failed on shared filesystem", e);
|
||||
} else {
|
||||
logger.info("recovery failed on shared filesystem", e);
|
||||
}
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected int sendSnapshot(final long startingSeqNo, final Translog.Snapshot snapshot) {
|
||||
logger.trace("skipping recovery of translog snapshot on shared filesystem");
|
||||
return 0;
|
||||
}
|
||||
|
||||
}
|
|
@@ -173,9 +173,6 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
case STILL_ALLOCATED:
// nothing to do
break;
case SHARED_FILE_SYSTEM:
// nothing to do
break;
default:
assert false : "unknown shard deletion check result: " + shardDeletionCheckResult;
}

@ -406,6 +406,8 @@ public class Node implements Closeable {
|
|||
final Transport transport = networkModule.getTransportSupplier().get();
|
||||
final TransportService transportService = newTransportService(settings, transport, threadPool,
|
||||
networkModule.getTransportInterceptor(), localNodeFactory, settingsModule.getClusterSettings());
|
||||
final SearchTransportService searchTransportService = new SearchTransportService(settings,
|
||||
settingsModule.getClusterSettings(), transportService);
|
||||
final Consumer<Binder> httpBind;
|
||||
final HttpServerTransport httpServerTransport;
|
||||
if (networkModule.isHttpEnabled()) {
|
||||
|
@ -447,8 +449,7 @@ public class Node implements Closeable {
|
|||
b.bind(IndicesService.class).toInstance(indicesService);
|
||||
b.bind(SearchService.class).toInstance(newSearchService(clusterService, indicesService,
|
||||
threadPool, scriptModule.getScriptService(), bigArrays, searchModule.getFetchPhase()));
|
||||
b.bind(SearchTransportService.class).toInstance(new SearchTransportService(settings,
|
||||
settingsModule.getClusterSettings(), transportService));
|
||||
b.bind(SearchTransportService.class).toInstance(searchTransportService);
|
||||
b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(settings, bigArrays,
|
||||
scriptModule.getScriptService()));
|
||||
b.bind(Transport.class).toInstance(transport);
|
||||
|
|
|
@@ -0,0 +1,63 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.rest.action.admin.cluster;

import org.elasticsearch.action.admin.cluster.remote.RemoteInfoAction;
import org.elasticsearch.action.admin.cluster.remote.RemoteInfoRequest;
import org.elasticsearch.action.admin.cluster.remote.RemoteInfoResponse;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.RestBuilderListener;

import java.io.IOException;

import static org.elasticsearch.rest.RestRequest.Method.GET;

public final class RestRemoteClusterInfoAction extends BaseRestHandler {

public RestRemoteClusterInfoAction(Settings settings, RestController controller) {
super(settings);
controller.registerHandler(GET, "_remote/info", this);
}

@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client)
throws IOException {
return channel -> client.execute(RemoteInfoAction.INSTANCE, new RemoteInfoRequest(),
new RestBuilderListener<RemoteInfoResponse>(channel) {
@Override
public RestResponse buildResponse(RemoteInfoResponse response, XContentBuilder builder) throws Exception {
response.toXContent(builder, request);
return new BytesRestResponse(RestStatus.OK, builder);
}
});
}
@Override
public boolean canTripCircuitBreaker() {
return false;
}
}

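As a quick illustration of how the new _remote/info endpoint could be called, here is a hedged sketch using the low-level REST client; the host, port, and client setup are assumptions for the example and are not part of this change.

import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class RemoteInfoRequestSketch {
    public static void main(String[] args) throws Exception {
        // Assumes a node listening on localhost:9200; adjust for your cluster.
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Response response = client.performRequest("GET", "/_remote/info");
            System.out.println(EntityUtils.toString(response.getEntity()));   // JSON keyed by remote cluster alias
        }
    }
}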
@@ -190,18 +190,10 @@ public class RestShardsAction extends AbstractCatAction {
table.addCell(shard.id());

IndexMetaData indexMeta = state.getState().getMetaData().getIndexSafe(shard.index());
boolean usesShadowReplicas = false;
if (indexMeta != null) {
usesShadowReplicas = indexMeta.isIndexUsingShadowReplicas();
}
if (shard.primary()) {
table.addCell("p");
} else {
if (usesShadowReplicas) {
table.addCell("s");
} else {
table.addCell("r");
}
table.addCell("r");
}
table.addCell(shard.state());
table.addCell(commonStats == null ? null : commonStats.getDocs().getCount());

@ -19,26 +19,18 @@
|
|||
|
||||
package org.elasticsearch.search.fetch.subphase;
|
||||
|
||||
import org.apache.lucene.index.LeafReader;
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.search.BooleanClause.Occur;
|
||||
import org.apache.lucene.search.BooleanQuery;
|
||||
import org.apache.lucene.search.ConstantScoreScorer;
|
||||
import org.apache.lucene.search.ConstantScoreWeight;
|
||||
import org.apache.lucene.search.DocIdSetIterator;
|
||||
import org.apache.lucene.search.DocValuesTermsQuery;
|
||||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.Query;
|
||||
import org.apache.lucene.search.Scorer;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.apache.lucene.search.TopDocsCollector;
|
||||
import org.apache.lucene.search.TopFieldCollector;
|
||||
import org.apache.lucene.search.TopScoreDocCollector;
|
||||
import org.apache.lucene.search.Weight;
|
||||
import org.apache.lucene.search.join.BitSetProducer;
|
||||
import org.apache.lucene.util.BitSet;
|
||||
import org.apache.lucene.search.join.ParentChildrenBlockJoinQuery;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.lucene.search.Queries;
|
||||
|
@ -48,9 +40,9 @@ import org.elasticsearch.index.mapper.ObjectMapper;
|
|||
import org.elasticsearch.index.mapper.ParentFieldMapper;
|
||||
import org.elasticsearch.index.mapper.Uid;
|
||||
import org.elasticsearch.index.mapper.UidFieldMapper;
|
||||
import org.elasticsearch.search.SearchHit;
|
||||
import org.elasticsearch.search.SearchHitField;
|
||||
import org.elasticsearch.search.fetch.FetchSubPhase;
|
||||
import org.elasticsearch.search.SearchHit;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
import org.elasticsearch.search.internal.SubSearchContext;
|
||||
|
||||
|
@ -131,7 +123,8 @@ public final class InnerHitsContext {
|
|||
}
|
||||
BitSetProducer parentFilter = context.bitsetFilterCache().getBitSetProducer(rawParentFilter);
|
||||
Query childFilter = childObjectMapper.nestedTypeFilter();
|
||||
Query q = Queries.filtered(query(), new NestedChildrenQuery(parentFilter, childFilter, hitContext));
|
||||
int parentDocId = hitContext.readerContext().docBase + hitContext.docId();
|
||||
Query q = Queries.filtered(query(), new ParentChildrenBlockJoinQuery(parentFilter, childFilter, parentDocId));
|
||||
|
||||
if (size() == 0) {
|
||||
return new TopDocs(context.searcher().count(q), Lucene.EMPTY_SCORE_DOCS, 0);
|
||||
|
@ -156,120 +149,6 @@ public final class InnerHitsContext {
|
|||
}
|
||||
}
|
||||
|
||||
// A filter that only emits the nested children docs of a specific nested parent doc
|
||||
static class NestedChildrenQuery extends Query {
|
||||
|
||||
private final BitSetProducer parentFilter;
|
||||
private final Query childFilter;
|
||||
private final int docId;
|
||||
private final LeafReader leafReader;
|
||||
|
||||
NestedChildrenQuery(BitSetProducer parentFilter, Query childFilter, FetchSubPhase.HitContext hitContext) {
|
||||
this.parentFilter = parentFilter;
|
||||
this.childFilter = childFilter;
|
||||
this.docId = hitContext.docId();
|
||||
this.leafReader = hitContext.readerContext().reader();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object obj) {
|
||||
if (sameClassAs(obj) == false) {
|
||||
return false;
|
||||
}
|
||||
NestedChildrenQuery other = (NestedChildrenQuery) obj;
|
||||
return parentFilter.equals(other.parentFilter)
|
||||
&& childFilter.equals(other.childFilter)
|
||||
&& docId == other.docId
|
||||
&& leafReader.getCoreCacheKey() == other.leafReader.getCoreCacheKey();
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int hash = classHash();
|
||||
hash = 31 * hash + parentFilter.hashCode();
|
||||
hash = 31 * hash + childFilter.hashCode();
|
||||
hash = 31 * hash + docId;
|
||||
hash = 31 * hash + leafReader.getCoreCacheKey().hashCode();
|
||||
return hash;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString(String field) {
|
||||
return "NestedChildren(parent=" + parentFilter + ",child=" + childFilter + ")";
|
||||
}
|
||||
|
||||
@Override
|
||||
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
|
||||
final Weight childWeight = childFilter.createWeight(searcher, false);
|
||||
return new ConstantScoreWeight(this) {
|
||||
@Override
|
||||
public Scorer scorer(LeafReaderContext context) throws IOException {
|
||||
// Nested docs only reside in a single segment, so no need to evaluate all segments
|
||||
if (!context.reader().getCoreCacheKey().equals(leafReader.getCoreCacheKey())) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// If docId == 0 then the parent doc has no child docs, because child docs are stored
// before the parent doc and because the parent doc is 0 we can safely assume that there are no child docs.
|
||||
if (docId == 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
final BitSet parents = parentFilter.getBitSet(context);
|
||||
final int firstChildDocId = parents.prevSetBit(docId - 1) + 1;
|
||||
// A parent doc doesn't have child docs, so we can early exit here:
|
||||
if (firstChildDocId == docId) {
|
||||
return null;
|
||||
}
|
||||
|
||||
final Scorer childrenScorer = childWeight.scorer(context);
|
||||
if (childrenScorer == null) {
|
||||
return null;
|
||||
}
|
||||
DocIdSetIterator childrenIterator = childrenScorer.iterator();
|
||||
final DocIdSetIterator it = new DocIdSetIterator() {
|
||||
|
||||
int doc = -1;
|
||||
|
||||
@Override
|
||||
public int docID() {
|
||||
return doc;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int nextDoc() throws IOException {
|
||||
return advance(doc + 1);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int advance(int target) throws IOException {
|
||||
target = Math.max(firstChildDocId, target);
|
||||
if (target >= docId) {
|
||||
// We're outside the child nested scope, so it is done
|
||||
return doc = NO_MORE_DOCS;
|
||||
} else {
|
||||
int advanced = childrenIterator.advance(target);
|
||||
if (advanced >= docId) {
|
||||
// We're outside the child nested scope, so it is done
|
||||
return doc = NO_MORE_DOCS;
|
||||
} else {
|
||||
return doc = advanced;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public long cost() {
|
||||
return Math.min(childrenIterator.cost(), docId - firstChildDocId);
|
||||
}
|
||||
|
||||
};
|
||||
return new ConstantScoreScorer(this, score(), it);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public static final class ParentChildInnerHits extends BaseInnerHits {
|
||||
|
|
|
@@ -27,7 +27,6 @@ import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.SortField;
import org.apache.lucene.util.BitSet;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.geo.GeoDistance;

@@ -491,12 +490,11 @@ public class GeoDistanceSortBuilder extends SortBuilder<GeoDistanceSortBuilder>

@Override
public SortFieldAndFormat build(QueryShardContext context) throws IOException {
final boolean indexCreatedBeforeV2_0 = context.indexVersionCreated().before(Version.V_2_0_0);

// validation was not available prior to 2.x, so to support bwc percolation queries we only ignore_malformed
// on 2.x created indexes
GeoPoint[] localPoints = points.toArray(new GeoPoint[points.size()]);
if (!indexCreatedBeforeV2_0 && !GeoValidationMethod.isIgnoreMalformed(validation)) {
if (GeoValidationMethod.isIgnoreMalformed(validation) == false) {
for (GeoPoint point : localPoints) {
if (GeoUtils.isValidLatitude(point.lat()) == false) {
throw new ElasticsearchParseException(

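The validation path above leans on latitude/longitude range checks. Purely as an illustration (this helper is not from the change), the checks boil down to something like:

public class GeoRangeSketch {
    // Latitude must lie in [-90, 90] and longitude in [-180, 180]; NaN is never valid.
    static boolean isValidLatitude(double lat) {
        return Double.isNaN(lat) == false && lat >= -90.0 && lat <= 90.0;
    }

    static boolean isValidLongitude(double lon) {
        return Double.isNaN(lon) == false && lon >= -180.0 && lon <= 180.0;
    }

    public static void main(String[] args) {
        System.out.println(isValidLatitude(45.5));    // true
        System.out.println(isValidLatitude(91.0));    // false -> would trigger the parse exception above
    }
}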
@ -33,44 +33,43 @@ import java.util.Locale;
|
|||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.elasticsearch.Version.V_2_2_0;
|
||||
import static org.elasticsearch.Version.V_5_0_0_alpha1;
|
||||
import static org.elasticsearch.Version.V_5_3_0_UNRELEASED;
|
||||
import static org.elasticsearch.Version.V_6_0_0_alpha1_UNRELEASED;
|
||||
import static org.elasticsearch.test.VersionUtils.randomVersion;
|
||||
import static org.hamcrest.CoreMatchers.equalTo;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.sameInstance;

public class VersionTests extends ESTestCase {

    public void testVersionComparison() throws Exception {
        assertThat(V_2_2_0.before(V_5_0_0_alpha1), is(true));
        assertThat(V_2_2_0.before(V_2_2_0), is(false));
        assertThat(V_5_0_0_alpha1.before(V_2_2_0), is(false));
        assertThat(V_5_3_0_UNRELEASED.before(V_6_0_0_alpha1_UNRELEASED), is(true));
        assertThat(V_5_3_0_UNRELEASED.before(V_5_3_0_UNRELEASED), is(false));
        assertThat(V_6_0_0_alpha1_UNRELEASED.before(V_5_3_0_UNRELEASED), is(false));

        assertThat(V_2_2_0.onOrBefore(V_5_0_0_alpha1), is(true));
        assertThat(V_2_2_0.onOrBefore(V_2_2_0), is(true));
        assertThat(V_5_0_0_alpha1.onOrBefore(V_2_2_0), is(false));
        assertThat(V_5_3_0_UNRELEASED.onOrBefore(V_6_0_0_alpha1_UNRELEASED), is(true));
        assertThat(V_5_3_0_UNRELEASED.onOrBefore(V_5_3_0_UNRELEASED), is(true));
        assertThat(V_6_0_0_alpha1_UNRELEASED.onOrBefore(V_5_3_0_UNRELEASED), is(false));

        assertThat(V_2_2_0.after(V_5_0_0_alpha1), is(false));
        assertThat(V_2_2_0.after(V_2_2_0), is(false));
        assertThat(V_5_0_0_alpha1.after(V_2_2_0), is(true));
        assertThat(V_5_3_0_UNRELEASED.after(V_6_0_0_alpha1_UNRELEASED), is(false));
        assertThat(V_5_3_0_UNRELEASED.after(V_5_3_0_UNRELEASED), is(false));
        assertThat(V_6_0_0_alpha1_UNRELEASED.after(V_5_3_0_UNRELEASED), is(true));

        assertThat(V_2_2_0.onOrAfter(V_5_0_0_alpha1), is(false));
        assertThat(V_2_2_0.onOrAfter(V_2_2_0), is(true));
        assertThat(V_5_0_0_alpha1.onOrAfter(V_2_2_0), is(true));
        assertThat(V_5_3_0_UNRELEASED.onOrAfter(V_6_0_0_alpha1_UNRELEASED), is(false));
        assertThat(V_5_3_0_UNRELEASED.onOrAfter(V_5_3_0_UNRELEASED), is(true));
        assertThat(V_6_0_0_alpha1_UNRELEASED.onOrAfter(V_5_3_0_UNRELEASED), is(true));

        assertTrue(Version.fromString("5.0.0-alpha2").onOrAfter(Version.fromString("5.0.0-alpha1")));
        assertTrue(Version.fromString("5.0.0").onOrAfter(Version.fromString("5.0.0-beta2")));
        assertTrue(Version.fromString("5.0.0-rc1").onOrAfter(Version.fromString("5.0.0-beta24")));
        assertTrue(Version.fromString("5.0.0-alpha24").before(Version.fromString("5.0.0-beta0")));

        assertThat(V_2_2_0, is(lessThan(V_5_0_0_alpha1)));
        assertThat(V_2_2_0.compareTo(V_2_2_0), is(0));
        assertThat(V_5_0_0_alpha1, is(greaterThan(V_2_2_0)));
        assertThat(V_5_3_0_UNRELEASED, is(lessThan(V_6_0_0_alpha1_UNRELEASED)));
        assertThat(V_5_3_0_UNRELEASED.compareTo(V_5_3_0_UNRELEASED), is(0));
        assertThat(V_6_0_0_alpha1_UNRELEASED, is(greaterThan(V_5_3_0_UNRELEASED)));
    }

    public void testMin() {

@@ -99,9 +98,11 @@ public class VersionTests extends ESTestCase {

    public void testMinimumIndexCompatibilityVersion() {
        assertEquals(Version.V_5_0_0, Version.V_6_0_0_alpha1_UNRELEASED.minimumIndexCompatibilityVersion());
        assertEquals(Version.V_2_0_0, Version.V_5_0_0.minimumIndexCompatibilityVersion());
        assertEquals(Version.V_2_0_0, Version.V_5_1_1_UNRELEASED.minimumIndexCompatibilityVersion());
        assertEquals(Version.V_2_0_0, Version.V_5_0_0_alpha1.minimumIndexCompatibilityVersion());
        assertEquals(Version.fromId(2000099), Version.V_5_0_0.minimumIndexCompatibilityVersion());
        assertEquals(Version.fromId(2000099),
            Version.V_5_1_1_UNRELEASED.minimumIndexCompatibilityVersion());
        assertEquals(Version.fromId(2000099),
            Version.V_5_0_0_alpha1.minimumIndexCompatibilityVersion());
    }

    public void testVersionConstantPresent() {

@@ -155,7 +156,8 @@ public class VersionTests extends ESTestCase {

    public void testIndexCreatedVersion() {
        // an actual index has a IndexMetaData.SETTING_INDEX_UUID
        final Version version = randomFrom(Version.V_2_0_0, Version.V_2_3_0, Version.V_5_0_0_alpha1);
        final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_2,
            Version.V_5_2_0_UNRELEASED, Version.V_6_0_0_alpha1_UNRELEASED);
        assertEquals(version, Version.indexCreated(Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "foo").put(IndexMetaData.SETTING_VERSION_CREATED, version).build()));
    }

@@ -230,7 +232,7 @@ public class VersionTests extends ESTestCase {
        });
        assertSame(Version.CURRENT, Version.fromString(Version.CURRENT.toString()));

        assertSame(Version.fromString("2.0.0-SNAPSHOT"), Version.fromString("2.0.0"));
        assertEquals(Version.fromString("2.0.0-SNAPSHOT"), Version.fromId(2000099));

        expectThrows(IllegalArgumentException.class, () -> {
            Version.fromString("5.0.0-SNAPSHOT");

@@ -325,8 +327,8 @@ public class VersionTests extends ESTestCase {
    public void testIsCompatible() {
        assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()));
        assertTrue(isCompatible(Version.V_5_0_0, Version.V_6_0_0_alpha1_UNRELEASED));
        assertFalse(isCompatible(Version.V_2_0_0, Version.V_6_0_0_alpha1_UNRELEASED));
        assertFalse(isCompatible(Version.V_2_0_0, Version.V_5_0_0));
        assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_0_0_alpha1_UNRELEASED));
        assertFalse(isCompatible(Version.fromId(2000099), Version.V_5_0_0));
    }

    public boolean isCompatible(Version left, Version right) {
@@ -34,10 +34,10 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.Hit;
import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.SearchFailure;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.Hit;
import org.elasticsearch.action.bulk.byscroll.ScrollableHitSource.SearchFailure;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexRequest;

@@ -199,7 +199,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
        client.scrollsToReject = randomIntBetween(0, testRequest.getMaxRetries() - 1);
        DummyAsyncBulkByScrollAction action = new DummyActionWithoutBackoff();
        action.setScroll(scrollId());
        action.startNextScroll(timeValueNanos(System.nanoTime()), 0);
        TimeValue now = timeValueNanos(System.nanoTime());
        action.startNextScroll(now, now, 0);
        assertBusy(() -> assertEquals(client.scrollsToReject + 1, client.scrollAttempts.get()));
        if (listener.isDone()) {
            Object result = listener.get();

@@ -213,7 +214,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
        client.scrollsToReject = testRequest.getMaxRetries() + randomIntBetween(1, 100);
        DummyAsyncBulkByScrollAction action = new DummyActionWithoutBackoff();
        action.setScroll(scrollId());
        action.startNextScroll(timeValueNanos(System.nanoTime()), 0);
        TimeValue now = timeValueNanos(System.nanoTime());
        action.startNextScroll(now, now, 0);
        assertBusy(() -> assertEquals(testRequest.getMaxRetries() + 1, client.scrollAttempts.get()));
        assertBusy(() -> assertTrue(listener.isDone()));
        ExecutionException e = expectThrows(ExecutionException.class, () -> listener.get());

@@ -438,7 +440,9 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
        // Set throttle to 1 request per second to make the math simpler
        testTask.rethrottle(1f);
        // Make the last batch look nearly instant but have 100 documents
        action.startNextScroll(timeValueNanos(System.nanoTime()), 100);
        TimeValue lastBatchStartTime = timeValueNanos(System.nanoTime());
        TimeValue now = timeValueNanos(lastBatchStartTime.nanos() + 1);
        action.startNextScroll(lastBatchStartTime, now, 100);

        // So the next request is going to have to wait an extra 100 seconds or so (base was 10 seconds, so 110ish)
        assertThat(client.lastScroll.get().request.scroll().keepAlive().seconds(), either(equalTo(110L)).or(equalTo(109L)));

@@ -451,14 +455,13 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {

        if (randomBoolean()) {
            client.lastScroll.get().listener.onResponse(searchResponse);
            // The delay is still 100ish seconds because there hasn't been much time between when we requested the bulk and when we got it.
            assertThat(capturedDelay.get().seconds(), either(equalTo(100L)).or(equalTo(99L)));
            assertEquals(99, capturedDelay.get().seconds());
        } else {
            // Let's rethrottle between the starting the scroll and getting the response
            testTask.rethrottle(10f);
            client.lastScroll.get().listener.onResponse(searchResponse);
            // The delay uses the new throttle
            assertThat(capturedDelay.get().seconds(), either(equalTo(10L)).or(equalTo(9L)));
            assertEquals(9, capturedDelay.get().seconds());
        }

        // Running the command ought to increment the delay counter on the task.

@@ -483,7 +486,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
        CountDownLatch successLatch = new CountDownLatch(1);
        DummyAsyncBulkByScrollAction action = new DummyActionWithoutBackoff() {
            @Override
            void startNextScroll(TimeValue lastBatchStartTime, int lastBatchSize) {
            void startNextScroll(TimeValue lastBatchStartTime, TimeValue now, int lastBatchSize) {
                successLatch.countDown();
            }
        };

@@ -574,7 +577,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
    }

    public void testCancelBeforeStartNextScroll() throws Exception {
        cancelTaskCase((DummyAsyncBulkByScrollAction action) -> action.startNextScroll(timeValueNanos(System.nanoTime()), 0));
        TimeValue now = timeValueNanos(System.nanoTime());
        cancelTaskCase((DummyAsyncBulkByScrollAction action) -> action.startNextScroll(now, now, 0));
    }

    public void testCancelBeforeRefreshAndFinish() throws Exception {
@@ -72,7 +72,7 @@ public class MainResponseTests extends ESTestCase {

    public void testToXContent() throws IOException {
        Build build = new Build("buildHash", "2016-11-15".toString(), true);
        Version version = Version.V_2_4_5;
        Version version = Version.CURRENT;
        MainResponse response = new MainResponse("nodeName", version, new ClusterName("clusterName"), "clusterUuid", build, true);
        XContentBuilder builder = XContentFactory.jsonBuilder();
        response.toXContent(builder, ToXContent.EMPTY_PARAMS);

@@ -81,11 +81,11 @@ public class MainResponseTests extends ESTestCase {
            + "\"cluster_name\":\"clusterName\","
            + "\"cluster_uuid\":\"clusterUuid\","
            + "\"version\":{"
            + "\"number\":\"2.4.5\","
            + "\"number\":\"" + version.toString() + "\","
            + "\"build_hash\":\"buildHash\","
            + "\"build_date\":\"2016-11-15\","
            + "\"build_snapshot\":true,"
            + "\"lucene_version\":\"5.5.2\"},"
            + "\"lucene_version\":\"" + version.luceneVersion.toString() + "\"},"
            + "\"tagline\":\"You Know, for Search\""
            + "}", builder.string());
    }
@@ -19,8 +19,13 @@
package org.elasticsearch.action.search;

import org.apache.lucene.store.AlreadyClosedException;
import org.elasticsearch.Build;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest;

@@ -33,25 +38,31 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.CancellableThreads;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.http.HttpInfo;
import org.elasticsearch.mocksocket.MockServerSocket;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.RemoteTransportException;
import org.elasticsearch.transport.TransportConnectionListener;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.UnknownHostException;
import java.nio.channels.AlreadyConnectedException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
@@ -519,4 +530,187 @@ public class RemoteClusterConnectionTests extends ESTestCase {
            }
        }
    }

    private static void installNodeStatsHandler(TransportService service, DiscoveryNode...nodes) {
        service.registerRequestHandler(NodesInfoAction.NAME, NodesInfoRequest::new, ThreadPool.Names.SAME, false, false,
            (request, channel) -> {
                List<NodeInfo> nodeInfos = new ArrayList<>();
                int port = 80;
                for (DiscoveryNode node : nodes) {
                    HttpInfo http = new HttpInfo(new BoundTransportAddress(new TransportAddress[]{node.getAddress()},
                        new TransportAddress(node.getAddress().address().getAddress(), port++)), 100);
                    nodeInfos.add(new NodeInfo(node.getVersion(), Build.CURRENT, node, null, null, null, null, null, null, http, null,
                        null, null));
                }
                channel.sendResponse(new NodesInfoResponse(ClusterName.DEFAULT, nodeInfos, Collections.emptyList()));
            });

    }

    public void testGetConnectionInfo() throws Exception {
        List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>();
        try (MockTransportService transport1 = startTransport("seed_node", knownNodes, Version.CURRENT);
            MockTransportService transport2 = startTransport("seed_node_1", knownNodes, Version.CURRENT);
            MockTransportService transport3 = startTransport("discoverable_node", knownNodes, Version.CURRENT)) {
            DiscoveryNode node1 = transport1.getLocalDiscoNode();
            DiscoveryNode node2 = transport3.getLocalDiscoNode();
            DiscoveryNode node3 = transport2.getLocalDiscoNode();
            knownNodes.add(transport1.getLocalDiscoNode());
            knownNodes.add(transport3.getLocalDiscoNode());
            knownNodes.add(transport2.getLocalDiscoNode());
            Collections.shuffle(knownNodes, random());
            List<DiscoveryNode> seedNodes = Arrays.asList(node3, node1, node2);
            Collections.shuffle(seedNodes, random());

            try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {
                service.start();
                service.acceptIncomingRequests();
                int maxNumConnections = randomIntBetween(1, 5);
                try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster",
                    seedNodes, service, maxNumConnections, n -> true)) {
                    // test no nodes connected
                    RemoteConnectionInfo remoteConnectionInfo = assertSerialization(getRemoteConnectionInfo(connection));
                    assertNotNull(remoteConnectionInfo);
                    assertEquals(0, remoteConnectionInfo.numNodesConnected);
                    assertEquals(0, remoteConnectionInfo.seedNodes.size());
                    assertEquals(0, remoteConnectionInfo.httpAddresses.size());
                    assertEquals(maxNumConnections, remoteConnectionInfo.connectionsPerCluster);
                    assertEquals("test-cluster", remoteConnectionInfo.clusterAlias);
                    updateSeedNodes(connection, seedNodes);
                    expectThrows(RemoteTransportException.class, () -> getRemoteConnectionInfo(connection));

                    for (MockTransportService s : Arrays.asList(transport1, transport2, transport3)) {
                        installNodeStatsHandler(s, node1, node2, node3);
                    }

                    remoteConnectionInfo = getRemoteConnectionInfo(connection);
                    remoteConnectionInfo = assertSerialization(remoteConnectionInfo);
                    assertNotNull(remoteConnectionInfo);
                    assertEquals(connection.getNumNodesConnected(), remoteConnectionInfo.numNodesConnected);
                    assertEquals(Math.min(3, maxNumConnections), connection.getNumNodesConnected());
                    assertEquals(3, remoteConnectionInfo.seedNodes.size());
                    assertEquals(remoteConnectionInfo.httpAddresses.size(), Math.min(3, maxNumConnections));
                    assertEquals(maxNumConnections, remoteConnectionInfo.connectionsPerCluster);
                    assertEquals("test-cluster", remoteConnectionInfo.clusterAlias);
                    for (TransportAddress address : remoteConnectionInfo.httpAddresses) {
                        assertTrue("port range mismatch: " + address.getPort(), address.getPort() >= 80 && address.getPort() <= 90);
                    }
                }
            }
        }
    }

    public void testRemoteConnectionInfo() throws IOException {
        RemoteConnectionInfo stats = new RemoteConnectionInfo("test_cluster",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)),
            4, 3, TimeValue.timeValueMinutes(30));
        assertSerialization(stats);

        RemoteConnectionInfo stats1 = new RemoteConnectionInfo("test_cluster",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)),
            4, 4, TimeValue.timeValueMinutes(30));
        assertSerialization(stats1);
        assertNotEquals(stats, stats1);

        stats1 = new RemoteConnectionInfo("test_cluster_1",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)),
            4, 3, TimeValue.timeValueMinutes(30));
        assertSerialization(stats1);
        assertNotEquals(stats, stats1);

        stats1 = new RemoteConnectionInfo("test_cluster",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 15)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)),
            4, 3, TimeValue.timeValueMinutes(30));
        assertSerialization(stats1);
        assertNotEquals(stats, stats1);

        stats1 = new RemoteConnectionInfo("test_cluster",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 87)),
            4, 3, TimeValue.timeValueMinutes(30));
        assertSerialization(stats1);
        assertNotEquals(stats, stats1);

        stats1 = new RemoteConnectionInfo("test_cluster",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)),
            4, 3, TimeValue.timeValueMinutes(325));
        assertSerialization(stats1);
        assertNotEquals(stats, stats1);

        stats1 = new RemoteConnectionInfo("test_cluster",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 1)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS, 80)),
            5, 3, TimeValue.timeValueMinutes(30));
        assertSerialization(stats1);
        assertNotEquals(stats, stats1);
    }

    private RemoteConnectionInfo assertSerialization(RemoteConnectionInfo info) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.setVersion(Version.CURRENT);
            info.writeTo(out);
            StreamInput in = out.bytes().streamInput();
            in.setVersion(Version.CURRENT);
            RemoteConnectionInfo remoteConnectionInfo = new RemoteConnectionInfo(in);
            assertEquals(info, remoteConnectionInfo);
            assertEquals(info.hashCode(), remoteConnectionInfo.hashCode());
            return randomBoolean() ? info : remoteConnectionInfo;
        }
    }

    public void testRenderConnectionInfoXContent() throws IOException {
        RemoteConnectionInfo stats = new RemoteConnectionInfo("test_cluster",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,1)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,80)),
            4, 3, TimeValue.timeValueMinutes(30));
        stats = assertSerialization(stats);
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject();
        stats.toXContent(builder, null);
        builder.endObject();
        assertEquals("{\"test_cluster\":{\"seeds\":[\"0.0.0.0:1\"],\"http_addresses\":[\"0.0.0.0:80\"],\"connected\":true," +
            "\"num_nodes_connected\":3,\"max_connections_per_cluster\":4,\"initial_connect_timeout\":\"30m\"}}", builder.string());

        stats = new RemoteConnectionInfo("some_other_cluster",
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,1), new TransportAddress(TransportAddress.META_ADDRESS,2)),
            Arrays.asList(new TransportAddress(TransportAddress.META_ADDRESS,80), new TransportAddress(TransportAddress.META_ADDRESS,81)),
            2, 0, TimeValue.timeValueSeconds(30));
        stats = assertSerialization(stats);
        builder = XContentFactory.jsonBuilder();
        builder.startObject();
        stats.toXContent(builder, null);
        builder.endObject();
        assertEquals("{\"some_other_cluster\":{\"seeds\":[\"0.0.0.0:1\",\"0.0.0.0:2\"],\"http_addresses\":[\"0.0.0.0:80\",\"0.0.0.0:81\"],"
            + "\"connected\":false,\"num_nodes_connected\":0,\"max_connections_per_cluster\":2,\"initial_connect_timeout\":\"30s\"}}",
            builder.string());
    }

    private RemoteConnectionInfo getRemoteConnectionInfo(RemoteClusterConnection connection) throws Exception {
        AtomicReference<RemoteConnectionInfo> statsRef = new AtomicReference<>();
        AtomicReference<Exception> exceptionRef = new AtomicReference<>();
        CountDownLatch latch = new CountDownLatch(1);
        connection.getConnectionInfo(new ActionListener<RemoteConnectionInfo>() {
            @Override
            public void onResponse(RemoteConnectionInfo remoteConnectionInfo) {
                statsRef.set(remoteConnectionInfo);
                latch.countDown();
            }

            @Override
            public void onFailure(Exception e) {
                exceptionRef.set(e);
                latch.countDown();
            }
        });
        latch.await();
        if (exceptionRef.get() != null) {
            throw exceptionRef.get();
        }
        return statsRef.get();
    }
}

@@ -143,14 +143,14 @@ public class RemoteClusterServiceTests extends ESTestCase {
        assertTrue(service.isRemoteClusterRegistered("cluster_2"));
        assertFalse(service.isRemoteClusterRegistered("foo"));
        Map<String, List<String>> perClusterIndices = service.groupClusterIndices(new String[]{"foo:bar", "cluster_1:bar",
            "cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo"}, i -> false);
            "cluster_2:foo:bar", "cluster_1:test", "cluster_2:foo*", "foo", "cluster*:baz", "*:boo", "no*match:boo"}, i -> false);
        String[] localIndices = perClusterIndices.computeIfAbsent(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY,
            k -> Collections.emptyList()).toArray(new String[0]);
        assertNotNull(perClusterIndices.remove(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY));
        assertArrayEquals(new String[]{"foo:bar", "foo"}, localIndices);
        assertArrayEquals(new String[]{"foo:bar", "foo", "no*match:boo"}, localIndices);
        assertEquals(2, perClusterIndices.size());
        assertEquals(Arrays.asList("bar", "test"), perClusterIndices.get("cluster_1"));
        assertEquals(Arrays.asList("foo:bar", "foo*"), perClusterIndices.get("cluster_2"));
        assertEquals(Arrays.asList("bar", "test", "baz", "boo"), perClusterIndices.get("cluster_1"));
        assertEquals(Arrays.asList("foo:bar", "foo*", "baz", "boo"), perClusterIndices.get("cluster_2"));

        IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () ->
            service.groupClusterIndices(new String[]{"foo:bar", "cluster_1:bar",
@@ -0,0 +1,124 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.support;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

public class GroupedActionListenerTests extends ESTestCase {

    public void testNotifications() throws InterruptedException {
        AtomicReference<Collection<Integer>> resRef = new AtomicReference<>();
        ActionListener<Collection<Integer>> result = new ActionListener<Collection<Integer>>() {
            @Override
            public void onResponse(Collection<Integer> integers) {
                resRef.set(integers);
            }

            @Override
            public void onFailure(Exception e) {
                throw new AssertionError(e);
            }
        };
        final int groupSize = randomIntBetween(10, 1000);
        AtomicInteger count = new AtomicInteger();
        Collection<Integer> defaults = randomBoolean() ? Collections.singletonList(-1) :
            Collections.emptyList();
        GroupedActionListener<Integer> listener = new GroupedActionListener<>(result, groupSize,
            defaults);
        int numThreads = randomIntBetween(2, 5);
        Thread[] threads = new Thread[numThreads];
        CyclicBarrier barrier = new CyclicBarrier(numThreads);
        for (int i = 0; i < numThreads; i++) {
            threads[i] = new Thread() {
                @Override
                public void run() {
                    try {
                        barrier.await(10, TimeUnit.SECONDS);
                    } catch (Exception e) {
                        throw new AssertionError(e);
                    }
                    int c = 0;
                    while((c = count.incrementAndGet()) <= groupSize) {
                        listener.onResponse(c-1);
                    }
                }
            };
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();
        }
        assertNotNull(resRef.get());
        ArrayList<Integer> list = new ArrayList<>(resRef.get());
        Collections.sort(list);
        int expectedSize = groupSize + defaults.size();
        assertEquals(expectedSize, resRef.get().size());
        int expectedValue = defaults.isEmpty() ? 0 : -1;
        for (int i = 0; i < expectedSize; i++) {
            assertEquals(Integer.valueOf(expectedValue++), list.get(i));
        }
    }

    public void testFailed() {
        AtomicReference<Collection<Integer>> resRef = new AtomicReference<>();
        AtomicReference<Exception> excRef = new AtomicReference<>();

        ActionListener<Collection<Integer>> result = new ActionListener<Collection<Integer>>() {
            @Override
            public void onResponse(Collection<Integer> integers) {
                resRef.set(integers);
            }

            @Override
            public void onFailure(Exception e) {
                excRef.set(e);
            }
        };
        Collection<Integer> defaults = randomBoolean() ? Collections.singletonList(-1) :
            Collections.emptyList();
        int size = randomIntBetween(3, 4);
        GroupedActionListener<Integer> listener = new GroupedActionListener<>(result, size,
            defaults);
        listener.onResponse(0);
        IOException ioException = new IOException();
        RuntimeException rtException = new RuntimeException();
        listener.onFailure(rtException);
        listener.onFailure(ioException);
        if (size == 4) {
            listener.onResponse(2);
        }
        assertNotNull(excRef.get());
        assertEquals(rtException, excRef.get());
        assertEquals(1, excRef.get().getSuppressed().length);
        assertEquals(ioException, excRef.get().getSuppressed()[0]);
        assertNull(resRef.get());
        listener.onResponse(1);
        assertNull(resRef.get());
    }
}
@@ -132,33 +132,6 @@ public class ReplicationOperationTests extends ESTestCase {
        assertThat(primary.knownLocalCheckpoints, equalTo(replicasProxy.generatedLocalCheckpoints));
    }


    public void testReplicationWithShadowIndex() throws Exception {
        final String index = "test";
        final ShardId shardId = new ShardId(index, "_na_", 0);

        final ClusterState state = stateWithActivePrimary(index, true, randomInt(5));
        final long primaryTerm = state.getMetaData().index(index).primaryTerm(0);
        final IndexShardRoutingTable indexShardRoutingTable = state.getRoutingTable().shardRoutingTable(shardId);
        final ShardRouting primaryShard = indexShardRoutingTable.primaryShard();

        Request request = new Request(shardId);
        PlainActionFuture<TestPrimary.Result> listener = new PlainActionFuture<>();
        final TestReplicationOperation op = new TestReplicationOperation(request,
            new TestPrimary(primaryShard, primaryTerm), listener, false,
            new TestReplicaProxy(), () -> state, logger, "test");
        op.execute();
        assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
        assertThat(request.processedOnReplicas, equalTo(Collections.emptySet()));
        assertTrue("listener is not marked as done", listener.isDone());
        ShardInfo shardInfo = listener.actionGet().getShardInfo();
        assertThat(shardInfo.getFailed(), equalTo(0));
        assertThat(shardInfo.getFailures(), arrayWithSize(0));
        assertThat(shardInfo.getSuccessful(), equalTo(1));
        assertThat(shardInfo.getTotal(), equalTo(indexShardRoutingTable.getSize()));
    }


    public void testDemotedPrimary() throws Exception {
        final String index = "test";
        final ShardId shardId = new ShardId(index, "_na_", 0);

@@ -310,7 +283,7 @@ public class ReplicationOperationTests extends ESTestCase {
        final ShardRouting primaryShard = shardRoutingTable.primaryShard();
        final TestReplicationOperation op = new TestReplicationOperation(request,
            new TestPrimary(primaryShard, primaryTerm),
            listener, randomBoolean(), new TestReplicaProxy(), () -> state, logger, "test");
            listener, new TestReplicaProxy(), () -> state, logger, "test");

        if (passesActiveShardCheck) {
            assertThat(op.checkActiveShardCount(), nullValue());

@@ -519,13 +492,14 @@ public class ReplicationOperationTests extends ESTestCase {
    class TestReplicationOperation extends ReplicationOperation<Request, Request, TestPrimary.Result> {
        TestReplicationOperation(Request request, Primary<Request, Request, TestPrimary.Result> primary,
                ActionListener<TestPrimary.Result> listener, Replicas<Request> replicas, Supplier<ClusterState> clusterStateSupplier) {
            this(request, primary, listener, true, replicas, clusterStateSupplier, ReplicationOperationTests.this.logger, "test");
            this(request, primary, listener, replicas, clusterStateSupplier, ReplicationOperationTests.this.logger, "test");
        }

        TestReplicationOperation(Request request, Primary<Request, Request, TestPrimary.Result> primary,
                ActionListener<TestPrimary.Result> listener, boolean executeOnReplicas,
                Replicas<Request> replicas, Supplier<ClusterState> clusterStateSupplier, Logger logger, String opType) {
            super(request, primary, listener, executeOnReplicas, replicas, clusterStateSupplier, logger, opType);
                ActionListener<TestPrimary.Result> listener,
                Replicas<Request> replicas, Supplier<ClusterState> clusterStateSupplier,
                Logger logger, String opType) {
            super(request, primary, listener, replicas, clusterStateSupplier, logger, opType);
        }
    }

@@ -497,8 +497,7 @@ public class TransportReplicationActionTests extends ESTestCase {
                    createReplicatedOperation(
                            Request request,
                            ActionListener<TransportReplicationAction.PrimaryResult<Request, TestResponse>> actionListener,
                            TransportReplicationAction<Request, Request, TestResponse>.PrimaryShardReference primaryShardReference,
                            boolean executeOnReplicas) {
                            TransportReplicationAction<Request, Request, TestResponse>.PrimaryShardReference primaryShardReference) {
                return new NoopReplicationOperation(request, actionListener) {
                    public void execute() throws Exception {
                        assertPhase(task, "primary");

@@ -550,8 +549,7 @@ public class TransportReplicationActionTests extends ESTestCase {
                    createReplicatedOperation(
                            Request request,
                            ActionListener<TransportReplicationAction.PrimaryResult<Request, TestResponse>> actionListener,
                            TransportReplicationAction<Request, Request, TestResponse>.PrimaryShardReference primaryShardReference,
                            boolean executeOnReplicas) {
                            TransportReplicationAction<Request, Request, TestResponse>.PrimaryShardReference primaryShardReference) {
                return new NoopReplicationOperation(request, actionListener) {
                    public void execute() throws Exception {
                        assertPhase(task, "primary");

@@ -650,35 +648,6 @@ public class TransportReplicationActionTests extends ESTestCase {
        assertEquals(0, shardFailedRequests.length);
    }

    public void testShadowIndexDisablesReplication() throws Exception {
        final String index = "test";
        final ShardId shardId = new ShardId(index, "_na_", 0);

        ClusterState state = stateWithActivePrimary(index, true, randomInt(5));
        MetaData.Builder metaData = MetaData.builder(state.metaData());
        Settings.Builder settings = Settings.builder().put(metaData.get(index).getSettings());
        settings.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true);
        metaData.put(IndexMetaData.builder(metaData.get(index)).settings(settings));
        state = ClusterState.builder(state).metaData(metaData).build();
        setState(clusterService, state);
        AtomicBoolean executed = new AtomicBoolean();
        ShardRouting primaryShard = state.routingTable().shardRoutingTable(shardId).primaryShard();
        action.new AsyncPrimaryAction(new Request(shardId), primaryShard.allocationId().getId(),
            createTransportChannel(new PlainActionFuture<>()), null) {
            @Override
            protected ReplicationOperation<Request, Request, TestAction.PrimaryResult<Request, TestResponse>> createReplicatedOperation(
                    Request request, ActionListener<TransportReplicationAction.PrimaryResult<Request, TestResponse>> actionListener,
                    TransportReplicationAction<Request, Request, TestResponse>.PrimaryShardReference primaryShardReference,
                    boolean executeOnReplicas) {
                assertFalse(executeOnReplicas);
                assertFalse(executed.getAndSet(true));
                return new NoopReplicationOperation(request, actionListener);
            }

        }.run();
        assertThat(executed.get(), equalTo(true));
    }

    public void testSeqNoIsSetOnPrimary() throws Exception {
        final String index = "test";
        final ShardId shardId = new ShardId(index, "_na_", 0);

@@ -738,8 +707,7 @@ public class TransportReplicationActionTests extends ESTestCase {
                    createReplicatedOperation(
                            Request request,
                            ActionListener<TransportReplicationAction.PrimaryResult<Request, TestResponse>> actionListener,
                            TransportReplicationAction<Request, Request, TestResponse>.PrimaryShardReference primaryShardReference,
                            boolean executeOnReplicas) {
                            TransportReplicationAction<Request, Request, TestResponse>.PrimaryShardReference primaryShardReference) {
                assertIndexShardCounter(1);
                if (throwExceptionOnCreation) {
                    throw new ElasticsearchException("simulated exception, during createReplicatedOperation");

@@ -1150,7 +1118,7 @@ public class TransportReplicationActionTests extends ESTestCase {
    class NoopReplicationOperation extends ReplicationOperation<Request, Request, TestAction.PrimaryResult<Request, TestResponse>> {

        NoopReplicationOperation(Request request, ActionListener<TestAction.PrimaryResult<Request, TestResponse>> listener) {
            super(request, null, listener, true, null, null, TransportReplicationActionTests.this.logger, "noop");
            super(request, null, listener, null, null, TransportReplicationActionTests.this.logger, "noop");
        }

        @Override
@@ -79,22 +79,19 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
            false,
            output -> assertThat(output, containsString("Positional arguments not allowed, found [foo]")),
            (foreground, pidFile, quiet, esSettings) -> {},
            "foo"
        );
            "foo");
        runTest(
            ExitCodes.USAGE,
            false,
            output -> assertThat(output, containsString("Positional arguments not allowed, found [foo, bar]")),
            (foreground, pidFile, quiet, esSettings) -> {},
            "foo", "bar"
        );
            "foo", "bar");
        runTest(
            ExitCodes.USAGE,
            false,
            output -> assertThat(output, containsString("Positional arguments not allowed, found [foo]")),
            (foreground, pidFile, quiet, esSettings) -> {},
            "-E", "foo=bar", "foo", "-E", "baz=qux"
        );
            "-E", "foo=bar", "foo", "-E", "baz=qux");
    }

    public void testThatPidFileCanBeConfigured() throws Exception {

@@ -157,18 +154,25 @@ public class ElasticsearchCliTests extends ESElasticsearchCliTestCase {
            assertThat(settings, hasEntry("foo", "bar"));
            assertThat(settings, hasEntry("baz", "qux"));
            },
            "-Efoo=bar", "-E", "baz=qux"
        );
            "-Efoo=bar", "-E", "baz=qux");
    }

    public void testElasticsearchSettingCanNotBeEmpty() throws Exception {
        runTest(
            ExitCodes.USAGE,
            false,
            output -> assertThat(output, containsString("Setting [foo] must not be empty")),
            output -> assertThat(output, containsString("setting [foo] must not be empty")),
            (foreground, pidFile, quiet, esSettings) -> {},
            "-E", "foo="
        );
            "-E", "foo=");
    }

    public void testElasticsearchSettingCanNotBeDuplicated() throws Exception {
        runTest(
            ExitCodes.USAGE,
            false,
            output -> assertThat(output, containsString("setting [foo] already set, saw [bar] and [baz]")),
            (foreground, pidFile, quiet, initialEnv) -> {},
            "-E", "foo=bar", "-E", "foo=baz");
    }

    public void testUnknownOption() throws Exception {
@@ -1,95 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.bwcompat;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;

import java.util.Arrays;
import java.util.Collection;

import org.elasticsearch.Version;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.range.Range;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;

@ESIntegTestCase.SuiteScopeTestCase
public class IpFieldBwCompatIT extends ESIntegTestCase {

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return Arrays.asList(InternalSettingsPlugin.class); // uses index.merge.enabled
    }

    @Override
    public void setupSuiteScopeCluster() throws Exception {
        assertAcked(prepareCreate("old_index")
            .setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_3_3.id)
            .addMapping("type", "ip_field", "type=ip"));
        assertAcked(prepareCreate("new_index")
            .addMapping("type", "ip_field", "type=ip"));

        indexRandom(true,
            client().prepareIndex("old_index", "type", "1").setSource("ip_field", "127.0.0.1"),
            client().prepareIndex("new_index", "type", "1").setSource("ip_field", "127.0.0.1"),
            client().prepareIndex("new_index", "type", "2").setSource("ip_field", "::1"));
    }

    public void testSort() {
        SearchResponse response = client().prepareSearch("old_index", "new_index")
            .addSort(SortBuilders.fieldSort("ip_field")).get();
        assertNoFailures(response);
        assertEquals(3, response.getHits().getTotalHits());
        assertEquals("::1", response.getHits().getAt(0).getSortValues()[0]);
        assertEquals("127.0.0.1", response.getHits().getAt(1).getSortValues()[0]);
        assertEquals("127.0.0.1", response.getHits().getAt(2).getSortValues()[0]);
    }

    public void testRangeAgg() {
        SearchResponse response = client().prepareSearch("old_index", "new_index")
            .addAggregation(AggregationBuilders.ipRange("ip_range").field("ip_field")
                .addMaskRange("127.0.0.1/16")
                .addMaskRange("::1/64")).get();
        assertNoFailures(response);
        assertEquals(3, response.getHits().getTotalHits());
        Range range = response.getAggregations().get("ip_range");
        assertEquals(2, range.getBuckets().size());
        assertEquals("::1/64", range.getBuckets().get(0).getKeyAsString());
        assertEquals(3, range.getBuckets().get(0).getDocCount());
        assertEquals("127.0.0.1/16", range.getBuckets().get(1).getKeyAsString());
        assertEquals(2, range.getBuckets().get(1).getDocCount());
    }

    public void testTermsAgg() {
        SearchResponse response = client().prepareSearch("old_index", "new_index")
            .addAggregation(AggregationBuilders.terms("ip_terms").field("ip_field")).get();
        assertNoFailures(response);
        assertEquals(3, response.getHits().getTotalHits());
        Terms terms = response.getAggregations().get("ip_terms");
        assertEquals(2, terms.getBuckets().size());
        assertEquals(2, terms.getBucketByKey("127.0.0.1").getDocCount());
        assertEquals(1, terms.getBucketByKey("::1").getDocCount());
    }
}
@@ -316,13 +316,11 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
        ElasticsearchAssertions.assertNoFailures(searchRsp);
        assertEquals(numDocs, searchRsp.getHits().getTotalHits());
        GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings(indexName).get();
        Version versionCreated = Version.fromId(Integer.parseInt(getSettingsResponse.getSetting(indexName, "index.version.created")));
        if (versionCreated.onOrAfter(Version.V_2_4_0)) {
            searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.existsQuery("field.with.dots"));
            searchRsp = searchReq.get();
            ElasticsearchAssertions.assertNoFailures(searchRsp);
            assertEquals(numDocs, searchRsp.getHits().getTotalHits());
        }
        searchReq = client().prepareSearch(indexName)
            .setQuery(QueryBuilders.existsQuery("field.with.dots"));
        searchRsp = searchReq.get();
        ElasticsearchAssertions.assertNoFailures(searchRsp);
        assertEquals(numDocs, searchRsp.getHits().getTotalHits());
    }

    boolean findPayloadBoostInExplanation(Explanation expl) {
@@ -134,53 +134,6 @@ public class DiskUsageTests extends ESTestCase {
        assertEquals(test1Path.getParent().getParent().getParent().toAbsolutePath().toString(), routingToPath.get(test_1));
    }

    public void testFillShardsWithShadowIndices() {
        final Index index = new Index("non-shadow", "0xcafe0000");
        ShardRouting s0 = ShardRouting.newUnassigned(new ShardId(index, 0), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
        s0 = ShardRoutingHelper.initialize(s0, "node1");
        s0 = ShardRoutingHelper.moveToStarted(s0);
        Path i0Path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve("0");
        CommonStats commonStats0 = new CommonStats();
        commonStats0.store = new StoreStats(100);
        final Index index2 = new Index("shadow", "0xcafe0001");
        ShardRouting s1 = ShardRouting.newUnassigned(new ShardId(index2, 0), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
        s1 = ShardRoutingHelper.initialize(s1, "node2");
        s1 = ShardRoutingHelper.moveToStarted(s1);
        Path i1Path = createTempDir().resolve("indices").resolve(index2.getUUID()).resolve("0");
        CommonStats commonStats1 = new CommonStats();
        commonStats1.store = new StoreStats(1000);
        ShardStats[] stats = new ShardStats[] {
            new ShardStats(s0, new ShardPath(false, i0Path, i0Path, s0.shardId()), commonStats0 , null, null),
            new ShardStats(s1, new ShardPath(false, i1Path, i1Path, s1.shardId()), commonStats1 , null, null)
        };
        ImmutableOpenMap.Builder<String, Long> shardSizes = ImmutableOpenMap.builder();
        ImmutableOpenMap.Builder<ShardRouting, String> routingToPath = ImmutableOpenMap.builder();
        ClusterState state = ClusterState.builder(new ClusterName("blarg"))
            .version(0)
            .metaData(MetaData.builder()
                .put(IndexMetaData.builder("non-shadow")
                    .settings(Settings.builder()
                        .put(IndexMetaData.SETTING_INDEX_UUID, "0xcafe0000")
                        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
                    .numberOfShards(1)
                    .numberOfReplicas(0))
                .put(IndexMetaData.builder("shadow")
                    .settings(Settings.builder()
                        .put(IndexMetaData.SETTING_INDEX_UUID, "0xcafe0001")
                        .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
                        .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
                    .numberOfShards(1)
                    .numberOfReplicas(0)))
            .build();
        logger.info("--> calling buildShardLevelInfo with state: {}", state);
        InternalClusterInfoService.buildShardLevelInfo(logger, stats, shardSizes, routingToPath, state);
        assertEquals(2, shardSizes.size());
        assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(s0)));
        assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(s1)));
        assertEquals(100L, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(s0)).longValue());
        assertEquals(0L, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(s1)).longValue());
    }

    public void testFillDiskUsage() {
        ImmutableOpenMap.Builder<String, DiskUsage> newLeastAvaiableUsages = ImmutableOpenMap.builder();
        ImmutableOpenMap.Builder<String, DiskUsage> newMostAvaiableUsages = ImmutableOpenMap.builder();
@@ -0,0 +1,75 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.metadata;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class ClusterNameExpressionResolverTests extends ESTestCase {

    private ClusterNameExpressionResolver clusterNameResolver = new ClusterNameExpressionResolver(Settings.EMPTY);
    private static final Set<String> remoteClusters = new HashSet<>();

    static {
        remoteClusters.add("cluster1");
        remoteClusters.add("cluster2");
        remoteClusters.add("totallyDifferent");
    }

    public void testExactMatch() {
        List<String> clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "totallyDifferent");
        assertEquals(new HashSet<>(Arrays.asList("totallyDifferent")), new HashSet<>(clusters));
    }

    public void testNoWildCardNoMatch() {
        List<String> clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "totallyDifferent2");
        assertTrue(clusters.isEmpty());
    }

    public void testWildCardNoMatch() {
        List<String> clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "totally*2");
        assertTrue(clusters.isEmpty());
    }

    public void testSimpleWildCard() {
        List<String> clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "*");
        assertEquals(new HashSet<>(Arrays.asList("cluster1", "cluster2", "totallyDifferent")), new HashSet<>(clusters));
    }

    public void testSuffixWildCard() {
        List<String> clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "cluster*");
        assertEquals(new HashSet<>(Arrays.asList("cluster1", "cluster2")), new HashSet<>(clusters));
    }

    public void testPrefixWildCard() {
        List<String> clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "*Different");
        assertEquals(new HashSet<>(Arrays.asList("totallyDifferent")), new HashSet<>(clusters));
    }

    public void testMiddleWildCard() {
        List<String> clusters = clusterNameResolver.resolveClusterNames(remoteClusters, "clu*1");
        assertEquals(new HashSet<>(Arrays.asList("cluster1")), new HashSet<>(clusters));
    }
}
@@ -1,76 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.routing;


import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.Arrays;

public class RoutingBackwardCompatibilityTests extends ESTestCase {

    public void testBackwardCompatibility() throws Exception {
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(RoutingBackwardCompatibilityTests.class
            .getResourceAsStream("/org/elasticsearch/cluster/routing/shard_routes.txt"), "UTF-8"))) {
            for (String line = reader.readLine(); line != null; line = reader.readLine()) {
                if (line.startsWith("#")) { // comment
                    continue;
                }
                String[] parts = line.split("\t");
                assertEquals(Arrays.toString(parts), 7, parts.length);
                final String index = parts[0];
                final int numberOfShards = Integer.parseInt(parts[1]);
                final String type = parts[2];
                final String id = parts[3];
                final String routing = "null".equals(parts[4]) ? null : parts[4];
                final int pre20ExpectedShardId = Integer.parseInt(parts[5]); // not needed anymore - old hashing is gone
                final int currentExpectedShard = Integer.parseInt(parts[6]);

                OperationRouting operationRouting = new OperationRouting(Settings.EMPTY, new ClusterSettings(Settings.EMPTY,
                    ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
                for (Version version : VersionUtils.allReleasedVersions()) {
                    if (version.onOrAfter(Version.V_2_0_0) == false) {
                        // unsupported version, no need to test
                        continue;
                    }
                    final Settings settings = settings(version).build();
                    IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(settings).numberOfShards(numberOfShards)
                        .numberOfReplicas(randomInt(3)).build();
                    MetaData.Builder metaData = MetaData.builder().put(indexMetaData, false);
                    RoutingTable routingTable = RoutingTable.builder().addAsNew(indexMetaData).build();
                    ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
                        .metaData(metaData).routingTable(routingTable).build();
                    final int shardId = operationRouting.indexShards(clusterState, index, id, routing).shardId().getId();
                    assertEquals(currentExpectedShard, shardId);
                }
            }
        }
    }
}
@@ -33,11 +33,14 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;

@@ -84,4 +87,69 @@ public class StartedShardsRoutingTests extends ESAllocationTestCase {
        assertThat(shardRouting.currentNodeId(), equalTo("node2"));
        assertThat(shardRouting.relocatingNodeId(), nullValue());
    }

    public void testRelocatingPrimariesWithInitializingReplicas() {
        AllocationService allocation = createAllocationService();

        logger.info("--> building initial cluster state");
        AllocationId primaryId = AllocationId.newRelocation(AllocationId.newInitializing());
        AllocationId replicaId = AllocationId.newInitializing();
        boolean relocatingReplica = randomBoolean();
        if (relocatingReplica) {
            replicaId = AllocationId.newRelocation(replicaId);
        }

        final IndexMetaData indexMetaData = IndexMetaData.builder("test")
            .settings(settings(Version.CURRENT))
            .numberOfShards(1).numberOfReplicas(1)
            .putInSyncAllocationIds(0,
                relocatingReplica ? Sets.newHashSet(primaryId.getId(), replicaId.getId()) : Sets.newHashSet(primaryId.getId()))
            .build();
        final Index index = indexMetaData.getIndex();
        ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4")))
            .metaData(MetaData.builder().put(indexMetaData, false));

        final ShardRouting relocatingPrimary = TestShardRouting.newShardRouting(
            new ShardId(index, 0), "node1", "node2", true, ShardRoutingState.RELOCATING, primaryId);
        final ShardRouting replica = TestShardRouting.newShardRouting(
            new ShardId(index, 0), "node3", relocatingReplica ? "node4" : null, false,
            relocatingReplica ? ShardRoutingState.RELOCATING : ShardRoutingState.INITIALIZING, replicaId);

        stateBuilder.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index)
            .addIndexShard(new IndexShardRoutingTable.Builder(relocatingPrimary.shardId())
                .addShard(relocatingPrimary)
                .addShard(replica)
                .build()))
            .build());


        ClusterState state = stateBuilder.build();

        logger.info("--> test starting of relocating primary shard with initializing / relocating replica");
        ClusterState newState = allocation.applyStartedShards(state, Arrays.asList(relocatingPrimary.getTargetRelocatingShard()));
        assertNotEquals(newState, state);
        assertTrue(newState.routingTable().index("test").allPrimaryShardsActive());
        ShardRouting startedReplica = newState.routingTable().index("test").shard(0).replicaShards().get(0);
        if (relocatingReplica) {
            assertTrue(startedReplica.relocating());
            assertEquals(replica.currentNodeId(), startedReplica.currentNodeId());
            assertEquals(replica.relocatingNodeId(), startedReplica.relocatingNodeId());
            assertEquals(replica.allocationId().getId(), startedReplica.allocationId().getId());
            assertNotEquals(replica.allocationId().getRelocationId(), startedReplica.allocationId().getRelocationId());
        } else {
            assertTrue(startedReplica.initializing());
            assertEquals(replica.currentNodeId(), startedReplica.currentNodeId());
            assertNotEquals(replica.allocationId().getId(), startedReplica.allocationId().getId());
        }

        logger.info("--> test starting of relocating primary shard together with initializing / relocating replica");
        List<ShardRouting> startedShards = new ArrayList<>();
        startedShards.add(relocatingPrimary.getTargetRelocatingShard());
        startedShards.add(relocatingReplica ? replica.getTargetRelocatingShard() : replica);
        Collections.shuffle(startedShards, random());
        newState = allocation.applyStartedShards(state, startedShards);
        assertNotEquals(newState, state);
        assertTrue(newState.routingTable().index("test").shard(0).allShardsStarted());
    }
}

@@ -0,0 +1,149 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.settings;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.Map;
|
||||
|
||||
import org.elasticsearch.cli.Command;
|
||||
import org.elasticsearch.cli.ExitCodes;
|
||||
import org.elasticsearch.cli.Terminal;
|
||||
import org.elasticsearch.cli.UserException;
|
||||
import org.elasticsearch.env.Environment;
|
||||
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
|
||||
public class AddFileKeyStoreCommandTests extends KeyStoreCommandTestCase {
|
||||
@Override
|
||||
protected Command newCommand() {
|
||||
return new AddFileKeyStoreCommand() {
|
||||
@Override
|
||||
protected Environment createEnv(Terminal terminal, Map<String, String> settings) {
|
||||
return env;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
private Path createRandomFile() throws IOException {
|
||||
int length = randomIntBetween(10, 20);
|
||||
byte[] bytes = new byte[length];
|
||||
for (int i = 0; i < length; ++i) {
|
||||
bytes[i] = randomByte();
|
||||
}
|
||||
Path file = env.configFile().resolve("randomfile");
|
||||
Files.write(file, bytes);
|
||||
return file;
|
||||
}
|
||||
|
||||
private void addFile(KeyStoreWrapper keystore, String setting, Path file) throws Exception {
|
||||
keystore.setFile(setting, Files.readAllBytes(file));
|
||||
keystore.save(env.configFile());
|
||||
}
|
||||
|
||||
public void testMissing() throws Exception {
|
||||
UserException e = expectThrows(UserException.class, this::execute);
|
||||
assertEquals(ExitCodes.DATA_ERROR, e.exitCode);
|
||||
assertThat(e.getMessage(), containsString("keystore not found"));
|
||||
}
|
||||
|
||||
public void testOverwritePromptDefault() throws Exception {
|
||||
Path file = createRandomFile();
|
||||
KeyStoreWrapper keystore = createKeystore("");
|
||||
addFile(keystore, "foo", file);
|
||||
terminal.addTextInput("");
|
||||
execute("foo", "path/dne");
|
||||
assertSecureFile("foo", file);
|
||||
}
|
||||
|
||||
public void testOverwritePromptExplicitNo() throws Exception {
|
||||
Path file = createRandomFile();
|
||||
KeyStoreWrapper keystore = createKeystore("");
|
||||
addFile(keystore, "foo", file);
|
||||
terminal.addTextInput("n"); // explicit no
|
||||
execute("foo", "path/dne");
|
||||
assertSecureFile("foo", file);
|
||||
}
|
||||
|
||||
public void testOverwritePromptExplicitYes() throws Exception {
|
||||
Path file1 = createRandomFile();
|
||||
KeyStoreWrapper keystore = createKeystore("");
|
||||
addFile(keystore, "foo", file1);
|
||||
terminal.addTextInput("y");
|
||||
Path file2 = createRandomFile();
|
||||
execute("foo", file2.toString());
|
||||
assertSecureFile("foo", file2);
|
||||
}
|
||||
|
||||
public void testOverwriteForceShort() throws Exception {
|
||||
Path file1 = createRandomFile();
|
||||
KeyStoreWrapper keystore = createKeystore("");
|
||||
addFile(keystore, "foo", file1);
|
||||
Path file2 = createRandomFile();
|
||||
execute("-f", "foo", file2.toString());
|
||||
assertSecureFile("foo", file2);
|
||||
}
|
||||
|
||||
public void testOverwriteForceLong() throws Exception {
|
||||
Path file1 = createRandomFile();
|
||||
KeyStoreWrapper keystore = createKeystore("");
|
||||
addFile(keystore, "foo", file1);
|
||||
Path file2 = createRandomFile();
|
||||
execute("--force", "foo", file2.toString());
|
||||
assertSecureFile("foo", file2);
|
||||
}
|
||||
|
||||
public void testForceNonExistent() throws Exception {
|
||||
createKeystore("");
|
||||
Path file = createRandomFile();
|
||||
execute("--force", "foo", file.toString());
|
||||
assertSecureFile("foo", file);
|
||||
}
|
||||
|
||||
public void testMissingSettingName() throws Exception {
|
||||
createKeystore("");
|
||||
UserException e = expectThrows(UserException.class, this::execute);
|
||||
assertEquals(ExitCodes.USAGE, e.exitCode);
|
||||
assertThat(e.getMessage(), containsString("Missing setting name"));
|
||||
}
|
||||
|
||||
public void testMissingFileName() throws Exception {
|
||||
createKeystore("");
|
||||
UserException e = expectThrows(UserException.class, () -> execute("foo"));
|
||||
assertEquals(ExitCodes.USAGE, e.exitCode);
|
||||
assertThat(e.getMessage(), containsString("Missing file name"));
|
||||
}
|
||||
|
||||
public void testFileDNE() throws Exception {
|
||||
createKeystore("");
|
||||
UserException e = expectThrows(UserException.class, () -> execute("foo", "path/dne"));
|
||||
assertEquals(ExitCodes.IO_ERROR, e.exitCode);
|
||||
assertThat(e.getMessage(), containsString("File [path/dne] does not exist"));
|
||||
}
|
||||
|
||||
public void testExtraArguments() throws Exception {
|
||||
createKeystore("");
|
||||
Path file = createRandomFile();
|
||||
UserException e = expectThrows(UserException.class, () -> execute("foo", file.toString(), "bar"));
|
||||
assertEquals(e.getMessage(), ExitCodes.USAGE, e.exitCode);
|
||||
assertThat(e.getMessage(), containsString("Unrecognized extra arguments [bar]"));
|
||||
}
|
||||
}
|
|
@@ -127,7 +127,7 @@ public class AddStringKeyStoreCommandTests extends KeyStoreCommandTestCase {
        assertEquals("String value must contain only ASCII", e.getMessage());
    }

    public void testNpe() throws Exception {
    public void testMissingSettingName() throws Exception {
        createKeystore("");
        terminal.addTextInput("");
        UserException e = expectThrows(UserException.class, this::execute);
@@ -47,7 +47,7 @@ public class CreateKeyStoreCommandTests extends KeyStoreCommandTestCase {
    }

    public void testNotPosix() throws Exception {
        setupEnv(false);
        env = setupEnv(false, fileSystems);
        execute();
        Path configDir = env.configFile();
        assertNotNull(KeyStoreWrapper.load(configDir));
@@ -20,7 +20,9 @@
package org.elasticsearch.common.settings;

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
@@ -53,10 +55,10 @@ public abstract class KeyStoreCommandTestCase extends CommandTestCase {

    @Before
    public void setupEnv() throws IOException {
        setupEnv(true); // default to posix, but tests may call setupEnv(false) to overwrite
        env = setupEnv(true, fileSystems); // default to posix, but tests may call setupEnv(false) to overwrite
    }

    void setupEnv(boolean posix) throws IOException {
    static Environment setupEnv(boolean posix, List<FileSystem> fileSystems) throws IOException {
        final Configuration configuration;
        if (posix) {
            configuration = Configuration.unix().toBuilder().setAttributeViews("basic", "owner", "posix", "unix").build();
@@ -68,7 +70,7 @@ public abstract class KeyStoreCommandTestCase extends CommandTestCase {
        PathUtilsForTesting.installMock(fs); // restored by restoreFileSystem in ESTestCase
        Path home = fs.getPath("/", "test-home");
        Files.createDirectories(home.resolve("config"));
        env = new Environment(Settings.builder().put("path.home", home).build());
        return new Environment(Settings.builder().put("path.home", home).build());
    }

    KeyStoreWrapper createKeystore(String password, String... settings) throws Exception {
@@ -94,4 +96,28 @@ public abstract class KeyStoreCommandTestCase extends CommandTestCase {
    void assertSecureString(KeyStoreWrapper keystore, String setting, String value) throws Exception {
        assertEquals(value, keystore.getString(setting).toString());
    }

    void assertSecureFile(String setting, Path file) throws Exception {
        assertSecureFile(loadKeystore(""), setting, file);
    }

    void assertSecureFile(KeyStoreWrapper keystore, String setting, Path file) throws Exception {
        byte[] expectedBytes = Files.readAllBytes(file);
        try (InputStream input = keystore.getFile(setting)) {
            for (int i = 0; i < expectedBytes.length; ++i) {
                int got = input.read();
                int expected = Byte.toUnsignedInt(expectedBytes[i]);
                if (got < 0) {
                    fail("Got EOF from keystore stream at position " + i + " but expected 0x" + Integer.toHexString(expected));
                }
                assertEquals("Byte " + i, expected, got);
            }
            int eof = input.read();
            if (eof != -1) {
                fail("Found extra bytes in file stream from keystore, expected " + expectedBytes.length +
                    " bytes but found 0x" + Integer.toHexString(eof));
            }
        }

    }
}
@@ -0,0 +1,70 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.settings;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.nio.file.FileSystem;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
|
||||
public class KeyStoreWrapperTests extends ESTestCase {
|
||||
|
||||
Environment env;
|
||||
List<FileSystem> fileSystems = new ArrayList<>();
|
||||
|
||||
@After
|
||||
public void closeMockFileSystems() throws IOException {
|
||||
IOUtils.close(fileSystems);
|
||||
}
|
||||
|
||||
@Before
|
||||
public void setupEnv() throws IOException {
|
||||
env = KeyStoreCommandTestCase.setupEnv(true, fileSystems);
|
||||
}
|
||||
|
||||
public void testFileSettingExhaustiveBytes() throws Exception {
|
||||
KeyStoreWrapper keystore = KeyStoreWrapper.create(new char[0]);
|
||||
byte[] bytes = new byte[256];
|
||||
for (int i = 0; i < 256; ++i) {
|
||||
bytes[i] = (byte)i;
|
||||
}
|
||||
keystore.setFile("foo", bytes);
|
||||
keystore.save(env.configFile());
|
||||
keystore = KeyStoreWrapper.load(env.configFile());
|
||||
keystore.decrypt(new char[0]);
|
||||
try (InputStream stream = keystore.getFile("foo")) {
|
||||
for (int i = 0; i < 256; ++i) {
|
||||
int got = stream.read();
|
||||
if (got < 0) {
|
||||
fail("Expected 256 bytes but read " + i);
|
||||
}
|
||||
assertEquals(i, got);
|
||||
}
|
||||
assertEquals(-1, stream.read()); // nothing left
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -68,14 +68,13 @@ public class IndexFolderUpgraderTests extends ESTestCase {
    public void testUpgradeCustomDataPath() throws IOException {
        Path customPath = createTempDir();
        final Settings nodeSettings = Settings.builder()
            .put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), randomBoolean())
            .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), customPath.toAbsolutePath().toString()).build();
        try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) {
            final Index index = new Index(randomAlphaOfLength(10), UUIDs.randomBase64UUID());
            Settings settings = Settings.builder()
                .put(nodeSettings)
                .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
                .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0)
                .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0)
                .put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString())
                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5))
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
@@ -97,14 +96,13 @@ public class IndexFolderUpgraderTests extends ESTestCase {
|
|||
public void testPartialUpgradeCustomDataPath() throws IOException {
|
||||
Path customPath = createTempDir();
|
||||
final Settings nodeSettings = Settings.builder()
|
||||
.put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), randomBoolean())
|
||||
.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), customPath.toAbsolutePath().toString()).build();
|
||||
try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) {
|
||||
final Index index = new Index(randomAlphaOfLength(10), UUIDs.randomBase64UUID());
|
||||
Settings settings = Settings.builder()
|
||||
.put(nodeSettings)
|
||||
.put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0)
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0)
|
||||
.put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString())
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5))
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
|
@@ -136,14 +134,13 @@ public class IndexFolderUpgraderTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testUpgrade() throws IOException {
|
||||
final Settings nodeSettings = Settings.builder()
|
||||
.put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()).build();
|
||||
final Settings nodeSettings = Settings.EMPTY;
|
||||
try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) {
|
||||
final Index index = new Index(randomAlphaOfLength(10), UUIDs.randomBase64UUID());
|
||||
Settings settings = Settings.builder()
|
||||
.put(nodeSettings)
|
||||
.put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0)
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5))
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
.build();
|
||||
|
@@ -159,8 +156,7 @@ public class IndexFolderUpgraderTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testUpgradeIndices() throws IOException {
|
||||
final Settings nodeSettings = Settings.builder()
|
||||
.put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()).build();
|
||||
final Settings nodeSettings = Settings.EMPTY;
|
||||
try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) {
|
||||
Map<IndexSettings, Tuple<Integer, Integer>> indexSettingsMap = new HashMap<>();
|
||||
for (int i = 0; i < randomIntBetween(2, 5); i++) {
|
||||
|
@@ -168,7 +164,7 @@ public class IndexFolderUpgraderTests extends ESTestCase {
|
|||
Settings settings = Settings.builder()
|
||||
.put(nodeSettings)
|
||||
.put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0)
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_5_0_0)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5))
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
.build();
|
||||
|
|
|
@@ -380,11 +380,10 @@ public class NodeEnvironmentTests extends ESTestCase {
        assertThat("index paths uses the regular template",
            env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID())));

        IndexSettings s3 = new IndexSettings(s2.getIndexMetaData(),
            Settings.builder().put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), false).build());
        IndexSettings s3 = new IndexSettings(s2.getIndexMetaData(), Settings.builder().build());

        assertThat(env.availableShardPaths(sid), equalTo(env.availableShardPaths(sid)));
        assertThat(env.resolveCustomLocation(s3, sid), equalTo(PathUtils.get("/tmp/foo/" + index.getUUID() + "/0")));
        assertThat(env.resolveCustomLocation(s3, sid), equalTo(PathUtils.get("/tmp/foo/0/" + index.getUUID() + "/0")));

        assertThat("shard paths with a custom data_path should contain only regular paths",
            env.availableShardPaths(sid),
@@ -330,26 +330,10 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
|
|||
final int numNodes = 2;
|
||||
|
||||
final List<String> nodes;
|
||||
if (randomBoolean()) {
|
||||
// test with a regular index
|
||||
logger.info("--> starting a cluster with " + numNodes + " nodes");
|
||||
nodes = internalCluster().startNodes(numNodes);
|
||||
logger.info("--> create an index");
|
||||
createIndex(indexName);
|
||||
} else {
|
||||
// test with a shadow replica index
|
||||
final Path dataPath = createTempDir();
|
||||
logger.info("--> created temp data path for shadow replicas [{}]", dataPath);
|
||||
logger.info("--> starting a cluster with " + numNodes + " nodes");
|
||||
final Settings nodeSettings = Settings.builder()
|
||||
.put("node.add_lock_id_to_custom_path", false)
|
||||
.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), dataPath.toString())
|
||||
.put("index.store.fs.fs_lock", randomFrom("native", "simple"))
|
||||
.build();
|
||||
nodes = internalCluster().startNodes(numNodes, nodeSettings);
|
||||
logger.info("--> create a shadow replica index");
|
||||
createShadowReplicaIndex(indexName, dataPath, numNodes - 1);
|
||||
}
|
||||
logger.info("--> starting a cluster with " + numNodes + " nodes");
|
||||
nodes = internalCluster().startNodes(numNodes);
|
||||
logger.info("--> create an index");
|
||||
createIndex(indexName);
|
||||
|
||||
logger.info("--> waiting for green status");
|
||||
ensureGreen();
|
||||
|
@@ -535,23 +519,4 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
|
|||
+ ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey()));
|
||||
assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Creates a shadow replica index and asserts that the index creation was acknowledged.
|
||||
* Can only be invoked on a cluster where each node has been configured with shared data
|
||||
* paths and the other necessary settings for shadow replicas.
|
||||
*/
|
||||
private void createShadowReplicaIndex(final String name, final Path dataPath, final int numReplicas) {
|
||||
assert Files.exists(dataPath);
|
||||
assert numReplicas >= 0;
|
||||
final Settings idxSettings = Settings.builder()
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numReplicas)
|
||||
.put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
|
||||
.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
|
||||
.build();
|
||||
assertAcked(prepareCreate(name).setSettings(idxSettings).get());
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -72,16 +72,6 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
|
|||
private final DiscoveryNode node3 = newNode("node3");
|
||||
private TestAllocator testAllocator;
|
||||
|
||||
|
||||
/**
|
||||
* needed due to random usage of {@link IndexMetaData#INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING}. removed once
|
||||
* shadow replicas are removed.
|
||||
*/
|
||||
@Override
|
||||
protected boolean enableWarningsCheck() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@Before
|
||||
public void buildTestAllocator() {
|
||||
this.testAllocator = new TestAllocator();
|
||||
|
@@ -401,79 +391,6 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
|
|||
return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy and allocation
|
||||
* deciders say yes, we allocate to that node.
|
||||
*/
|
||||
public void testRecoverOnAnyNode() {
|
||||
RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders(), "allocId");
|
||||
testAllocator.addData(node1, "allocId", randomBoolean());
|
||||
testAllocator.allocateUnassigned(allocation);
|
||||
assertThat(allocation.routingNodesChanged(), equalTo(true));
|
||||
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
|
||||
assertClusterHealthStatus(allocation, ClusterHealthStatus.RED);
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy and allocation
|
||||
* deciders say throttle, we add it to ignored shards.
|
||||
*/
|
||||
public void testRecoverOnAnyNodeThrottle() {
|
||||
RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(throttleAllocationDeciders(), "allocId");
|
||||
testAllocator.addData(node1, "allocId", randomBoolean());
|
||||
testAllocator.allocateUnassigned(allocation);
|
||||
assertThat(allocation.routingNodesChanged(), equalTo(true));
|
||||
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(false));
|
||||
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy but allocation
|
||||
* deciders say no, we still allocate to that node.
|
||||
*/
|
||||
public void testRecoverOnAnyNodeForcesAllocateIfShardAvailable() {
|
||||
RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(noAllocationDeciders(), "allocId");
|
||||
testAllocator.addData(node1, "allocId", randomBoolean());
|
||||
testAllocator.allocateUnassigned(allocation);
|
||||
assertThat(allocation.routingNodesChanged(), equalTo(true));
|
||||
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
|
||||
assertClusterHealthStatus(allocation, ClusterHealthStatus.RED);
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests that when recovering using "recover_on_any_node" and we don't find a node with a shard copy we let
|
||||
* BalancedShardAllocator assign the shard
|
||||
*/
|
||||
public void testRecoverOnAnyNodeDoesNotAssignIfNoShardAvailable() {
|
||||
RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders(), "allocId");
|
||||
testAllocator.addData(node1, null, randomBoolean());
|
||||
testAllocator.allocateUnassigned(allocation);
|
||||
assertThat(allocation.routingNodesChanged(), equalTo(false));
|
||||
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
|
||||
assertThat(allocation.routingNodes().unassigned().size(), equalTo(1));
|
||||
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
|
||||
}
|
||||
|
||||
private RoutingAllocation getRecoverOnAnyNodeRoutingAllocation(AllocationDeciders allocationDeciders, String... allocIds) {
|
||||
MetaData metaData = MetaData.builder()
|
||||
.put(IndexMetaData.builder(shardId.getIndexName()).settings(settings(Version.CURRENT)
|
||||
.put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
|
||||
.put(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, true))
|
||||
.numberOfShards(1).numberOfReplicas(0).putInSyncAllocationIds(0, Sets.newHashSet(allocIds)))
|
||||
.build();
|
||||
|
||||
RoutingTable routingTable = RoutingTable.builder()
|
||||
.addAsRestore(metaData.index(shardId.getIndex()), new SnapshotRecoverySource(new Snapshot("test", new SnapshotId("test", UUIDs.randomBase64UUID())), Version.CURRENT, shardId.getIndexName()))
|
||||
.build();
|
||||
ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
|
||||
.metaData(metaData)
|
||||
.routingTable(routingTable)
|
||||
.nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build();
|
||||
return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false);
|
||||
}
|
||||
|
||||
private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDeciders deciders, UnassignedInfo.Reason reason,
|
||||
String... activeAllocationIds) {
|
||||
MetaData metaData = MetaData.builder()
|
||||
|
|
|
@@ -65,6 +65,8 @@ import static java.util.Collections.unmodifiableMap;
|
|||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
|
||||
private static final org.apache.lucene.util.Version MIN_SUPPORTED_LUCENE_VERSION = org.elasticsearch.Version.CURRENT
|
||||
.minimumIndexCompatibilityVersion().luceneVersion;
|
||||
private final ShardId shardId = new ShardId("test", "_na_", 0);
|
||||
private final DiscoveryNode node1 = newNode("node1");
|
||||
private final DiscoveryNode node2 = newNode("node2");
|
||||
|
@@ -119,8 +121,8 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
|
|||
public void testSimpleFullMatchAllocation() {
|
||||
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
|
||||
DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3;
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"))
|
||||
.addData(nodeToMatch, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
|
||||
.addData(nodeToMatch, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
|
||||
testAllocator.allocateUnassigned(allocation);
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId()));
|
||||
|
@@ -132,8 +134,8 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
    public void testSyncIdMatch() {
        RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
        DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3;
        testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"))
            .addData(nodeToMatch, "MATCH", new StoreFileMetaData("file1", 10, "NO_MATCH_CHECKSUM"));
        testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
            .addData(nodeToMatch, "MATCH", new StoreFileMetaData("file1", 10, "NO_MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
        testAllocator.allocateUnassigned(allocation);
        assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
        assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId()));
@@ -145,8 +147,8 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
|
|||
public void testFileChecksumMatch() {
|
||||
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
|
||||
DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3;
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"))
|
||||
.addData(nodeToMatch, "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
|
||||
.addData(nodeToMatch, "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
|
||||
testAllocator.allocateUnassigned(allocation);
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId()));
|
||||
|
@@ -160,7 +162,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
|
|||
*/
|
||||
public void testNoPrimaryData() {
|
||||
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
|
||||
testAllocator.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
|
||||
testAllocator.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
|
||||
testAllocator.allocateUnassigned(allocation);
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1));
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId));
|
||||
|
@@ -172,7 +174,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
|
|||
*/
|
||||
public void testNoDataForReplicaOnAnyNode() {
|
||||
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
|
||||
testAllocator.allocateUnassigned(allocation);
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1));
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId));
|
||||
|
@@ -184,8 +186,8 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
|
|||
*/
|
||||
public void testNoMatchingFilesForReplicaOnAnyNode() {
|
||||
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"))
|
||||
.addData(node2, "NO_MATCH", new StoreFileMetaData("file1", 10, "NO_MATCH_CHECKSUM"));
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
|
||||
.addData(node2, "NO_MATCH", new StoreFileMetaData("file1", 10, "NO_MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
|
||||
testAllocator.allocateUnassigned(allocation);
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1));
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId));
|
||||
|
@@ -197,8 +199,8 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
|
|||
*/
|
||||
public void testNoOrThrottleDecidersRemainsInUnassigned() {
|
||||
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(randomBoolean() ? noAllocationDeciders() : throttleAllocationDeciders());
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"))
|
||||
.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
|
||||
.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
|
||||
testAllocator.allocateUnassigned(allocation);
|
||||
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
|
||||
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
|
||||
|
@@ -222,8 +224,8 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
|
|||
return Decision.YES;
|
||||
}
|
||||
})));
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"))
|
||||
.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
|
||||
.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
|
||||
testAllocator.allocateUnassigned(allocation);
|
||||
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
|
||||
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
|
||||
|
@@ -231,8 +233,9 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {

    public void testDelayedAllocation() {
        RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(),
            Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)).build(), UnassignedInfo.Reason.NODE_LEFT);
        testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
            Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1))
                .build(), UnassignedInfo.Reason.NODE_LEFT);
        testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
        if (randomBoolean()) {
            // we sometimes return an empty list of files, make sure we test this as well
            testAllocator.addData(node2, null);
@@ -244,7 +247,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
|
|||
|
||||
allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(),
|
||||
Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)).build(), UnassignedInfo.Reason.NODE_LEFT);
|
||||
testAllocator.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
|
||||
testAllocator.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
|
||||
testAllocator.allocateUnassigned(allocation);
|
||||
assertThat(allocation.routingNodesChanged(), equalTo(true));
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
|
||||
|
@@ -253,9 +256,9 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
|
|||
|
||||
public void testCancelRecoveryBetterSyncId() {
|
||||
RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders());
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"))
|
||||
.addData(node2, "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"))
|
||||
.addData(node3, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
|
||||
.addData(node2, "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
|
||||
.addData(node3, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
|
||||
testAllocator.processExistingRecoveries(allocation);
|
||||
assertThat(allocation.routingNodesChanged(), equalTo(true));
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1));
|
||||
|
@@ -264,9 +267,10 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
|
|||
|
||||
public void testNotCancellingRecoveryIfSyncedOnExistingRecovery() {
|
||||
RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders());
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"))
|
||||
.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"))
|
||||
.addData(node3, randomBoolean() ? "MATCH" : "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
|
||||
.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
|
||||
.addData(node3, randomBoolean() ? "MATCH" : "NO_MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM",
|
||||
MIN_SUPPORTED_LUCENE_VERSION));
|
||||
testAllocator.processExistingRecoveries(allocation);
|
||||
assertThat(allocation.routingNodesChanged(), equalTo(false));
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0));
|
||||
|
@@ -274,8 +278,8 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
|
|||
|
||||
public void testNotCancellingRecovery() {
|
||||
RoutingAllocation allocation = onePrimaryOnNode1And1ReplicaRecovering(yesAllocationDeciders());
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"))
|
||||
.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
|
||||
testAllocator.addData(node1, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
|
||||
.addData(node2, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
|
||||
testAllocator.processExistingRecoveries(allocation);
|
||||
assertThat(allocation.routingNodesChanged(), equalTo(false));
|
||||
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(0));
|
||||
|
|
|
@@ -47,36 +47,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF
|
|||
|
||||
/** Unit test(s) for IndexService */
|
||||
public class IndexServiceTests extends ESSingleNodeTestCase {
|
||||
public void testDetermineShadowEngineShouldBeUsed() {
|
||||
IndexSettings regularSettings = new IndexSettings(
|
||||
IndexMetaData
|
||||
.builder("regular")
|
||||
.settings(Settings.builder()
|
||||
.put(SETTING_NUMBER_OF_SHARDS, 2)
|
||||
.put(SETTING_NUMBER_OF_REPLICAS, 1)
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
|
||||
.build())
|
||||
.build(),
|
||||
Settings.EMPTY);
|
||||
|
||||
IndexSettings shadowSettings = new IndexSettings(
|
||||
IndexMetaData
|
||||
.builder("shadow")
|
||||
.settings(Settings.builder()
|
||||
.put(SETTING_NUMBER_OF_SHARDS, 2)
|
||||
.put(SETTING_NUMBER_OF_REPLICAS, 1)
|
||||
.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
|
||||
.build())
|
||||
.build(),
|
||||
Settings.EMPTY);

        assertFalse("no shadow replicas for normal settings", IndexService.useShadowEngine(true, regularSettings));
        assertFalse("no shadow replicas for normal settings", IndexService.useShadowEngine(false, regularSettings));
        assertFalse("no shadow replicas for primary shard with shadow settings", IndexService.useShadowEngine(true, shadowSettings));
        assertTrue("shadow replicas for replica shards with shadow settings", IndexService.useShadowEngine(false, shadowSettings));
    }
|
||||
|
||||
public static CompressedXContent filter(QueryBuilder filterBuilder) throws IOException {
|
||||
XContentBuilder builder = XContentFactory.jsonBuilder();
|
||||
filterBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS);
|
||||
|
|
|
@@ -1,905 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.action.DocWriteResponse;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
|
||||
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
|
||||
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
|
||||
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
|
||||
import org.elasticsearch.action.get.GetResponse;
|
||||
import org.elasticsearch.action.index.IndexRequestBuilder;
|
||||
import org.elasticsearch.action.index.IndexResponse;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.cluster.health.ClusterHealthStatus;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.routing.RoutingNode;
|
||||
import org.elasticsearch.cluster.routing.RoutingNodes;
|
||||
import org.elasticsearch.common.Priority;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.ShadowIndexShard;
|
||||
import org.elasticsearch.index.store.FsDirectoryService;
|
||||
import org.elasticsearch.index.translog.TranslogStats;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.search.SearchHit;
|
||||
import org.elasticsearch.search.sort.SortOrder;
|
||||
import org.elasticsearch.snapshots.SnapshotState;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.InternalTestCluster;
|
||||
import org.elasticsearch.test.junit.annotations.TestLogging;
|
||||
import org.elasticsearch.test.transport.MockTransportService;
|
||||
import org.elasticsearch.transport.TransportRequest;
|
||||
import org.elasticsearch.transport.TransportRequestOptions;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Path;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
|
||||
|
||||
/**
|
||||
* Tests for indices that use shadow replicas and a shared filesystem
|
||||
*/
|
||||
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
|
||||
public class IndexWithShadowReplicasIT extends ESIntegTestCase {
|
||||
|
||||
private Settings nodeSettings(Path dataPath) {
|
||||
return nodeSettings(dataPath.toString());
|
||||
}
|
||||
|
||||
private Settings nodeSettings(String dataPath) {
|
||||
return Settings.builder()
|
||||
.put(NodeEnvironment.ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.getKey(), false)
|
||||
.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), dataPath)
|
||||
.put(FsDirectoryService.INDEX_LOCK_FACTOR_SETTING.getKey(), randomFrom("native", "simple"))
|
||||
.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Collection<Class<? extends Plugin>> nodePlugins() {
|
||||
return Arrays.asList(MockTransportService.TestPlugin.class);
|
||||
}
|
||||
|
||||
public void testCannotCreateWithBadPath() throws Exception {
|
||||
Settings nodeSettings = nodeSettings("/badpath");
|
||||
internalCluster().startNodes(1, nodeSettings);
|
||||
Settings idxSettings = Settings.builder()
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
|
||||
.put(IndexMetaData.SETTING_DATA_PATH, "/etc/foo")
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build();
|
||||
try {
|
||||
assertAcked(prepareCreate("foo").setSettings(idxSettings));
|
||||
fail("should have failed");
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertTrue(e.getMessage(),
|
||||
e.getMessage().contains("custom path [/etc/foo] is not a sub-path of path.shared_data"));
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests the case where we create an index without shadow replicas, snapshot it and then restore into
|
||||
* an index with shadow replicas enabled.
|
||||
*/
|
||||
public void testRestoreToShadow() throws ExecutionException, InterruptedException {
|
||||
final Path dataPath = createTempDir();
|
||||
Settings nodeSettings = nodeSettings(dataPath);
|
||||
|
||||
internalCluster().startNodes(3, nodeSettings);
|
||||
Settings idxSettings = Settings.builder()
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build();
|
||||
assertAcked(prepareCreate("foo").setSettings(idxSettings));
|
||||
ensureGreen();
|
||||
final int numDocs = randomIntBetween(10, 100);
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
client().prepareIndex("foo", "doc", ""+i).setSource("foo", "bar").get();
|
||||
}
|
||||
assertNoFailures(client().admin().indices().prepareFlush().setForce(true).execute().actionGet());
|
||||
|
||||
assertAcked(client().admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("fs").setSettings(Settings.builder()
|
||||
.put("location", randomRepoPath())));
|
||||
CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("foo").get();
|
||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
|
||||
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
|
||||
assertThat(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
|
||||
|
||||
Settings shadowSettings = Settings.builder()
|
||||
.put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
|
||||
.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
|
||||
.put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2).build();
|
||||
|
||||
logger.info("--> restore the index into shadow replica index");
|
||||
RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
|
||||
.setIndexSettings(shadowSettings).setWaitForCompletion(true)
|
||||
.setRenamePattern("(.+)").setRenameReplacement("$1-copy")
|
||||
.execute().actionGet();
|
||||
assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
|
||||
ensureGreen();
|
||||
refresh();
|
||||
Index index = resolveIndex("foo-copy");
|
||||
for (IndicesService service : internalCluster().getDataNodeInstances(IndicesService.class)) {
|
||||
|
||||
if (service.hasIndex(index)) {
|
||||
IndexShard shard = service.indexServiceSafe(index).getShardOrNull(0);
|
||||
if (shard.routingEntry().primary()) {
|
||||
assertFalse(shard instanceof ShadowIndexShard);
|
||||
} else {
|
||||
assertTrue(shard instanceof ShadowIndexShard);
|
||||
}
|
||||
}
|
||||
}
|
||||
logger.info("--> performing query");
|
||||
SearchResponse resp = client().prepareSearch("foo-copy").setQuery(matchAllQuery()).get();
|
||||
assertHitCount(resp, numDocs);
|
||||
|
||||
}
|
||||
|
||||
@TestLogging("org.elasticsearch.gateway:TRACE")
|
||||
public void testIndexWithFewDocuments() throws Exception {
|
||||
final Path dataPath = createTempDir();
|
||||
Settings nodeSettings = nodeSettings(dataPath);
|
||||
|
||||
internalCluster().startNodes(3, nodeSettings);
|
||||
final String IDX = "test";
|
||||
|
||||
Settings idxSettings = Settings.builder()
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
|
||||
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
|
||||
.put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
|
||||
.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
|
||||
.put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
|
||||
.build();
|
||||
|
||||
prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();
|
||||
ensureGreen(IDX);
|
||||
|
||||
        // The primary should fail and the replica will need to
        // replay the translog; that is what this test verifies
|
||||
client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
|
||||
client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
|
||||
|
||||
IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats(IDX).clear().setTranslog(true).get();
|
||||
assertEquals(2, indicesStatsResponse.getIndex(IDX).getPrimaries().getTranslog().estimatedNumberOfOperations());
|
||||
assertEquals(2, indicesStatsResponse.getIndex(IDX).getTotal().getTranslog().estimatedNumberOfOperations());
|
||||
Index index = resolveIndex(IDX);
|
||||
for (IndicesService service : internalCluster().getInstances(IndicesService.class)) {
|
||||
IndexService indexService = service.indexService(index);
|
||||
if (indexService != null) {
|
||||
IndexShard shard = indexService.getShard(0);
|
||||
TranslogStats translogStats = shard.translogStats();
|
||||
assertTrue(translogStats != null || shard instanceof ShadowIndexShard);
|
||||
if (translogStats != null) {
|
||||
assertEquals(2, translogStats.estimatedNumberOfOperations());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check that we can get doc 1 and 2, because we are doing realtime
|
||||
// gets and getting from the primary
|
||||
GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get();
|
||||
GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get();
|
||||
assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
|
||||
assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
|
||||
|
||||
flushAndRefresh(IDX);
|
||||
client().prepareIndex(IDX, "doc", "3").setSource("foo", "bar").get();
|
||||
client().prepareIndex(IDX, "doc", "4").setSource("foo", "bar").get();
|
||||
refresh();
|
||||
|
||||
// Check that we can get doc 1 and 2 without realtime
|
||||
gResp1 = client().prepareGet(IDX, "doc", "1").setRealtime(false).get();
|
||||
gResp2 = client().prepareGet(IDX, "doc", "2").setRealtime(false).get();
|
||||
assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
|
||||
assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
|
||||
|
||||
logger.info("--> restarting all nodes");
|
||||
if (randomBoolean()) {
|
||||
logger.info("--> rolling restart");
|
||||
internalCluster().rollingRestart();
|
||||
} else {
|
||||
logger.info("--> full restart");
|
||||
internalCluster().fullRestart();
|
||||
}
|
||||
|
||||
client().admin().cluster().prepareHealth().setWaitForNodes("3").get();
|
||||
ensureGreen(IDX);
|
||||
flushAndRefresh(IDX);
|
||||
|
||||
logger.info("--> performing query");
|
||||
SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
|
||||
assertHitCount(resp, 4);
|
||||
|
||||
logger.info("--> deleting index");
|
||||
assertAcked(client().admin().indices().prepareDelete(IDX));
|
||||
}
|
||||
|
||||
    public void testReplicaToPrimaryPromotion() throws Exception {
        Path dataPath = createTempDir();
        Settings nodeSettings = nodeSettings(dataPath);

        String node1 = internalCluster().startNode(nodeSettings);
        String IDX = "test";

        Settings idxSettings = Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
            .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
            .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
            .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
            .build();

        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();
        client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
        client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();

        GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get();
        GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get();
        assertTrue(gResp1.isExists());
        assertTrue(gResp2.isExists());
        assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
        assertThat(gResp2.getSource().get("foo"), equalTo("bar"));

        // Node1 has the primary, now node2 has the replica
        internalCluster().startNode(nodeSettings);
        ensureGreen(IDX);
        client().admin().cluster().prepareHealth().setWaitForNodes("2").get();
        flushAndRefresh(IDX);

        logger.info("--> stopping node1 [{}]", node1);
        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1));
        ensureClusterSizeConsistency(); // wait for the new node to be elected and process the node leave
        ensureYellow(IDX);

        logger.info("--> performing query");
        SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
        assertHitCount(resp, 2);

        gResp1 = client().prepareGet(IDX, "doc", "1").get();
        gResp2 = client().prepareGet(IDX, "doc", "2").get();
        assertTrue(gResp1.isExists());
        assertTrue(gResp2.toString(), gResp2.isExists());
        assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
        assertThat(gResp2.getSource().get("foo"), equalTo("bar"));

        client().prepareIndex(IDX, "doc", "1").setSource("foo", "foobar").get();
        client().prepareIndex(IDX, "doc", "2").setSource("foo", "foobar").get();
        gResp1 = client().prepareGet(IDX, "doc", "1").get();
        gResp2 = client().prepareGet(IDX, "doc", "2").get();
        assertTrue(gResp1.isExists());
        assertTrue(gResp2.toString(), gResp2.isExists());
        assertThat(gResp1.getSource().get("foo"), equalTo("foobar"));
        assertThat(gResp2.getSource().get("foo"), equalTo("foobar"));
    }

    public void testPrimaryRelocation() throws Exception {
        Path dataPath = createTempDir();
        Settings nodeSettings = nodeSettings(dataPath);

        String node1 = internalCluster().startNode(nodeSettings);
        String IDX = "test";

        Settings idxSettings = Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
            .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
            .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
            .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
            .build();

        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();
        client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
        client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();

        GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get();
        GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get();
        assertTrue(gResp1.isExists());
        assertTrue(gResp2.isExists());
        assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
        assertThat(gResp2.getSource().get("foo"), equalTo("bar"));

        // Node1 has the primary, now node2 has the replica
        String node2 = internalCluster().startNode(nodeSettings);
        ensureGreen(IDX);
        client().admin().cluster().prepareHealth().setWaitForNodes("2").get();
        flushAndRefresh(IDX);

        // now prevent the primary from being allocated on node1 so that it relocates to node3
        String node3 = internalCluster().startNode(nodeSettings);
        Settings build = Settings.builder().put("index.routing.allocation.exclude._name", node1).build();
        client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet();

        ensureGreen(IDX);
        // check if primary has relocated to node3
        assertEquals(internalCluster().clusterService(node3).localNode().getId(),
            client().admin().cluster().prepareState().get().getState().routingTable().index(IDX).shard(0).primaryShard().currentNodeId());
        logger.info("--> performing query");
        SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
        assertHitCount(resp, 2);

        gResp1 = client().prepareGet(IDX, "doc", "1").get();
        gResp2 = client().prepareGet(IDX, "doc", "2").get();
        assertTrue(gResp1.isExists());
        assertTrue(gResp2.toString(), gResp2.isExists());
        assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
        assertThat(gResp2.getSource().get("foo"), equalTo("bar"));

        client().prepareIndex(IDX, "doc", "3").setSource("foo", "bar").get();
        client().prepareIndex(IDX, "doc", "4").setSource("foo", "bar").get();
        gResp1 = client().prepareGet(IDX, "doc", "3").setPreference("_primary").get();
        gResp2 = client().prepareGet(IDX, "doc", "4").setPreference("_primary").get();
        assertTrue(gResp1.isExists());
        assertTrue(gResp2.isExists());
        assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
        assertThat(gResp2.getSource().get("foo"), equalTo("bar"));
    }

    public void testPrimaryRelocationWithConcurrentIndexing() throws Exception {
        Path dataPath = createTempDir();
        Settings nodeSettings = nodeSettings(dataPath);

        String node1 = internalCluster().startNode(nodeSettings);
        final String IDX = "test";

        Settings idxSettings = Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
            .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
            .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
            .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
            .build();

        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();
        // Node1 has the primary, now node2 has the replica
        String node2 = internalCluster().startNode(nodeSettings);
        ensureGreen(IDX);
        flushAndRefresh(IDX);
        String node3 = internalCluster().startNode(nodeSettings);
        final AtomicInteger counter = new AtomicInteger(0);
        final CountDownLatch started = new CountDownLatch(1);

        final int numPhase1Docs = scaledRandomIntBetween(25, 200);
        final int numPhase2Docs = scaledRandomIntBetween(25, 200);
        final CountDownLatch phase1finished = new CountDownLatch(1);
        final CountDownLatch phase2finished = new CountDownLatch(1);
        final CopyOnWriteArrayList<Exception> exceptions = new CopyOnWriteArrayList<>();
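        // Background indexer: phase1finished is released once numPhase1Docs documents are in (the trigger for the
        // relocation below), and phase2finished is released only after the thread has indexed everything and stopped.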
        Thread thread = new Thread() {
            @Override
            public void run() {
                started.countDown();
                while (counter.get() < (numPhase1Docs + numPhase2Docs)) {
                    try {
                        final IndexResponse indexResponse = client().prepareIndex(IDX, "doc",
                            Integer.toString(counter.incrementAndGet())).setSource("foo", "bar").get();
                        assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
                    } catch (Exception e) {
                        exceptions.add(e);
                    }
                    final int docCount = counter.get();
                    if (docCount == numPhase1Docs) {
                        phase1finished.countDown();
                    }
                }
                logger.info("--> stopping indexing thread");
                phase2finished.countDown();
            }
        };
        thread.start();
        started.await();
        phase1finished.await(); // wait for a certain number of documents to be indexed
        logger.info("--> excluding {} from allocation", node1);
        // now prevent the primary from being allocated on node1 so that it relocates to node3
        Settings build = Settings.builder().put("index.routing.allocation.exclude._name", node1).build();
        client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet();
        // wait for more documents to be indexed post-recovery, also waits for
        // indexing thread to stop
        phase2finished.await();
        ExceptionsHelper.rethrowAndSuppress(exceptions);
        ensureGreen(IDX);
        thread.join();
        logger.info("--> performing query");
        flushAndRefresh();

        SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
        assertHitCount(resp, counter.get());
        assertHitCount(resp, numPhase1Docs + numPhase2Docs);
    }

    public void testPrimaryRelocationWhereRecoveryFails() throws Exception {
        Path dataPath = createTempDir();
        Settings nodeSettings = Settings.builder()
            .put("node.add_lock_id_to_custom_path", false)
            .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), dataPath)
            .build();

        String node1 = internalCluster().startNode(nodeSettings);
        final String IDX = "test";

        Settings idxSettings = Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
            .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
            .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
            .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
            .build();

        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();
        // Node1 has the primary, now node2 has the replica
        String node2 = internalCluster().startNode(nodeSettings);
        ensureGreen(IDX);
        flushAndRefresh(IDX);
        String node3 = internalCluster().startNode(nodeSettings);
        final AtomicInteger counter = new AtomicInteger(0);
        final CountDownLatch started = new CountDownLatch(1);

        final int numPhase1Docs = scaledRandomIntBetween(25, 200);
        final int numPhase2Docs = scaledRandomIntBetween(25, 200);
        final int numPhase3Docs = scaledRandomIntBetween(25, 200);
        final CountDownLatch phase1finished = new CountDownLatch(1);
        final CountDownLatch phase2finished = new CountDownLatch(1);
        final CountDownLatch phase3finished = new CountDownLatch(1);

        final AtomicBoolean keepFailing = new AtomicBoolean(true);

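        // Simulate a recovery that keeps failing: while keepFailing is set, translog-ops requests sent from node1
        // to node3 are rejected, forcing the primary relocation to be retried until the flag is cleared below.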
        MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, node1));
        mockTransportService.addDelegate(internalCluster().getInstance(TransportService.class, node3),
            new MockTransportService.DelegateTransport(mockTransportService.original()) {

                @Override
                protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request,
                                           TransportRequestOptions options) throws IOException {
                    if (keepFailing.get() && action.equals(PeerRecoveryTargetService.Actions.TRANSLOG_OPS)) {
                        logger.info("--> failing translog ops");
                        throw new ElasticsearchException("failing on purpose");
                    }
                    super.sendRequest(connection, requestId, action, request, options);
                }
            });

        Thread thread = new Thread() {
            @Override
            public void run() {
                started.countDown();
                while (counter.get() < (numPhase1Docs + numPhase2Docs + numPhase3Docs)) {
                    final IndexResponse indexResponse = client().prepareIndex(IDX, "doc",
                        Integer.toString(counter.incrementAndGet())).setSource("foo", "bar").get();
                    assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
                    final int docCount = counter.get();
                    if (docCount == numPhase1Docs) {
                        phase1finished.countDown();
                    } else if (docCount == (numPhase1Docs + numPhase2Docs)) {
                        phase2finished.countDown();
                    }
                }
                logger.info("--> stopping indexing thread");
                phase3finished.countDown();
            }
        };
        thread.start();
        started.await();
        phase1finished.await(); // wait for a certain number of documents to be indexed
        logger.info("--> excluding {} from allocation", node1);
        // now prevent the primary from being allocated on node1 so that it relocates to node3
        Settings build = Settings.builder().put("index.routing.allocation.exclude._name", node1).build();
        client().admin().indices().prepareUpdateSettings(IDX).setSettings(build).execute().actionGet();
        // wait for more documents to be indexed while the recovery keeps failing
        phase2finished.await();
        // stop failing
        keepFailing.set(false);
        // wait for more docs to be indexed
        phase3finished.await();
        ensureGreen(IDX);
        thread.join();
        logger.info("--> performing query");
        flushAndRefresh();

        SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
        assertHitCount(resp, counter.get());
    }

    public void testIndexWithShadowReplicasCleansUp() throws Exception {
        Path dataPath = createTempDir();
        Settings nodeSettings = nodeSettings(dataPath);

        final int nodeCount = randomIntBetween(2, 5);
        logger.info("--> starting {} nodes", nodeCount);
        final List<String> nodes = internalCluster().startNodes(nodeCount, nodeSettings);
        final String IDX = "test";
        final Tuple<Integer, Integer> numPrimariesAndReplicas = randomPrimariesAndReplicas(nodeCount);
        final int numPrimaries = numPrimariesAndReplicas.v1();
        final int numReplicas = numPrimariesAndReplicas.v2();
        logger.info("--> creating index {} with {} primary shards and {} replicas", IDX, numPrimaries, numReplicas);

        Settings idxSettings = Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numPrimaries)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numReplicas)
            .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
            .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
            .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
            .build();

        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();
        ensureGreen(IDX);

        client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
        client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
        flushAndRefresh(IDX);

        GetResponse gResp1 = client().prepareGet(IDX, "doc", "1").get();
        GetResponse gResp2 = client().prepareGet(IDX, "doc", "2").get();
        assertThat(gResp1.getSource().get("foo"), equalTo("bar"));
        assertThat(gResp2.getSource().get("foo"), equalTo("bar"));

        logger.info("--> performing query");
        SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
        assertHitCount(resp, 2);

        logger.info("--> deleting index " + IDX);
        assertAcked(client().admin().indices().prepareDelete(IDX));
        assertAllIndicesRemovedAndDeletionCompleted(internalCluster().getInstances(IndicesService.class));
        assertPathHasBeenCleared(dataPath);
        //TODO: uncomment the test below when https://github.com/elastic/elasticsearch/issues/17695 is resolved.
        //assertIndicesDirsDeleted(nodes);
    }

    /**
     * Tests that shadow replicas can be "naturally" rebalanced and relocated
     * around the cluster. By "naturally" I mean without using the reroute API.
     */
    // This test failed on CI when trying to assert that all the shard data has been deleted
    // from the index path. It has not been reproduced locally. Despite the IndicesService
    // deleting the index and hence, deleting all the shard data for the index, the test
    // failure still showed some Lucene files in the data directory for that index. Not sure
    // why that is, so turning on more logging here.
    @TestLogging("org.elasticsearch.indices:TRACE,org.elasticsearch.env:TRACE,_root:DEBUG")
    public void testShadowReplicaNaturalRelocation() throws Exception {
        Path dataPath = createTempDir();
        Settings nodeSettings = nodeSettings(dataPath);

        final List<String> nodes = internalCluster().startNodes(2, nodeSettings);
        String IDX = "test";

        Settings idxSettings = Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
            .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
            .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
            .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
            .build();

        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();
        ensureGreen(IDX);

        int docCount = randomIntBetween(10, 100);
        List<IndexRequestBuilder> builders = new ArrayList<>();
        for (int i = 0; i < docCount; i++) {
            builders.add(client().prepareIndex(IDX, "doc", i + "").setSource("foo", "bar"));
        }
        indexRandom(true, true, true, builders);
        flushAndRefresh(IDX);

        // start a third node; with 5 shards on each of the other two nodes, some should relocate to it
        final String node3 = internalCluster().startNode(nodeSettings);
        nodes.add(node3);

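        // Poll the cluster state until every node owns at least two of the ten shards (5 primaries + 5 replicas),
        // i.e. the shards were rebalanced onto the new node without any manual reroute.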
        assertBusy(new Runnable() {
            @Override
            public void run() {
                client().admin().cluster().prepareHealth().setWaitForNodes("3").get();
                ClusterStateResponse resp = client().admin().cluster().prepareState().get();
                RoutingNodes nodes = resp.getState().getRoutingNodes();
                for (RoutingNode node : nodes) {
                    logger.info("--> node has {} shards (needs at least 2)", node.numberOfOwningShards());
                    assertThat("at least 2 shards on node", node.numberOfOwningShards(), greaterThanOrEqualTo(2));
                }
            }
        });
        ensureYellow(IDX);

        logger.info("--> performing query");
        SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery()).get();
        assertHitCount(resp, docCount);

        assertAcked(client().admin().indices().prepareDelete(IDX));
        assertAllIndicesRemovedAndDeletionCompleted(internalCluster().getInstances(IndicesService.class));
        assertPathHasBeenCleared(dataPath);
        //TODO: uncomment the test below when https://github.com/elastic/elasticsearch/issues/17695 is resolved.
        //assertIndicesDirsDeleted(nodes);
    }

    public void testShadowReplicasUsingFieldData() throws Exception {
        Path dataPath = createTempDir();
        Settings nodeSettings = nodeSettings(dataPath);

        internalCluster().startNodes(3, nodeSettings);
        String IDX = "test";

        Settings idxSettings = Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
            .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
            .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
            .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
            .build();

        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=keyword").get();
        ensureGreen(IDX);

        client().prepareIndex(IDX, "doc", "1").setSource("foo", "foo").get();
        client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
        client().prepareIndex(IDX, "doc", "3").setSource("foo", "baz").get();
        client().prepareIndex(IDX, "doc", "4").setSource("foo", "eggplant").get();
        flushAndRefresh(IDX);

        SearchResponse resp = client().prepareSearch(IDX).setQuery(matchAllQuery())
            .addDocValueField("foo").addSort("foo", SortOrder.ASC).get();
        assertHitCount(resp, 4);
        assertOrderedSearchHits(resp, "2", "3", "4", "1");
        SearchHit[] hits = resp.getHits().getHits();
        assertThat(hits[0].field("foo").getValue().toString(), equalTo("bar"));
        assertThat(hits[1].field("foo").getValue().toString(), equalTo("baz"));
        assertThat(hits[2].field("foo").getValue().toString(), equalTo("eggplant"));
        assertThat(hits[3].field("foo").getValue().toString(), equalTo("foo"));
    }

    /** wait until none of the given nodes has shards allocated on it */
    private void assertNoShardsOn(final List<String> nodeList) throws Exception {
        assertBusy(new Runnable() {
            @Override
            public void run() {
                ClusterStateResponse resp = client().admin().cluster().prepareState().get();
                RoutingNodes nodes = resp.getState().getRoutingNodes();
                for (RoutingNode node : nodes) {
                    logger.info("--> node {} has {} shards", node.node().getName(), node.numberOfOwningShards());
                    if (nodeList.contains(node.node().getName())) {
                        assertThat("no shards on node", node.numberOfOwningShards(), equalTo(0));
                    }
                }
            }
        }, 1, TimeUnit.MINUTES);
    }

    /** wait until the node has the specified number of shards allocated on it */
    private void assertShardCountOn(final String nodeName, final int shardCount) throws Exception {
        assertBusy(new Runnable() {
            @Override
            public void run() {
                ClusterStateResponse resp = client().admin().cluster().prepareState().get();
                RoutingNodes nodes = resp.getState().getRoutingNodes();
                for (RoutingNode node : nodes) {
                    logger.info("--> node {} has {} shards", node.node().getName(), node.numberOfOwningShards());
                    if (nodeName.equals(node.node().getName())) {
                        assertThat(node.numberOfOwningShards(), equalTo(shardCount));
                    }
                }
            }
        }, 1, TimeUnit.MINUTES);
    }

    public void testIndexOnSharedFSRecoversToAnyNode() throws Exception {
        Path dataPath = createTempDir();
        Settings nodeSettings = nodeSettings(dataPath);
        Settings fooSettings = Settings.builder().put(nodeSettings).put("node.attr.affinity", "foo").build();
        Settings barSettings = Settings.builder().put(nodeSettings).put("node.attr.affinity", "bar").build();

        List<String> allNodes = internalCluster().startNodes(fooSettings, fooSettings, barSettings, barSettings);
        List<String> fooNodes = allNodes.subList(0, 2);
        List<String> barNodes = allNodes.subList(2, 4);
        String IDX = "test";

        Settings includeFoo = Settings.builder()
            .put("index.routing.allocation.include.affinity", "foo")
            .build();
        Settings includeBar = Settings.builder()
            .put("index.routing.allocation.include.affinity", "bar")
            .build();

        Settings idxSettings = Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
            .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
            .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
            .put(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, true)
            .put(includeFoo) // start with requiring the shards on "foo"
            .build();

        // allocation is restricted to the "foo" nodes, so all primaries start out there
        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=keyword").get();
        ensureGreen(IDX);

        // Index some documents
        client().prepareIndex(IDX, "doc", "1").setSource("foo", "foo").get();
        client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
        client().prepareIndex(IDX, "doc", "3").setSource("foo", "baz").get();
        client().prepareIndex(IDX, "doc", "4").setSource("foo", "eggplant").get();
        flushAndRefresh(IDX);

        // put shards on "bar"
        client().admin().indices().prepareUpdateSettings(IDX).setSettings(includeBar).get();

        // wait for the shards to move from "foo" nodes to "bar" nodes
        assertNoShardsOn(fooNodes);

        // put shards back on "foo"
        client().admin().indices().prepareUpdateSettings(IDX).setSettings(includeFoo).get();

        // wait for the shards to move from "bar" nodes to "foo" nodes
        assertNoShardsOn(barNodes);

        // Stop a foo node
        logger.info("--> stopping first 'foo' node");
        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(fooNodes.get(0)));

        // Ensure that the other foo node has all the shards now
        assertShardCountOn(fooNodes.get(1), 5);

        // Assert no shards on the "bar" nodes
        assertNoShardsOn(barNodes);

        // Stop the second "foo" node
        logger.info("--> stopping second 'foo' node");
        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(fooNodes.get(1)));

        // The index should still be able to be allocated (on the "bar" nodes),
        // all the "foo" nodes are gone
        ensureGreen(IDX);

        // Start another "foo" node and make sure the index moves back
        logger.info("--> starting additional 'foo' node");
        String newFooNode = internalCluster().startNode(fooSettings);

        assertShardCountOn(newFooNode, 5);
        assertNoShardsOn(barNodes);
    }

    public void testDeletingClosedIndexRemovesFiles() throws Exception {
        Path dataPath = createTempDir();
        Settings nodeSettings = nodeSettings(dataPath.getParent());

        final int numNodes = randomIntBetween(2, 5);
        logger.info("--> starting {} nodes", numNodes);
        final List<String> nodes = internalCluster().startNodes(numNodes, nodeSettings);
        final String IDX = "test";
        final Tuple<Integer, Integer> numPrimariesAndReplicas = randomPrimariesAndReplicas(numNodes);
        final int numPrimaries = numPrimariesAndReplicas.v1();
        final int numReplicas = numPrimariesAndReplicas.v2();
        logger.info("--> creating index {} with {} primary shards and {} replicas", IDX, numPrimaries, numReplicas);

        assert numPrimaries > 0;
        assert numReplicas >= 0;
        Settings idxSettings = Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numPrimaries)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numReplicas)
            .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
            .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
            .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
            .build();

        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();
        ensureGreen(IDX);

        int docCount = randomIntBetween(10, 100);
        List<IndexRequestBuilder> builders = new ArrayList<>();
        for (int i = 0; i < docCount; i++) {
            builders.add(client().prepareIndex(IDX, "doc", i + "").setSource("foo", "bar"));
        }
        indexRandom(true, true, true, builders);
        flushAndRefresh(IDX);

        logger.info("--> closing index {}", IDX);
        client().admin().indices().prepareClose(IDX).get();
        ensureGreen(IDX);

        logger.info("--> deleting closed index");
        client().admin().indices().prepareDelete(IDX).get();
        assertAllIndicesRemovedAndDeletionCompleted(internalCluster().getInstances(IndicesService.class));
        assertPathHasBeenCleared(dataPath);
        assertIndicesDirsDeleted(nodes);
    }

    public void testNodeJoinsWithoutShadowReplicaConfigured() throws Exception {
        Path dataPath = createTempDir();
        Settings nodeSettings = nodeSettings(dataPath);

        internalCluster().startNodes(2, nodeSettings);
        String IDX = "test";

        Settings idxSettings = Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
            .put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
            .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
            .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
            .build();

        prepareCreate(IDX).setSettings(idxSettings).addMapping("doc", "foo", "type=text").get();

        client().prepareIndex(IDX, "doc", "1").setSource("foo", "bar").get();
        client().prepareIndex(IDX, "doc", "2").setSource("foo", "bar").get();
        flushAndRefresh(IDX);

        internalCluster().startNodes(1);
        ensureYellow(IDX);

        final ClusterHealthResponse clusterHealth = client().admin().cluster()
            .prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .execute()
            .actionGet();
        assertThat(clusterHealth.getNumberOfNodes(), equalTo(3));
        // the new node is not configured for a shadow replica index, so no shards should have been assigned to it
        assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
    }

    private static void assertIndicesDirsDeleted(final List<String> nodes) throws IOException {
        for (String node : nodes) {
            final NodeEnvironment nodeEnv = internalCluster().getInstance(NodeEnvironment.class, node);
            assertThat(nodeEnv.availableIndexFolders(), equalTo(Collections.emptySet()));
        }
    }

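    /**
     * Picks a random shard configuration: either a single primary with at most numNodes - 2 replicas
     * (so at least one node holds no shard), or one to five primaries with numNodes - 1 replicas
     * (so every node holds at least one shard).
     */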
    private static Tuple<Integer, Integer> randomPrimariesAndReplicas(final int numNodes) {
        final int numPrimaries;
        final int numReplicas;
        if (randomBoolean()) {
            // test with some nodes having no shards
            numPrimaries = 1;
            numReplicas = randomIntBetween(0, numNodes - 2);
        } else {
            // test with all nodes having at least one shard
            numPrimaries = randomIntBetween(1, 5);
            numReplicas = numNodes - 1;
        }
        return Tuple.tuple(numPrimaries, numReplicas);
    }

}