Merge branch 'master' into tests

Jack Conradson 2016-02-02 11:48:27 -08:00
commit 58ad172492
80 changed files with 1318 additions and 722 deletions

View File

@ -254,7 +254,11 @@ public class Version {
public static final int V_1_7_3_ID = 1070399;
public static final Version V_1_7_3 = new Version(V_1_7_3_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_4_ID = 1070499;
public static final Version V_1_7_4 = new Version(V_1_7_4_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final Version V_1_7_4 = new Version(V_1_7_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_5_ID = 1070599;
public static final Version V_1_7_5 = new Version(V_1_7_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_6_ID = 1070699;
public static final Version V_1_7_6 = new Version(V_1_7_6_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_2_0_0_beta1_ID = 2000001;
public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
@ -275,9 +279,13 @@ public class Version {
public static final int V_2_1_1_ID = 2010199;
public static final Version V_2_1_1 = new Version(V_2_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_1_2_ID = 2010299;
public static final Version V_2_1_2 = new Version(V_2_1_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final Version V_2_1_2 = new Version(V_2_1_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_1_3_ID = 2010399;
public static final Version V_2_1_3 = new Version(V_2_1_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
public static final int V_2_2_0_ID = 2020099;
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
public static final Version V_2_2_0 = new Version(V_2_2_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_4_0);
public static final int V_2_2_1_ID = 2020199;
public static final Version V_2_2_1 = new Version(V_2_2_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
public static final int V_2_3_0_ID = 2030099;
public static final Version V_2_3_0 = new Version(V_2_3_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
public static final int V_3_0_0_ID = 3000099;
@ -299,8 +307,12 @@ public class Version {
return V_3_0_0;
case V_2_3_0_ID:
return V_2_3_0;
case V_2_2_1_ID:
return V_2_2_1;
case V_2_2_0_ID:
return V_2_2_0;
case V_2_1_3_ID:
return V_2_1_3;
case V_2_1_2_ID:
return V_2_1_2;
case V_2_1_1_ID:
@ -321,6 +333,10 @@ public class Version {
return V_2_0_0_beta2;
case V_2_0_0_beta1_ID:
return V_2_0_0_beta1;
case V_1_7_6_ID:
return V_1_7_6;
case V_1_7_5_ID:
return V_1_7_5;
case V_1_7_4_ID:
return V_1_7_4;
case V_1_7_3_ID:
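The integer IDs above appear to follow the usual positional scheme: two digits each for major, minor, and revision, plus a two-digit build where 99 marks a release and low values mark pre-releases (so 1070499 reads as 1.7.4 and 2000001 as 2.0.0_beta1); the boolean constructor argument appears to be the snapshot flag, which is why it flips to false once a version ships. A minimal decoding sketch under that assumption (the helper name is illustrative, not part of the class):

    // Illustrative decoder for the version-ID layout assumed above.
    static String describeVersionId(int id) {
        int major = id / 1000000;          // 1070499 -> 1
        int minor = (id / 10000) % 100;    // 1070499 -> 7
        int revision = (id / 100) % 100;   // 1070499 -> 4
        int build = id % 100;              // 99 = release, 1 = beta1, ...
        String base = major + "." + minor + "." + revision;
        return build == 99 ? base : base + "_beta" + build;
    }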

View File

@ -58,7 +58,7 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
}
@Override
protected Tuple<ReplicationResponse, ShardFlushRequest> shardOperationOnPrimary(MetaData metaData, ShardFlushRequest shardRequest) throws Throwable {
protected Tuple<ReplicationResponse, ShardFlushRequest> shardOperationOnPrimary(MetaData metaData, ShardFlushRequest shardRequest) {
IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
indexShard.flush(shardRequest.getRequest());
logger.trace("{} flush request executed on primary", indexShard.shardId());

View File

@ -60,7 +60,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Basi
}
@Override
protected Tuple<ReplicationResponse, BasicReplicationRequest> shardOperationOnPrimary(MetaData metaData, BasicReplicationRequest shardRequest) throws Throwable {
protected Tuple<ReplicationResponse, BasicReplicationRequest> shardOperationOnPrimary(MetaData metaData, BasicReplicationRequest shardRequest) {
IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
indexShard.refresh("api");
logger.trace("{} refresh request executed on primary", indexShard.shardId());

View File

@ -140,7 +140,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
}
@Override
protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Throwable {
protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Exception {
// validate, if routing is required, that we got routing
IndexMetaData indexMetaData = metaData.index(request.shardId().getIndex());
@ -200,7 +200,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
* Execute the given {@link IndexRequest} on a primary shard, throwing a
* {@link RetryOnPrimaryException} if the operation needs to be re-tried.
*/
public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Throwable {
public static WriteResult<IndexResponse> executeIndexRequestOnPrimary(IndexRequest request, IndexShard indexShard, MappingUpdatedAction mappingUpdatedAction) throws Exception {
Engine.Index operation = prepareIndexOperationOnPrimary(request, indexShard);
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
final ShardId shardId = indexShard.shardId();
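The signature changes in this file and the two above narrow throws Throwable to throws Exception, or drop the clause entirely. That is legal because Java lets an override declare a narrower throws clause than the method it overrides; a minimal sketch with illustrative names:

    abstract class Action {
        // the supertype declares the broad checked contract
        abstract void perform() throws Exception;
    }

    class FlushAction extends Action {
        @Override
        void perform() { // narrowing to no checked exceptions is allowed
            // ... nothing checked is thrown here ...
        }
    }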

View File

@ -55,6 +55,8 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT;
private long routedBasedOnClusterVersion = 0;
public ReplicationRequest() {
}
@ -141,6 +143,20 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
return (Request) this;
}
/**
* Sets the minimum version of the cluster state that is required on the next node before we redirect to another primary.
* Used to prevent redirect loops, see also {@link TransportReplicationAction.ReroutePhase#doRun()}
*/
@SuppressWarnings("unchecked")
Request routedBasedOnClusterVersion(long routedBasedOnClusterVersion) {
this.routedBasedOnClusterVersion = routedBasedOnClusterVersion;
return (Request) this;
}
long routedBasedOnClusterVersion() {
return routedBasedOnClusterVersion;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = null;
@ -161,6 +177,7 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
consistencyLevel = WriteConsistencyLevel.fromId(in.readByte());
timeout = TimeValue.readTimeValue(in);
index = in.readString();
routedBasedOnClusterVersion = in.readVLong();
}
@Override
@ -175,6 +192,7 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
out.writeByte(consistencyLevel.id());
timeout.writeTo(out);
out.writeString(index);
out.writeVLong(routedBasedOnClusterVersion);
}
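readFrom and writeTo above append the new field at the same position, at the end of the stream: the wire format relies on reader and writer touching fields in an identical order, with new fields added last so existing fields keep their positions. A stripped-down sketch of that symmetry (fields abbreviated to the ones shown in the hunks):

    // Writer and reader must mirror each other exactly; the new vlong goes last.
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(index);
        out.writeVLong(routedBasedOnClusterVersion); // appended last
    }

    public void readFrom(StreamInput in) throws IOException {
        index = in.readString();
        routedBasedOnClusterVersion = in.readVLong(); // read in the same order
    }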
/**

View File

@ -56,6 +56,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.IndicesService;
@ -156,10 +157,11 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
/**
* Primary operation on node with primary copy, the provided metadata should be used for request validation if needed
*
* @return a tuple of non-null values: the first is the result of the primary operation, the second the request to be
* executed on the replica shards.
*/
protected abstract Tuple<Response, ReplicaRequest> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable;
protected abstract Tuple<Response, ReplicaRequest> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception;
/**
* Replica operation on nodes with replica copies
@ -299,7 +301,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
setShard(shardId);
}
public RetryOnReplicaException(StreamInput in) throws IOException{
public RetryOnReplicaException(StreamInput in) throws IOException {
super(in);
}
}
@ -326,8 +328,8 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
public void onNewClusterState(ClusterState state) {
context.close();
// Forking a thread on local node via transport service so that custom transport services have an
// opportunity to execute custom logic before the replica operation begins
String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]";
TransportChannelResponseHandler<TransportResponse.Empty> handler = TransportChannelResponseHandler.emptyResponseHandler(logger, channel, extraMessage);
transportService.sendRequest(clusterService.localNode(), transportReplicaAction, request, handler);
}
@ -352,6 +354,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
}
}
private void failReplicaIfNeeded(Throwable t) {
String index = request.shardId().getIndex().getName();
int shardId = request.shardId().id();
@ -383,7 +386,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
@Override
protected void doRun() throws Exception {
assert request.shardId() != null : "request shardId must be set";
try (Releasable ignored = getIndexShardOperationsCounter(request.shardId())) {
try (Releasable ignored = getIndexShardReferenceOnReplica(request.shardId())) {
shardOperationOnReplica(request);
if (logger.isTraceEnabled()) {
logger.trace("action [{}] completed on shard [{}] for request [{}]", transportReplicaAction, request.shardId(), request);
@ -399,7 +402,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
setShard(shardId);
}
public RetryOnPrimaryException(StreamInput in) throws IOException{
public RetryOnPrimaryException(StreamInput in) throws IOException {
super(in);
}
}
@ -445,6 +448,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
handleBlockException(blockException);
return;
}
// request does not have a shardId yet, we need to pass the concrete index to resolve shardId
resolveRequest(state.metaData(), concreteIndex, request);
assert request.shardId() != null : "request shardId must be set in resolveRequest";
@ -468,6 +472,15 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
performAction(node, transportPrimaryAction, true);
} else {
if (state.version() < request.routedBasedOnClusterVersion()) {
logger.trace("failed to find primary [{}] for request [{}] despite sender thinking it would be here. Local cluster state version [{}]] is older than on sending node (version [{}]), scheduling a retry...", request.shardId(), request, state.version(), request.routedBasedOnClusterVersion());
retryBecauseUnavailable(request.shardId(), "failed to find primary as current cluster state with version [" + state.version() + "] is stale (expected at least [" + request.routedBasedOnClusterVersion() + "]");
return;
} else {
// chasing the node with the active primary for a second hop requires that we are at least up-to-date with the current cluster state version
// this prevents redirect loops between two nodes when a primary was relocated and the relocation target is not aware that it is the active primary shard already.
request.routedBasedOnClusterVersion(state.version());
}
if (logger.isTraceEnabled()) {
logger.trace("send action [{}] on primary [{}] for request [{}] with cluster state version [{}] to [{}]", actionName, request.shardId(), request, state.version(), primary.currentNodeId());
}
@ -584,60 +597,71 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
/**
* Responsible for performing primary operation locally and delegating to replication action once successful
* Responsible for performing the primary operation locally or delegating it to the relocation target when the shard has
* been marked as RELOCATED. Delegates to the replication action once successful.
* <p>
* Note that as soon as we move to replication action, state responsibility is transferred to {@link ReplicationPhase}.
*/
final class PrimaryPhase extends AbstractRunnable {
class PrimaryPhase extends AbstractRunnable {
private final Request request;
private final ShardId shardId;
private final TransportChannel channel;
private final ClusterState state;
private final AtomicBoolean finished = new AtomicBoolean();
private Releasable indexShardReference;
private IndexShardReference indexShardReference;
PrimaryPhase(Request request, TransportChannel channel) {
this.state = clusterService.state();
this.request = request;
assert request.shardId() != null : "request shardId must be set prior to primary phase";
this.shardId = request.shardId();
this.channel = channel;
}
@Override
public void onFailure(Throwable e) {
if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
if (logger.isTraceEnabled()) {
logger.trace("failed to execute [{}] on [{}]", e, request, shardId);
}
} else {
if (logger.isDebugEnabled()) {
logger.debug("failed to execute [{}] on [{}]", e, request, shardId);
}
}
finishAsFailed(e);
}
@Override
protected void doRun() throws Exception {
// request shardID was set in ReroutePhase
assert request.shardId() != null : "request shardID must be set prior to primary phase";
final ShardId shardId = request.shardId();
final String writeConsistencyFailure = checkWriteConsistency(shardId);
if (writeConsistencyFailure != null) {
finishBecauseUnavailable(shardId, writeConsistencyFailure);
return;
}
final ReplicationPhase replicationPhase;
try {
indexShardReference = getIndexShardOperationsCounter(shardId);
// closed in finishAsFailed(e) in the case of error
indexShardReference = getIndexShardReferenceOnPrimary(shardId);
if (indexShardReference.isRelocated() == false) {
// execute locally
Tuple<Response, ReplicaRequest> primaryResponse = shardOperationOnPrimary(state.metaData(), request);
if (logger.isTraceEnabled()) {
logger.trace("action [{}] completed on shard [{}] for request [{}] with cluster state version [{}]", transportPrimaryAction, shardId, request, state.version());
}
replicationPhase = new ReplicationPhase(primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
} catch (Throwable e) {
if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
if (logger.isTraceEnabled()) {
logger.trace("failed to execute [{}] on [{}]", e, request, shardId);
}
} else {
if (logger.isDebugEnabled()) {
logger.debug("failed to execute [{}] on [{}]", e, request, shardId);
}
}
finishAsFailed(e);
return;
ReplicationPhase replicationPhase = new ReplicationPhase(primaryResponse.v2(), primaryResponse.v1(), shardId, channel, indexShardReference);
finishAndMoveToReplication(replicationPhase);
} else {
// delegate primary phase to relocation target
// it is safe to execute the primary phase on the relocation target because no more in-flight operations execute the
// primary phase on the local shard; all subsequent operations run their primary phase on the relocation target.
final ShardRouting primary = indexShardReference.routingEntry();
indexShardReference.close();
assert primary.relocating() : "indexShard is marked as relocated but routing isn't: " + primary;
DiscoveryNode relocatingNode = state.nodes().get(primary.relocatingNodeId());
transportService.sendRequest(relocatingNode, transportPrimaryAction, request, transportOptions,
TransportChannelResponseHandler.responseHandler(logger, TransportReplicationAction.this::newResponseInstance, channel,
"rerouting indexing to target primary " + primary));
}
finishAndMoveToReplication(replicationPhase);
}
/**
@ -721,10 +745,24 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
}
protected Releasable getIndexShardOperationsCounter(ShardId shardId) {
/**
* returns a new reference to {@link IndexShard} to perform a primary operation. Released after performing primary operation locally
* and replication of the operation to all replica shards is completed / failed (see {@link ReplicationPhase}).
*/
protected IndexShardReference getIndexShardReferenceOnPrimary(ShardId shardId) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());
return new IndexShardReference(indexShard);
return new IndexShardReferenceImpl(indexShard, true);
}
/**
* returns a new reference to {@link IndexShard} on a node that the request is replicated to. The reference is closed as soon as
* replication is completed on the node.
*/
protected IndexShardReference getIndexShardReferenceOnReplica(ShardId shardId) {
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
IndexShard indexShard = indexService.getShard(shardId.id());
return new IndexShardReferenceImpl(indexShard, false);
}
/**
@ -777,17 +815,20 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
int numberOfIgnoredShardInstances = 0;
int numberOfPendingShardInstances = 0;
for (ShardRouting shard : shards) {
// the following logic to select the shards to replicate to is mirrored and explained in the doRun method below
if (shard.primary() == false && executeOnReplica == false) {
numberOfIgnoredShardInstances++;
} else if (shard.unassigned()) {
continue;
}
if (shard.unassigned()) {
numberOfIgnoredShardInstances++;
} else {
if (shard.currentNodeId().equals(nodes.localNodeId()) == false) {
numberOfPendingShardInstances++;
}
if (shard.relocating()) {
numberOfPendingShardInstances++;
}
continue;
}
if (nodes.localNodeId().equals(shard.currentNodeId()) == false) {
numberOfPendingShardInstances++;
}
if (shard.relocating() && nodes.localNodeId().equals(shard.relocatingNodeId()) == false) {
numberOfPendingShardInstances++;
}
}
// one for the local primary copy
@ -795,7 +836,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
this.pending = new AtomicInteger(numberOfPendingShardInstances);
if (logger.isTraceEnabled()) {
logger.trace("replication phase started. pending [{}], action [{}], request [{}], cluster state version used [{}]", pending.get(),
transportReplicaAction, replicaRequest, state.version());
}
}
@ -860,7 +901,8 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
performOnReplica(shard);
}
// send operation to relocating shard
if (shard.relocating()) {
// local shard can be a relocation target of a primary that is in relocated state
if (shard.relocating() && nodes.localNodeId().equals(shard.relocatingNodeId()) == false) {
performOnReplica(shard.buildTargetRelocatingShard());
}
}
@ -898,22 +940,22 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
String message = String.format(Locale.ROOT, "failed to perform %s on replica on node %s", transportReplicaAction, node);
logger.warn("[{}] {}", exp, shardId, message);
shardStateAction.shardFailed(
shard,
indexUUID,
message,
exp,
new ShardStateAction.Listener() {
@Override
public void onSuccess() {
onReplicaFailure(nodeId, exp);
}
@Override
public void onFailure(Throwable t) {
// TODO: handle catastrophic non-channel failures
onReplicaFailure(nodeId, exp);
}
}
);
}
}
@ -993,21 +1035,39 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
return IndexMetaData.isIndexUsingShadowReplicas(settings) == false;
}
static class IndexShardReference implements Releasable {
interface IndexShardReference extends Releasable {
boolean isRelocated();
final private IndexShard counter;
private final AtomicBoolean closed = new AtomicBoolean();
ShardRouting routingEntry();
}
IndexShardReference(IndexShard counter) {
counter.incrementOperationCounter();
this.counter = counter;
static final class IndexShardReferenceImpl implements IndexShardReference {
private final IndexShard indexShard;
private final Releasable operationLock;
IndexShardReferenceImpl(IndexShard indexShard, boolean primaryAction) {
this.indexShard = indexShard;
if (primaryAction) {
operationLock = indexShard.acquirePrimaryOperationLock();
} else {
operationLock = indexShard.acquireReplicaOperationLock();
}
}
@Override
public void close() {
if (closed.compareAndSet(false, true)) {
counter.decrementOperationCounter();
}
operationLock.close();
}
@Override
public boolean isRelocated() {
return indexShard.state() == IndexShardState.RELOCATED;
}
@Override
public ShardRouting routingEntry() {
return indexShard.routingEntry();
}
}
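A sketch of how the two reference factories above are meant to be used (call sites are illustrative; the real primary path hands the reference to ReplicationPhase rather than closing it immediately):

    // Replica side: hold the operation lock only for the duration of the operation.
    try (Releasable ignored = getIndexShardReferenceOnReplica(shardId)) {
        shardOperationOnReplica(request);
    }

    // Primary side: check for a relocated primary before executing locally.
    IndexShardReference ref = getIndexShardReferenceOnPrimary(shardId);
    if (ref.isRelocated()) {
        ref.close(); // release the lock, then forward the request to the relocation target
    } else {
        // execute locally; ReplicationPhase closes the reference once replication completes
    }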

View File

@ -27,6 +27,7 @@ import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliToolConfig;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.cli.UserError;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.monitor.jvm.JvmInfo;
@ -82,7 +83,9 @@ final class BootstrapCLIParser extends CliTool {
@Override
public ExitStatus execute(Settings settings, Environment env) throws Exception {
terminal.println("Version: %s, Build: %s/%s, JVM: %s", org.elasticsearch.Version.CURRENT, Build.CURRENT.shortHash(), Build.CURRENT.date(), JvmInfo.jvmInfo().version());
terminal.println("Version: " + org.elasticsearch.Version.CURRENT
+ ", Build: " + Build.CURRENT.shortHash() + "/" + Build.CURRENT.date()
+ ", JVM: " + JvmInfo.jvmInfo().version());
return ExitStatus.OK_AND_EXIT;
}
}
@ -103,7 +106,7 @@ final class BootstrapCLIParser extends CliTool {
// TODO: don't use system properties as a way to do this, its horrible...
@SuppressForbidden(reason = "Sets system properties passed as CLI parameters")
public static Command parse(Terminal terminal, CommandLine cli) {
public static Command parse(Terminal terminal, CommandLine cli) throws UserError {
if (cli.hasOption("V")) {
return Version.parse(terminal, cli);
}
@ -132,11 +135,11 @@ final class BootstrapCLIParser extends CliTool {
String arg = iterator.next();
if (!arg.startsWith("--")) {
if (arg.startsWith("-D") || arg.startsWith("-d") || arg.startsWith("-p")) {
throw new IllegalArgumentException(
throw new UserError(ExitStatus.USAGE,
"Parameter [" + arg + "] starting with \"-D\", \"-d\" or \"-p\" must be before any parameters starting with --"
);
} else {
throw new IllegalArgumentException("Parameter [" + arg + "]does not start with --");
throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "]does not start with --");
}
}
// if there is no = sign, we have to get the next argument
@ -150,11 +153,11 @@ final class BootstrapCLIParser extends CliTool {
if (iterator.hasNext()) {
String value = iterator.next();
if (value.startsWith("--")) {
throw new IllegalArgumentException("Parameter [" + arg + "] needs value");
throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
}
System.setProperty("es." + arg, value);
} else {
throw new IllegalArgumentException("Parameter [" + arg + "] needs value");
throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
}
}
}

View File

@ -94,7 +94,7 @@ public class MappingUpdatedAction extends AbstractComponent {
}
}
public void updateMappingOnMasterAsynchronously(String index, String type, Mapping mappingUpdate) throws Throwable {
public void updateMappingOnMasterAsynchronously(String index, String type, Mapping mappingUpdate) throws Exception {
updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout, null);
}
@ -102,7 +102,7 @@ public class MappingUpdatedAction extends AbstractComponent {
* Same as {@link #updateMappingOnMasterSynchronously(String, String, Mapping, TimeValue)}
* using the default timeout.
*/
public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate) throws Throwable {
public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate) throws Exception {
updateMappingOnMasterSynchronously(index, type, mappingUpdate, dynamicMappingUpdateTimeout);
}
@ -111,7 +111,7 @@ public class MappingUpdatedAction extends AbstractComponent {
* {@code timeout}. When this method returns successfully mappings have
* been applied to the master node and propagated to data nodes.
*/
public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate, TimeValue timeout) throws Throwable {
public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate, TimeValue timeout) throws Exception {
if (updateMappingRequest(index, type, mappingUpdate, timeout).get().isAcknowledged() == false) {
throw new TimeoutException("Failed to acknowledge mapping update within [" + timeout + "]");
}

View File

@ -100,8 +100,9 @@ public abstract class CheckFileCommand extends CliTool.Command {
Set<PosixFilePermission> permissionsBeforeWrite = entry.getValue();
Set<PosixFilePermission> permissionsAfterWrite = Files.getPosixFilePermissions(entry.getKey());
if (!permissionsBeforeWrite.equals(permissionsAfterWrite)) {
terminal.printWarn("The file permissions of [%s] have changed from [%s] to [%s]",
entry.getKey(), PosixFilePermissions.toString(permissionsBeforeWrite), PosixFilePermissions.toString(permissionsAfterWrite));
terminal.printWarn("The file permissions of [" + entry.getKey() + "] have changed "
+ "from [" + PosixFilePermissions.toString(permissionsBeforeWrite) + "] "
+ "to [" + PosixFilePermissions.toString(permissionsAfterWrite) + "]");
terminal.printWarn("Please ensure that the user account running Elasticsearch has read access to this file!");
}
}
@ -115,7 +116,7 @@ public abstract class CheckFileCommand extends CliTool.Command {
String ownerBeforeWrite = entry.getValue();
String ownerAfterWrite = Files.getOwner(entry.getKey()).getName();
if (!ownerAfterWrite.equals(ownerBeforeWrite)) {
terminal.printWarn("WARN: Owner of file [%s] used to be [%s], but now is [%s]", entry.getKey(), ownerBeforeWrite, ownerAfterWrite);
terminal.printWarn("WARN: Owner of file [" + entry.getKey() + "] used to be [" + ownerBeforeWrite + "], but now is [" + ownerAfterWrite + "]");
}
}
@ -128,7 +129,7 @@ public abstract class CheckFileCommand extends CliTool.Command {
String groupBeforeWrite = entry.getValue();
String groupAfterWrite = Files.readAttributes(entry.getKey(), PosixFileAttributes.class).group().getName();
if (!groupAfterWrite.equals(groupBeforeWrite)) {
terminal.printWarn("WARN: Group of file [%s] used to be [%s], but now is [%s]", entry.getKey(), groupBeforeWrite, groupAfterWrite);
terminal.printWarn("WARN: Group of file [" + entry.getKey() + "] used to be [" + groupBeforeWrite + "], but now is [" + groupAfterWrite + "]");
}
}

View File

@ -19,14 +19,17 @@
package org.elasticsearch.common.cli;
import org.apache.commons.cli.AlreadySelectedException;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.MissingArgumentException;
import org.apache.commons.cli.MissingOptionException;
import org.apache.commons.cli.UnrecognizedOptionException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import java.io.IOException;
import java.util.Locale;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
@ -50,7 +53,7 @@ import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
public abstract class CliTool {
// based on sysexits.h
public static enum ExitStatus {
public enum ExitStatus {
OK(0),
OK_AND_EXIT(0),
USAGE(64), /* command line usage error */
@ -69,23 +72,13 @@ public abstract class CliTool {
final int status;
private ExitStatus(int status) {
ExitStatus(int status) {
this.status = status;
}
public int status() {
return status;
}
public static ExitStatus fromStatus(int status) {
for (ExitStatus exitStatus : values()) {
if (exitStatus.status() == status) {
return exitStatus;
}
}
return null;
}
}
protected final Terminal terminal;
@ -108,7 +101,7 @@ public abstract class CliTool {
settings = env.settings();
}
public final ExitStatus execute(String... args) {
public final ExitStatus execute(String... args) throws Exception {
// first lets see if the user requests tool help. We're doing it only if
// this is a multi-command tool. If it's a single command tool, the -h/--help
@ -132,7 +125,7 @@ public abstract class CliTool {
String cmdName = args[0];
cmd = config.cmd(cmdName);
if (cmd == null) {
terminal.printError("unknown command [%s]. Use [-h] option to list available commands", cmdName);
terminal.printError("unknown command [" + cmdName + "]. Use [-h] option to list available commands");
return ExitStatus.USAGE;
}
@ -146,23 +139,11 @@ public abstract class CliTool {
}
}
Command command = null;
try {
command = parse(cmd, args);
return command.execute(settings, env);
} catch (IOException ioe) {
terminal.printError(ioe);
return ExitStatus.IO_ERROR;
} catch (IllegalArgumentException ilae) {
terminal.printError(ilae);
return ExitStatus.USAGE;
} catch (Throwable t) {
terminal.printError(t);
if (command == null) {
return ExitStatus.USAGE;
}
return ExitStatus.CODE_ERROR;
return parse(cmd, args).execute(settings, env);
} catch (UserError error) {
terminal.printError(error.getMessage());
return error.exitStatus;
}
}
@ -177,7 +158,13 @@ public abstract class CliTool {
if (cli.hasOption("h")) {
return helpCmd(cmd);
}
cli = parser.parse(cmd.options(), args, cmd.isStopAtNonOption());
try {
cli = parser.parse(cmd.options(), args, cmd.isStopAtNonOption());
} catch (AlreadySelectedException|MissingArgumentException|MissingOptionException|UnrecognizedOptionException e) {
// intentionally drop the stack trace here as these are really user errors,
// the stack trace into cli parsing lib is not important
throw new UserError(ExitStatus.USAGE, e.toString());
}
Terminal.Verbosity verbosity = Terminal.Verbosity.resolve(cli);
terminal.verbosity(verbosity);
return parse(cmd.name(), cli);

View File

@ -35,8 +35,6 @@ import java.util.Locale;
@SuppressForbidden(reason = "System#out")
public abstract class Terminal {
public static final String DEBUG_SYSTEM_PROPERTY = "es.cli.debug";
public static final Terminal DEFAULT = ConsoleTerminal.supported() ? new ConsoleTerminal() : new SystemTerminal();
public static enum Verbosity {
@ -64,7 +62,6 @@ public abstract class Terminal {
}
private Verbosity verbosity = Verbosity.NORMAL;
private final boolean isDebugEnabled;
public Terminal() {
this(Verbosity.NORMAL);
@ -72,7 +69,6 @@ public abstract class Terminal {
public Terminal(Verbosity verbosity) {
this.verbosity = verbosity;
this.isDebugEnabled = "true".equals(System.getProperty(DEBUG_SYSTEM_PROPERTY, "false"));
}
public void verbosity(Verbosity verbosity) {
@ -93,44 +89,37 @@ public abstract class Terminal {
println(Verbosity.NORMAL);
}
public void println(String msg, Object... args) {
println(Verbosity.NORMAL, msg, args);
public void println(String msg) {
println(Verbosity.NORMAL, msg);
}
public void print(String msg, Object... args) {
print(Verbosity.NORMAL, msg, args);
public void print(String msg) {
print(Verbosity.NORMAL, msg);
}
public void println(Verbosity verbosity) {
println(verbosity, "");
}
public void println(Verbosity verbosity, String msg, Object... args) {
print(verbosity, msg + System.lineSeparator(), args);
public void println(Verbosity verbosity, String msg) {
print(verbosity, msg + System.lineSeparator());
}
public void print(Verbosity verbosity, String msg, Object... args) {
public void print(Verbosity verbosity, String msg) {
if (this.verbosity.enabled(verbosity)) {
doPrint(msg, args);
doPrint(msg);
}
}
public void printError(String msg, Object... args) {
println(Verbosity.SILENT, "ERROR: " + msg, args);
public void printError(String msg) {
println(Verbosity.SILENT, "ERROR: " + msg);
}
public void printError(Throwable t) {
printError("%s", t.toString());
if (isDebugEnabled) {
printStackTrace(t);
}
public void printWarn(String msg) {
println(Verbosity.SILENT, "WARN: " + msg);
}
public void printWarn(String msg, Object... args) {
println(Verbosity.SILENT, "WARN: " + msg, args);
}
protected abstract void doPrint(String msg, Object... args);
protected abstract void doPrint(String msg);
private static class ConsoleTerminal extends Terminal {
@ -141,8 +130,8 @@ public abstract class Terminal {
}
@Override
public void doPrint(String msg, Object... args) {
console.printf(msg, args);
public void doPrint(String msg) {
console.printf("%s", msg);
console.flush();
}
@ -168,13 +157,13 @@ public abstract class Terminal {
private final PrintWriter printWriter = new PrintWriter(System.out);
@Override
public void doPrint(String msg, Object... args) {
System.out.print(String.format(Locale.ROOT, msg, args));
public void doPrint(String msg) {
System.out.print(msg);
}
@Override
public String readText(String text, Object... args) {
print(text, args);
print(text);
BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
try {
return reader.readLine();
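With the varargs printf-style overloads removed, call sites build the message themselves; a before/after sketch of the migration pattern this diff applies throughout:

    // before (format string + args, removed in this change):
    //   terminal.println("unknown command [%s]. Use [-h] option to list available commands", cmdName);
    // after (plain concatenation):
    terminal.println("unknown command [" + cmdName + "]. Use [-h] option to list available commands");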

View File

@ -0,0 +1,35 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.cli;
/**
* An exception representing a user fixable problem in {@link CliTool} usage.
*/
public class UserError extends Exception {
/** The exit status the cli should use when catching this user error. */
public final CliTool.ExitStatus exitStatus;
/** Constructs a UserError with an exit status and message to show the user. */
public UserError(CliTool.ExitStatus exitStatus, String msg) {
super(msg);
this.exitStatus = exitStatus;
}
}
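A sketch of the intended flow with a hypothetical command: user-facing problems throw UserError with an ExitStatus, and CliTool.execute prints only the message and returns that status instead of dumping a stack trace.

    // Hypothetical command, for illustration only.
    class StartCommand extends CliTool.Command {
        StartCommand(Terminal terminal) {
            super(terminal);
        }

        @Override
        public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
            if (settings.get("path.home") == null) {
                // a user-fixable problem: report it as USAGE, not CODE_ERROR
                throw new UserError(CliTool.ExitStatus.USAGE, "path.home must be set");
            }
            return CliTool.ExitStatus.OK;
        }
    }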

View File

@ -58,7 +58,6 @@ import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.netty.NettyHttpServerTransport;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.analysis.HunspellService;
@ -160,7 +159,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING,
ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,

View File

@ -0,0 +1,117 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.util.concurrent;
import org.elasticsearch.common.lease.Releasable;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
/**
* Container that represents a resource with reference counting capabilities. Provides operations to suspend acquisition of new references.
* This is useful for resource management when resources are intermittently unavailable.
*
* Assumes less than Integer.MAX_VALUE references are concurrently being held at one point in time.
*/
public final class SuspendableRefContainer {
private static final int TOTAL_PERMITS = Integer.MAX_VALUE;
private final Semaphore semaphore;
public SuspendableRefContainer() {
// fair semaphore to ensure that blockAcquisition() does not starve under thread contention
this.semaphore = new Semaphore(TOTAL_PERMITS, true);
}
/**
* Tries acquiring a reference. Returns reference holder if reference acquisition is not blocked at the time of invocation (see
* {@link #blockAcquisition()}). Returns null if reference acquisition is blocked at the time of invocation.
*
* @return reference holder if reference acquisition is not blocked, null otherwise
* @throws InterruptedException if the current thread is interrupted
*/
public Releasable tryAcquire() throws InterruptedException {
if (semaphore.tryAcquire(1, 0, TimeUnit.SECONDS)) { // the untimed tryAcquire methods do not honor the fairness setting
return idempotentRelease(1);
} else {
return null;
}
}
/**
* Acquires a reference. Blocks if reference acquisition is blocked at the time of invocation.
*
* @return reference holder
* @throws InterruptedException if the current thread is interrupted
*/
public Releasable acquire() throws InterruptedException {
semaphore.acquire();
return idempotentRelease(1);
}
/**
* Acquires a reference. Blocks if reference acquisition is blocked at the time of invocation.
*
* @return reference holder
*/
public Releasable acquireUninterruptibly() {
semaphore.acquireUninterruptibly();
return idempotentRelease(1);
}
/**
* Disables reference acquisition and waits until all existing references are released.
* When released, reference acquisition is enabled again.
* This guarantees that between successful acquisition and release, no one is holding a reference.
*
* @return reference holder for all references; closing it re-enables acquisition
*/
public Releasable blockAcquisition() {
semaphore.acquireUninterruptibly(TOTAL_PERMITS);
return idempotentRelease(TOTAL_PERMITS);
}
/**
* Helper method that ensures permits are only released once
*
* @return reference holder
*/
private Releasable idempotentRelease(int permits) {
AtomicBoolean closed = new AtomicBoolean();
return () -> {
if (closed.compareAndSet(false, true)) {
semaphore.release(permits);
}
};
}
/**
* Returns the number of references currently being held.
*/
public int activeRefs() {
int availablePermits = semaphore.availablePermits();
if (availablePermits == 0) {
// when blockAcquisition is holding all permits
return 0;
} else {
return TOTAL_PERMITS - availablePermits;
}
}
}
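A usage sketch (names illustrative): operations hold short-lived references while they run, and a state handoff such as primary relocation uses blockAcquisition() to wait for in-flight operations to drain before flipping state, mirroring IndexShard.relocated() further below.

    SuspendableRefContainer refs = new SuspendableRefContainer();

    // an operation: acquire a reference for the duration of the work
    try (Releasable ref = refs.acquireUninterruptibly()) {
        // ... index / flush / refresh while the reference is held ...
    }

    // a handoff: block new references and wait for active ones to be released
    try (Releasable block = refs.blockAcquisition()) {
        // nothing holds a reference here; safe to change state (e.g. to RELOCATED)
    }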

View File

@ -89,17 +89,16 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
*/
public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implements Discovery, PingContextProvider {
public final static Setting<Boolean> REJOIN_ON_MASTER_GONE_SETTING = Setting.boolSetting("discovery.zen.rejoin_on_master_gone", true, true, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> PING_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), false, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> JOIN_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.join_timeout",
settings -> TimeValue.timeValueMillis(PING_TIMEOUT_SETTING.get(settings).millis() * 20).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
public final static Setting<Integer> JOIN_RETRY_ATTEMPTS_SETTING = Setting.intSetting("discovery.zen.join_retry_attempts", 3, 1, false, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> JOIN_RETRY_DELAY_SETTING = Setting.positiveTimeSetting("discovery.zen.join_retry_delay", TimeValue.timeValueMillis(100), false, Setting.Scope.CLUSTER);
public final static Setting<Integer> MAX_PINGS_FROM_ANOTHER_MASTER_SETTING = Setting.intSetting("discovery.zen.max_pings_from_another_master", 3, 1, false, Setting.Scope.CLUSTER);
public final static Setting<Boolean> SEND_LEAVE_REQUEST_SETTING = Setting.boolSetting("discovery.zen.send_leave_request", true, false, Setting.Scope.CLUSTER);
public final static Setting<Boolean> MASTER_ELECTION_FILTER_CLIENT_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_client", true, false, Setting.Scope.CLUSTER);
public final static Setting<TimeValue> MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING = Setting.timeSetting("discovery.zen.master_election.wait_for_joins_timeout",
settings -> TimeValue.timeValueMillis(JOIN_TIMEOUT_SETTING.get(settings).millis() / 2).toString(), TimeValue.timeValueMillis(0), false, Setting.Scope.CLUSTER);
public final static Setting<Boolean> MASTER_ELECTION_FILTER_DATA_SETTING = Setting.boolSetting("discovery.zen.master_election.filter_data", false, false, Setting.Scope.CLUSTER);
public static final String DISCOVERY_REJOIN_ACTION_NAME = "internal:discovery/zen/rejoin";
@ -142,8 +141,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
private final AtomicBoolean initialStateSent = new AtomicBoolean();
private volatile boolean rejoinOnMasterGone;
/** counts the number of times this node has joined the cluster or elected itself as master */
private final AtomicLong clusterJoinsCounter = new AtomicLong();
@ -177,7 +174,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
this.masterElectionFilterClientNodes = MASTER_ELECTION_FILTER_CLIENT_SETTING.get(settings);
this.masterElectionFilterDataNodes = MASTER_ELECTION_FILTER_DATA_SETTING.get(settings);
this.masterElectionWaitForJoinsTimeout = MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING.get(settings);
this.rejoinOnMasterGone = REJOIN_ON_MASTER_GONE_SETTING.get(settings);
logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", this.pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes);
@ -188,7 +184,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
throw new IllegalArgumentException("cannot set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " to more than the current master nodes count [" + masterNodes + "]");
}
});
clusterSettings.addSettingsUpdateConsumer(REJOIN_ON_MASTER_GONE_SETTING, this::setRejoingOnMasterGone);
this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, clusterName, clusterService);
this.masterFD.addListener(new MasterNodeFailureListener());
@ -323,10 +318,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
return clusterJoinsCounter.get() > 0;
}
private void setRejoingOnMasterGone(boolean rejoin) {
this.rejoinOnMasterGone = rejoin;
}
/** end of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */
@ -670,35 +661,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
// flush any pending cluster states from old master, so it will not be set as master again
publishClusterState.pendingStatesQueue().failAllStatesAndClear(new ElasticsearchException("master left [{}]", reason));
if (rejoinOnMasterGone) {
return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master left (reason = " + reason + ")");
}
if (!electMaster.hasEnoughMasterNodes(discoveryNodes)) {
return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "not enough master nodes after master left (reason = " + reason + ")");
}
final DiscoveryNode electedMaster = electMaster.electMaster(discoveryNodes); // elect master
final DiscoveryNode localNode = currentState.nodes().localNode();
if (localNode.equals(electedMaster)) {
masterFD.stop("got elected as new master since master left (reason = " + reason + ")");
discoveryNodes = DiscoveryNodes.builder(discoveryNodes).masterNodeId(localNode.id()).build();
ClusterState newState = ClusterState.builder(currentState).nodes(discoveryNodes).build();
nodesFD.updateNodesAndPing(newState);
return newState;
} else {
nodesFD.stop();
if (electedMaster != null) {
discoveryNodes = DiscoveryNodes.builder(discoveryNodes).masterNodeId(electedMaster.id()).build();
masterFD.restart(electedMaster, "possible elected master since master left (reason = " + reason + ")");
return ClusterState.builder(currentState)
.nodes(discoveryNodes)
.build();
} else {
return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master_left and no other node elected to become master");
}
}
return rejoin(ClusterState.builder(currentState).nodes(discoveryNodes).build(), "master left (reason = " + reason + ")");
}
@Override
@ -857,7 +820,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
// Sanity check: maybe we don't end up here, because serialization may have failed.
if (node.getVersion().before(minimumNodeJoinVersion)) {
callback.onFailure(
new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]")
new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]")
);
return;
}
@ -1109,10 +1072,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
}
}
boolean isRejoinOnMasterGone() {
return rejoinOnMasterGone;
}
public static class RejoinClusterRequest extends TransportRequest {
private String fromNodeId;

View File

@ -330,31 +330,4 @@ public class Environment {
public static FileStore getFileStore(Path path) throws IOException {
return ESFileStore.getMatchingFileStore(path, fileStores);
}
/**
* Returns true if the path is writable.
* Acts just like {@link Files#isWritable(Path)}, except won't
* falsely return false for paths on SUBST'd drive letters
* See https://bugs.openjdk.java.net/browse/JDK-8034057
* Note this will set the file modification time (to its already-set value)
* to test access.
*/
@SuppressForbidden(reason = "works around https://bugs.openjdk.java.net/browse/JDK-8034057")
public static boolean isWritable(Path path) throws IOException {
boolean v = Files.isWritable(path);
if (v || Constants.WINDOWS == false) {
return v;
}
// isWritable returned false on windows, the hack begins!!!!!!
// resetting the modification time is the least destructive/simplest
// way to check for both files and directories, and fails early just
// in getting the current value if file doesn't exist, etc
try {
Files.setLastModifiedTime(path, Files.getLastModifiedTime(path));
return true;
} catch (Throwable e) {
return false;
}
}
}

View File

@ -42,17 +42,17 @@ import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.Callback;
import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.util.concurrent.SuspendableRefContainer;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexSettings;
@ -189,9 +189,17 @@ public class IndexShard extends AbstractIndexShardComponent {
private final ShardPath path;
private final IndexShardOperationCounter indexShardOperationCounter;
private final SuspendableRefContainer suspendableRefContainer;
private final EnumSet<IndexShardState> readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY);
private static final EnumSet<IndexShardState> readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY);
// for primaries, we only allow to write when actually started (so the cluster has decided we started)
// in case we have a relocation of a primary, we also allow to write after phase 2 completed, where the shard may be
// in state RECOVERING or POST_RECOVERY. After a primary has been marked as RELOCATED, we only allow writes to the relocation target
// which can be either in POST_RECOVERY or already STARTED (this prevents writing concurrently to two primaries).
public static final EnumSet<IndexShardState> writeAllowedStatesForPrimary = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED);
// replication is also allowed while recovering, since we also index into replicas during recovery and rely on version checks to make sure it's consistent
// a relocated shard can also be the target of a replication if the relocation target has not been marked as active yet and is syncing its changes back to the relocation source
private static final EnumSet<IndexShardState> writeAllowedStatesForReplica = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED, IndexShardState.RELOCATED);
private final IndexSearcherWrapper searcherWrapper;
@ -250,7 +258,7 @@ public class IndexShard extends AbstractIndexShardComponent {
}
this.engineConfig = newEngineConfig(translogConfig, cachingPolicy);
this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId);
this.suspendableRefContainer = new SuspendableRefContainer();
this.provider = provider;
this.searcherWrapper = indexSearcherWrapper;
this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, newQueryShardContext());
@ -321,6 +329,8 @@ public class IndexShard extends AbstractIndexShardComponent {
* Updates the shard's routing entry. This mutates the shard's internal state depending
* on the changes introduced by the new routing value. This method will persist shard level metadata
* unless explicitly disabled.
*
* @throws IndexShardRelocatedException if the shard is marked as relocated and the relocation is aborted
*/
public void updateRoutingEntry(final ShardRouting newRouting, final boolean persistState) {
final ShardRouting currentRouting = this.shardRouting;
@ -368,6 +378,14 @@ public class IndexShard extends AbstractIndexShardComponent {
}
}
}
if (state == IndexShardState.RELOCATED &&
(newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) {
// if the shard is marked as RELOCATED we have to fail when any changes in shard routing occur (e.g. due to recovery
// failure / cancellation). The reason is that at the moment we cannot safely move back to STARTED without risking two
// active primaries.
throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state());
}
this.shardRouting = newRouting;
indexEventListener.shardRoutingChanged(this, currentRouting, newRouting);
} finally {
@ -404,12 +422,16 @@ public class IndexShard extends AbstractIndexShardComponent {
}
public IndexShard relocated(String reason) throws IndexShardNotStartedException {
synchronized (mutex) {
if (state != IndexShardState.STARTED) {
throw new IndexShardNotStartedException(shardId, state);
try (Releasable block = suspendableRefContainer.blockAcquisition()) {
// no shard operation locks are being held here, move state from started to relocated
synchronized (mutex) {
if (state != IndexShardState.STARTED) {
throw new IndexShardNotStartedException(shardId, state);
}
changeState(IndexShardState.RELOCATED, reason);
}
changeState(IndexShardState.RELOCATED, reason);
}
return this;
}
@ -796,7 +818,6 @@ public class IndexShard extends AbstractIndexShardComponent {
refreshScheduledFuture = null;
}
changeState(IndexShardState.CLOSED, reason);
indexShardOperationCounter.decRef();
} finally {
final Engine engine = this.currentEngineReference.getAndSet(null);
try {
@ -810,7 +831,6 @@ public class IndexShard extends AbstractIndexShardComponent {
}
}
public IndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
if (mapperService.hasMapping(PercolatorService.TYPE_NAME)) {
refresh("percolator_load_queries");
@ -967,16 +987,17 @@ public class IndexShard extends AbstractIndexShardComponent {
IndexShardState state = this.state; // one time volatile read
if (origin == Engine.Operation.Origin.PRIMARY) {
// for primaries, we only allow to write when actually started (so the cluster has decided we started)
// otherwise, we need to retry, we also want to still allow to index if we are relocated in case it fails
if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering, origin [" + origin + "]");
if (writeAllowedStatesForPrimary.contains(state) == false) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForPrimary + ", origin [" + origin + "]");
}
} else if (origin == Engine.Operation.Origin.RECOVERY) {
if (state != IndexShardState.RECOVERING) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when recovering, origin [" + origin + "]");
}
} else {
// for replicas, we allow to write also while recovering, since we index also during recovery to replicas
// and rely on version checks to make sure its consistent
if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED && state != IndexShardState.RECOVERING && state != IndexShardState.POST_RECOVERY) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when started/recovering, origin [" + origin + "]");
assert origin == Engine.Operation.Origin.REPLICA;
if (writeAllowedStatesForReplica.contains(state) == false) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStatesForReplica + ", origin [" + origin + "]");
}
}
}
@ -995,7 +1016,7 @@ public class IndexShard extends AbstractIndexShardComponent {
private void verifyNotClosed(Throwable suppressed) throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read
if (state == IndexShardState.CLOSED) {
final IllegalIndexShardStateException exc = new IllegalIndexShardStateException(shardId, state, "operation only allowed when not closed");
final IllegalIndexShardStateException exc = new IndexShardClosedException(shardId, "operation only allowed when not closed");
if (suppressed != null) {
exc.addSuppressed(suppressed);
}
@ -1390,37 +1411,21 @@ public class IndexShard extends AbstractIndexShardComponent {
idxSettings.getSettings().getAsTime(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING, IndexingMemoryController.SHARD_DEFAULT_INACTIVE_TIME));
}
private static class IndexShardOperationCounter extends AbstractRefCounted {
final private ESLogger logger;
private final ShardId shardId;
public IndexShardOperationCounter(ESLogger logger, ShardId shardId) {
super("index-shard-operations-counter");
this.logger = logger;
this.shardId = shardId;
}
@Override
protected void closeInternal() {
logger.debug("operations counter reached 0, will not accept any further writes");
}
@Override
protected void alreadyClosed() {
throw new IndexShardClosedException(shardId, "could not increment operation counter. shard is closed.");
public Releasable acquirePrimaryOperationLock() {
verifyNotClosed();
if (shardRouting.primary() == false) {
throw new IllegalIndexShardStateException(shardId, state, "shard is not a primary");
}
return suspendableRefContainer.acquireUninterruptibly();
}
public void incrementOperationCounter() {
indexShardOperationCounter.incRef();
public Releasable acquireReplicaOperationLock() {
verifyNotClosed();
return suspendableRefContainer.acquireUninterruptibly();
}
public void decrementOperationCounter() {
indexShardOperationCounter.decRef();
}
public int getOperationsCount() {
return Math.max(0, indexShardOperationCounter.refCount() - 1); // refCount is incremented on creation and decremented on close
public int getActiveOperationsCount() {
return suspendableRefContainer.activeRefs(); // refCount is incremented on creation and decremented on close
}
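A usage sketch for the new lock API: a caller wraps each primary-side operation in a try-with-resources block, and getActiveOperationsCount() reports how many such locks are currently held. The surrounding names (indexShard, the write itself) are illustrative:
// Illustrative only: hold a primary operation lock for the duration of a write.
try (Releasable ignored = indexShard.acquirePrimaryOperationLock()) {
    assert indexShard.getActiveOperationsCount() >= 1;
    // ... perform the primary-side indexing operation while the lock is held ...
} // released here; a relocated(...) call blocked in blockAcquisition() may now proceed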
/**
View File
@ -29,7 +29,11 @@ import java.io.IOException;
public class IndexShardRelocatedException extends IllegalIndexShardStateException {
public IndexShardRelocatedException(ShardId shardId) {
super(shardId, IndexShardState.RELOCATED, "Already relocated");
this(shardId, "Already relocated");
}
public IndexShardRelocatedException(ShardId shardId, String reason) {
super(shardId, IndexShardState.RELOCATED, reason);
}
public IndexShardRelocatedException(StreamInput in) throws IOException{
View File
@ -39,13 +39,11 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.Callback;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.IndexService;
@ -93,26 +91,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
private static final ShardStateAction.Listener SHARD_STATE_ACTION_LISTENER = new ShardStateAction.Listener() {};
// a map of mappings type we have seen per index due to cluster state
// we need this so we won't remove types automatically created as part of the indexing process
private final ConcurrentMap<Tuple<String, String>, Boolean> seenMappings = ConcurrentCollections.newConcurrentMap();
// a list of shards that failed during recovery
// we keep track of these shards in order to prevent repeated recovery of these shards on each cluster state update
private final ConcurrentMap<ShardId, FailedShard> failedShards = ConcurrentCollections.newConcurrentMap();
private final ConcurrentMap<ShardId, ShardRouting> failedShards = ConcurrentCollections.newConcurrentMap();
private final RestoreService restoreService;
private final RepositoriesService repositoriesService;
static class FailedShard {
public final long version;
public final long timestamp;
FailedShard(long version) {
this.version = version;
this.timestamp = System.currentTimeMillis();
}
}
private final Object mutex = new Object();
private final FailedShardHandler failedShardHandler = new FailedShardHandler();
@ -431,11 +415,6 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
RoutingTable routingTable = event.state().routingTable();
RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
if (routingNode == null) {
failedShards.clear();
return;
}
DiscoveryNodes nodes = event.state().nodes();
for (final ShardRouting shardRouting : routingNode) {
@ -492,7 +471,11 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
// shadow replicas do not support primary promotion. The master would reinitialize the shard, giving it a new allocation, meaning we should be there.
assert (shardRouting.primary() && currentRoutingEntry.primary() == false) == false || indexShard.allowsPrimaryPromotion() :
"shard for doesn't support primary promotion but master promoted it with changing allocation. New routing " + shardRouting + ", current routing " + currentRoutingEntry;
indexShard.updateRoutingEntry(shardRouting, event.state().blocks().disableStatePersistence() == false);
try {
indexShard.updateRoutingEntry(shardRouting, event.state().blocks().disableStatePersistence() == false);
} catch (Throwable e) {
failAndRemoveShard(shardRouting, indexService.indexUUID(), indexService, true, "failed updating shard routing entry", e);
}
}
}
@ -503,40 +486,29 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
}
private void cleanFailedShards(final ClusterChangedEvent event) {
RoutingTable routingTable = event.state().routingTable();
RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
if (routingNode == null) {
failedShards.clear();
return;
}
DiscoveryNodes nodes = event.state().nodes();
long now = System.currentTimeMillis();
String localNodeId = nodes.localNodeId();
Iterator<Map.Entry<ShardId, FailedShard>> iterator = failedShards.entrySet().iterator();
shards:
while (iterator.hasNext()) {
Map.Entry<ShardId, FailedShard> entry = iterator.next();
FailedShard failedShard = entry.getValue();
IndexRoutingTable indexRoutingTable = routingTable.index(entry.getKey().getIndex());
if (indexRoutingTable != null) {
IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(entry.getKey().id());
if (shardRoutingTable != null) {
for (ShardRouting shardRouting : shardRoutingTable.assignedShards()) {
if (localNodeId.equals(shardRouting.currentNodeId())) {
// we have a timeout here just to make sure we don't have dangling failed shards for some reason
// it's just another safety layer
if (shardRouting.version() == failedShard.version && ((now - failedShard.timestamp) < TimeValue.timeValueMinutes(60).millis())) {
// It's the same failed shard - keep it if it hasn't timed out
continue shards;
} else {
// Different version or expired, remove it
break;
}
}
}
}
RoutingTable routingTable = event.state().routingTable();
for (Iterator<Map.Entry<ShardId, ShardRouting>> iterator = failedShards.entrySet().iterator(); iterator.hasNext(); ) {
Map.Entry<ShardId, ShardRouting> entry = iterator.next();
ShardId failedShardId = entry.getKey();
ShardRouting failedShardRouting = entry.getValue();
IndexRoutingTable indexRoutingTable = routingTable.index(failedShardId.getIndex());
if (indexRoutingTable == null) {
iterator.remove();
continue;
}
IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(failedShardId.id());
if (shardRoutingTable == null) {
iterator.remove();
continue;
}
if (shardRoutingTable.assignedShards().stream().noneMatch(shr -> shr.isSameAllocation(failedShardRouting))) {
iterator.remove();
}
iterator.remove();
}
}
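The cleanup above hinges on ShardRouting#isSameAllocation, which does not appear in this diff. As an assumption for illustration, it plausibly compares allocation ids, so a failed-shard entry is kept only while the exact same shard copy is still assigned to this node:
// Hypothetical sketch of the predicate used above (not the actual implementation):
// two routing entries refer to the same shard copy iff both carry an allocation id
// and those ids are equal.
public boolean isSameAllocation(ShardRouting other) {
    return this.allocationId() != null
            && other.allocationId() != null
            && this.allocationId().getId().equals(other.allocationId().getId());
}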
@ -626,7 +598,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
// For primaries: requests in any case are routed to both when it's relocating and that way we handle
// the edge case where it's marked as relocated, and we might need to roll it back...
// For replicas: we are recovering a backup from a primary
RecoveryState.Type type = shardRouting.primary() ? RecoveryState.Type.RELOCATION : RecoveryState.Type.REPLICA;
RecoveryState.Type type = shardRouting.primary() ? RecoveryState.Type.PRIMARY_RELOCATION : RecoveryState.Type.REPLICA;
RecoveryState recoveryState = new RecoveryState(indexShard.shardId(), shardRouting.primary(), type, sourceNode, nodes.localNode());
indexShard.markAsRecovering("from " + sourceNode, recoveryState);
recoveryTarget.startRecovery(indexShard, type, sourceNode, new PeerRecoveryListener(shardRouting, indexService, indexMetaData));
@ -784,7 +756,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
private void sendFailShard(ShardRouting shardRouting, String indexUUID, String message, @Nullable Throwable failure) {
try {
logger.warn("[{}] marking and sending shard failed due to [{}]", failure, shardRouting.shardId(), message);
failedShards.put(shardRouting.shardId(), new FailedShard(shardRouting.version()));
failedShards.put(shardRouting.shardId(), shardRouting);
shardStateAction.shardFailed(shardRouting, indexUUID, message, failure, SHARD_STATE_ACTION_LISTENER);
} catch (Throwable e1) {
logger.warn("[{}][{}] failed to mark shard as failed (because of [{}])", e1, shardRouting.getIndexName(), shardRouting.getId(), message);
View File
@ -435,7 +435,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
if (indexShard.routingEntry().primary() == false) {
throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard");
}
int opCount = indexShard.getOperationsCount();
int opCount = indexShard.getActiveOperationsCount();
logger.trace("{} in flight operations sampled at [{}]", request.shardId(), opCount);
return new InFlightOpsResponse(opCount);
}
View File
@ -61,8 +61,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
private final ClusterService clusterService;
private final OngoingRecoveres ongoingRecoveries = new OngoingRecoveres();
private final OngoingRecoveries ongoingRecoveries = new OngoingRecoveries();
@Inject
public RecoverySource(Settings settings, TransportService transportService, IndicesService indicesService,
@ -107,11 +106,11 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
}
if (!targetShardRouting.initializing()) {
logger.debug("delaying recovery of {} as it is not listed as initializing on the target node {}. known shards state is [{}]",
request.shardId(), request.targetNode(), targetShardRouting.state());
request.shardId(), request.targetNode(), targetShardRouting.state());
throw new DelayRecoveryException("source node has the state of the target shard to be [" + targetShardRouting.state() + "], expecting to be [initializing]");
}
logger.trace("[{}][{}] starting recovery to {}, mark_as_relocated {}", request.shardId().getIndex().getName(), request.shardId().id(), request.targetNode(), request.markAsRelocated());
logger.trace("[{}][{}] starting recovery to {}", request.shardId().getIndex().getName(), request.shardId().id(), request.targetNode());
final RecoverySourceHandler handler;
if (shard.indexSettings().isOnSharedFilesystem()) {
handler = new SharedFSRecoverySourceHandler(shard, request, recoverySettings, transportService, logger);
@ -134,8 +133,7 @@ public class RecoverySource extends AbstractComponent implements IndexEventListe
}
}
private static final class OngoingRecoveres {
private static final class OngoingRecoveries {
private final Map<IndexShard, Set<RecoverySourceHandler>> ongoingRecoveries = new HashMap<>();
synchronized void add(IndexShard shard, RecoverySourceHandler handler) {
View File
@ -393,9 +393,11 @@ public class RecoverySourceHandler {
}
});
if (request.markAsRelocated()) {
// TODO what happens if the recovery process fails afterwards, we need to mark this back to started
if (isPrimaryRelocation()) {
/**
* if the recovery process fails after setting the shard state to RELOCATED, both relocation source and
* target are failed (see {@link IndexShard#updateRoutingEntry}).
*/
try {
shard.relocated("to " + request.targetNode());
} catch (IllegalIndexShardStateException e) {
@ -406,7 +408,11 @@ public class RecoverySourceHandler {
}
stopWatch.stop();
logger.trace("[{}][{}] finalizing recovery to {}: took [{}]",
indexName, shardId, request.targetNode(), stopWatch.totalTime());
indexName, shardId, request.targetNode(), stopWatch.totalTime());
}
protected boolean isPrimaryRelocation() {
return request.recoveryType() == RecoveryState.Type.PRIMARY_RELOCATION;
}
/**
View File
@ -101,7 +101,7 @@ public class RecoveryState implements ToXContent, Streamable {
STORE((byte) 0),
SNAPSHOT((byte) 1),
REPLICA((byte) 2),
RELOCATION((byte) 3);
PRIMARY_RELOCATION((byte) 3);
private static final Type[] TYPES = new Type[Type.values().length];
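The TYPES array backs the byte-id-to-enum lookup used when a recovery type is read off the wire (RecoveryState.Type.fromId in this diff). The static initializer and fromId accessor fall outside this hunk; a sketch of the usual pattern, assuming each constant exposes its byte id via an id() accessor:
// Assumed companion code for the TYPES lookup table above.
static {
    for (Type type : Type.values()) {
        TYPES[type.id()] = type;        // index each constant by its wire id
    }
}
public static Type fromId(byte id) {
    if (id < 0 || id >= TYPES.length) {
        throw new IllegalArgumentException("no recovery type mapped to [" + id + "]");
    }
    return TYPES[id];
}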
View File
@ -138,7 +138,6 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe
// create a new recovery status, and process...
final long recoveryId = onGoingRecoveries.startRecovery(indexShard, sourceNode, listener, recoverySettings.activityTimeout());
threadPool.generic().execute(new RecoveryRunner(recoveryId));
}
protected void retryRecovery(final RecoveryStatus recoveryStatus, final Throwable reason, TimeValue retryAfter, final StartRecoveryRequest currentRequest) {
@ -178,7 +177,7 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe
return;
}
final StartRecoveryRequest request = new StartRecoveryRequest(recoveryStatus.shardId(), recoveryStatus.sourceNode(), clusterService.localNode(),
false, metadataSnapshot, recoveryStatus.state().getType(), recoveryStatus.recoveryId());
metadataSnapshot, recoveryStatus.state().getType(), recoveryStatus.recoveryId());
final AtomicReference<RecoveryResponse> responseHolder = new AtomicReference<>();
try {
@ -267,7 +266,6 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe
onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, "source shard is closed", cause), false);
return;
}
onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, e), true);
}
}
View File
@ -84,8 +84,4 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler {
return 0;
}
private boolean isPrimaryRelocation() {
return request.recoveryType() == RecoveryState.Type.RELOCATION && shard.routingEntry().primary();
}
}
View File
@ -41,8 +41,6 @@ public class StartRecoveryRequest extends TransportRequest {
private DiscoveryNode targetNode;
private boolean markAsRelocated;
private Store.MetadataSnapshot metadataSnapshot;
private RecoveryState.Type recoveryType;
@ -56,12 +54,11 @@ public class StartRecoveryRequest extends TransportRequest {
* @param sourceNode The node to recover from
* @param targetNode The node to recover to
*/
public StartRecoveryRequest(ShardId shardId, DiscoveryNode sourceNode, DiscoveryNode targetNode, boolean markAsRelocated, Store.MetadataSnapshot metadataSnapshot, RecoveryState.Type recoveryType, long recoveryId) {
public StartRecoveryRequest(ShardId shardId, DiscoveryNode sourceNode, DiscoveryNode targetNode, Store.MetadataSnapshot metadataSnapshot, RecoveryState.Type recoveryType, long recoveryId) {
this.recoveryId = recoveryId;
this.shardId = shardId;
this.sourceNode = sourceNode;
this.targetNode = targetNode;
this.markAsRelocated = markAsRelocated;
this.recoveryType = recoveryType;
this.metadataSnapshot = metadataSnapshot;
}
@ -82,10 +79,6 @@ public class StartRecoveryRequest extends TransportRequest {
return targetNode;
}
public boolean markAsRelocated() {
return markAsRelocated;
}
public RecoveryState.Type recoveryType() {
return recoveryType;
}
@ -101,7 +94,6 @@ public class StartRecoveryRequest extends TransportRequest {
shardId = ShardId.readShardId(in);
sourceNode = DiscoveryNode.readNode(in);
targetNode = DiscoveryNode.readNode(in);
markAsRelocated = in.readBoolean();
metadataSnapshot = new Store.MetadataSnapshot(in);
recoveryType = RecoveryState.Type.fromId(in.readByte());
@ -114,7 +106,6 @@ public class StartRecoveryRequest extends TransportRequest {
shardId.writeTo(out);
sourceNode.writeTo(out);
targetNode.writeTo(out);
out.writeBoolean(markAsRelocated);
metadataSnapshot.writeTo(out);
out.writeByte(recoveryType.id());
}
View File
@ -247,8 +247,8 @@ public class InternalSettingsPreparer {
}
if (secret) {
return new String(terminal.readSecret("Enter value for [%s]: ", key));
return new String(terminal.readSecret("Enter value for [" + key + "]: ", key));
}
return terminal.readText("Enter value for [%s]: ", key);
return terminal.readText("Enter value for [" + key + "]: ", key);
}
}
View File
@ -19,6 +19,18 @@
package org.elasticsearch.plugins;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Build;
import org.elasticsearch.Version;
import org.elasticsearch.bootstrap.JarHell;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.cli.UserError;
import org.elasticsearch.common.hash.MessageDigests;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
@ -42,17 +54,6 @@ import java.util.Set;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Build;
import org.elasticsearch.Version;
import org.elasticsearch.bootstrap.JarHell;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.hash.MessageDigests;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import static java.util.Collections.unmodifiableSet;
import static org.elasticsearch.common.cli.Terminal.Verbosity.VERBOSE;
import static org.elasticsearch.common.util.set.Sets.newHashSet;
@ -132,14 +133,10 @@ class InstallPluginCommand extends CliTool.Command {
// TODO: remove this leniency!! is it needed anymore?
if (Files.exists(env.pluginsFile()) == false) {
terminal.println("Plugins directory [%s] does not exist. Creating...", env.pluginsFile());
terminal.println("Plugins directory [" + env.pluginsFile() + "] does not exist. Creating...");
Files.createDirectory(env.pluginsFile());
}
if (Environment.isWritable(env.pluginsFile()) == false) {
throw new IOException("Plugins directory is read only: " + env.pluginsFile());
}
Path pluginZip = download(pluginId, env.tmpFile());
Path extractedZip = unzip(pluginZip, env.pluginsFile());
install(extractedZip, env);
@ -148,7 +145,7 @@ class InstallPluginCommand extends CliTool.Command {
}
/** Downloads the plugin and returns the file it was downloaded to. */
private Path download(String pluginId, Path tmpDir) throws IOException {
private Path download(String pluginId, Path tmpDir) throws Exception {
if (OFFICIAL_PLUGINS.contains(pluginId)) {
final String version = Version.CURRENT.toString();
final String url;
@ -189,7 +186,7 @@ class InstallPluginCommand extends CliTool.Command {
}
/** Downloads a zip from the url, as well as a SHA1 checksum, and checks the checksum. */
private Path downloadZipAndChecksum(String urlString, Path tmpDir) throws IOException {
private Path downloadZipAndChecksum(String urlString, Path tmpDir) throws Exception {
Path zip = downloadZip(urlString, tmpDir);
URL checksumUrl = new URL(urlString + ".sha1");
@ -198,14 +195,14 @@ class InstallPluginCommand extends CliTool.Command {
BufferedReader checksumReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8));
expectedChecksum = checksumReader.readLine();
if (checksumReader.readLine() != null) {
throw new IllegalArgumentException("Invalid checksum file at " + urlString.toString());
throw new UserError(CliTool.ExitStatus.IO_ERROR, "Invalid checksum file at " + checksumUrl);
}
}
byte[] zipbytes = Files.readAllBytes(zip);
String gotChecksum = MessageDigests.toHexString(MessageDigests.sha1().digest(zipbytes));
if (expectedChecksum.equals(gotChecksum) == false) {
throw new IllegalStateException("SHA1 mismatch, expected " + expectedChecksum + " but got " + gotChecksum);
throw new UserError(CliTool.ExitStatus.IO_ERROR, "SHA1 mismatch, expected " + expectedChecksum + " but got " + gotChecksum);
}
return zip;
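The verification above amounts to hashing the downloaded bytes and comparing hex digests against the .sha1 sidecar file. A self-contained equivalent of the digest step using only the JDK; MessageDigests.toHexString is Elasticsearch's helper, and this sketch is an illustration of what it does, not the project's code:
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
// Sketch: compute the SHA-1 hex digest of a file, mirroring what
// MessageDigests.sha1() plus MessageDigests.toHexString() do above.
static String sha1Hex(Path file) throws Exception {
    byte[] digest = MessageDigest.getInstance("SHA-1").digest(Files.readAllBytes(file));
    StringBuilder sb = new StringBuilder(digest.length * 2);
    for (byte b : digest) {
        sb.append(Character.forDigit((b >> 4) & 0xf, 16));
        sb.append(Character.forDigit(b & 0xf, 16));
    }
    return sb.toString();
}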
@ -245,12 +242,12 @@ class InstallPluginCommand extends CliTool.Command {
private PluginInfo verify(Path pluginRoot, Environment env) throws Exception {
// read and validate the plugin descriptor
PluginInfo info = PluginInfo.readFromProperties(pluginRoot);
terminal.println(VERBOSE, "%s", info);
terminal.println(VERBOSE, info.toString());
// don't let luser install plugin as a module...
// they might be unavoidably in maven central and are packaged up the same way
if (MODULES.contains(info.getName())) {
throw new IOException("plugin '" + info.getName() + "' cannot be installed like this, it is a system module");
throw new UserError(CliTool.ExitStatus.USAGE, "plugin '" + info.getName() + "' cannot be installed like this, it is a system module");
}
// check for jar hell before any copying
@ -306,7 +303,7 @@ class InstallPluginCommand extends CliTool.Command {
final Path destination = env.pluginsFile().resolve(info.getName());
if (Files.exists(destination)) {
throw new IOException("plugin directory " + destination.toAbsolutePath() + " already exists. To update the plugin, uninstall it first using 'remove " + info.getName() + "' command");
throw new UserError(CliTool.ExitStatus.USAGE, "plugin directory " + destination.toAbsolutePath() + " already exists. To update the plugin, uninstall it first using 'remove " + info.getName() + "' command");
}
Path tmpBinDir = tmpRoot.resolve("bin");
@ -337,9 +334,9 @@ class InstallPluginCommand extends CliTool.Command {
}
/** Copies the files from {@code tmpBinDir} into {@code destBinDir}, along with permissions from dest dirs parent. */
private void installBin(PluginInfo info, Path tmpBinDir, Path destBinDir) throws IOException {
private void installBin(PluginInfo info, Path tmpBinDir, Path destBinDir) throws Exception {
if (Files.isDirectory(tmpBinDir) == false) {
throw new IOException("bin in plugin " + info.getName() + " is not a directory");
throw new UserError(CliTool.ExitStatus.IO_ERROR, "bin in plugin " + info.getName() + " is not a directory");
}
Files.createDirectory(destBinDir);
@ -357,7 +354,7 @@ class InstallPluginCommand extends CliTool.Command {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(tmpBinDir)) {
for (Path srcFile : stream) {
if (Files.isDirectory(srcFile)) {
throw new IOException("Directories not allowed in bin dir for plugin " + info.getName());
throw new UserError(CliTool.ExitStatus.DATA_ERROR, "Directories not allowed in bin dir for plugin " + info.getName() + ", found " + srcFile.getFileName());
}
Path destFile = destBinDir.resolve(tmpBinDir.relativize(srcFile));
@ -376,9 +373,9 @@ class InstallPluginCommand extends CliTool.Command {
* Copies the files from {@code tmpConfigDir} into {@code destConfigDir}.
* Any files existing in both the source and destination will be skipped.
*/
private void installConfig(PluginInfo info, Path tmpConfigDir, Path destConfigDir) throws IOException {
private void installConfig(PluginInfo info, Path tmpConfigDir, Path destConfigDir) throws Exception {
if (Files.isDirectory(tmpConfigDir) == false) {
throw new IOException("config in plugin " + info.getName() + " is not a directory");
throw new UserError(CliTool.ExitStatus.IO_ERROR, "config in plugin " + info.getName() + " is not a directory");
}
// create the plugin's config dir "if necessary"
@ -387,7 +384,7 @@ class InstallPluginCommand extends CliTool.Command {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(tmpConfigDir)) {
for (Path srcFile : stream) {
if (Files.isDirectory(srcFile)) {
throw new IOException("Directories not allowed in config dir for plugin " + info.getName());
throw new UserError(CliTool.ExitStatus.DATA_ERROR, "Directories not allowed in config dir for plugin " + info.getName());
}
Path destFile = destConfigDir.resolve(tmpConfigDir.relativize(srcFile));
View File
@ -55,7 +55,7 @@ public class PluginCli extends CliTool {
.cmds(LIST_CMD, INSTALL_CMD, REMOVE_CMD)
.build();
public static void main(String[] args) {
public static void main(String[] args) throws Exception {
// initialize default for es.logger.level because we will not read the logging.yml
String loggerLevel = System.getProperty("es.logger.level", "INFO");
// Set the appender for all potential log files to terminal so that other components that use the logger print out the
View File
@ -87,7 +87,7 @@ class PluginSecurity {
terminal.println(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@");
// print all permissions:
for (Permission permission : requested) {
terminal.println(Verbosity.NORMAL, "* %s", formatPermission(permission));
terminal.println(Verbosity.NORMAL, "* " + formatPermission(permission));
}
terminal.println(Verbosity.NORMAL, "See http://docs.oracle.com/javase/8/docs/technotes/guides/security/permissions.html");
terminal.println(Verbosity.NORMAL, "for descriptions of what these permissions allow and the associated risks.");
View File
@ -19,19 +19,20 @@
package org.elasticsearch.plugins;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.cli.UserError;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import static org.elasticsearch.common.cli.Terminal.Verbosity.VERBOSE;
/**
@ -51,7 +52,7 @@ class RemovePluginCommand extends CliTool.Command {
Path pluginDir = env.pluginsFile().resolve(pluginName);
if (Files.exists(pluginDir) == false) {
throw new IllegalArgumentException("Plugin " + pluginName + " not found. Run 'plugin list' to get list of installed plugins.");
throw new UserError(CliTool.ExitStatus.USAGE, "Plugin " + pluginName + " not found. Run 'plugin list' to get list of installed plugins.");
}
List<Path> pluginPaths = new ArrayList<>();
@ -59,13 +60,13 @@ class RemovePluginCommand extends CliTool.Command {
Path pluginBinDir = env.binFile().resolve(pluginName);
if (Files.exists(pluginBinDir)) {
if (Files.isDirectory(pluginBinDir) == false) {
throw new IllegalStateException("Bin dir for " + pluginName + " is not a directory");
throw new UserError(CliTool.ExitStatus.IO_ERROR, "Bin dir for " + pluginName + " is not a directory");
}
pluginPaths.add(pluginBinDir);
terminal.println(VERBOSE, "Removing: %s", pluginBinDir);
terminal.println(VERBOSE, "Removing: " + pluginBinDir);
}
terminal.println(VERBOSE, "Removing: %s", pluginDir);
terminal.println(VERBOSE, "Removing: " + pluginDir);
Path tmpPluginDir = env.pluginsFile().resolve(".removing-" + pluginName);
Files.move(pluginDir, tmpPluginDir, StandardCopyOption.ATOMIC_MOVE);
pluginPaths.add(tmpPluginDir);
View File
@ -23,6 +23,7 @@ import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.util.function.Supplier;
/**
* Base class for delegating transport response to a transport channel
@ -30,7 +31,7 @@ import java.io.IOException;
public abstract class TransportChannelResponseHandler<T extends TransportResponse> implements TransportResponseHandler<T> {
/**
* Convenience method for delegating an empty response to the provided changed
* Convenience method for delegating an empty response to the provided transport channel
*/
public static TransportChannelResponseHandler<TransportResponse.Empty> emptyResponseHandler(ESLogger logger, TransportChannel channel, String extraInfoOnError) {
return new TransportChannelResponseHandler<TransportResponse.Empty>(logger, channel, extraInfoOnError) {
@ -41,6 +42,19 @@ public abstract class TransportChannelResponseHandler<T extends TransportRespons
};
}
/**
* Convenience method for delegating a response provided by supplier to the provided transport channel
*/
public static <T extends TransportResponse> TransportChannelResponseHandler responseHandler(ESLogger logger, Supplier<T> responseSupplier, TransportChannel channel, String extraInfoOnError) {
return new TransportChannelResponseHandler<T>(logger, channel, extraInfoOnError) {
@Override
public T newInstance() {
return responseSupplier.get();
}
};
}
private final ESLogger logger;
private final TransportChannel channel;
private final String extraInfoOnError;
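A hedged usage example for the supplier-based factory added above: forwarding a typed response back over the channel that initiated the request. MyResponse, targetNode, and the surrounding call are illustrative assumptions, not code from this commit:
// Illustrative only: proxy a request and stream the typed response back to the
// original channel; MyResponse is a hypothetical TransportResponse subclass.
TransportChannelResponseHandler<MyResponse> handler =
        TransportChannelResponseHandler.responseHandler(logger, MyResponse::new, channel,
                "failed to proxy response for action [" + action + "]");
transportService.sendRequest(targetNode, action, request, handler);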
View File
@ -31,6 +31,7 @@ import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
@ -39,8 +40,8 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.ConcurrentMapLong;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
@ -56,6 +57,8 @@ import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
import java.util.function.Supplier;
import static java.util.Collections.emptyList;
import static org.elasticsearch.common.settings.Setting.listSetting;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
/**
@ -92,9 +95,10 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
// tracer log
public static final Setting<List<String>> TRACE_LOG_INCLUDE_SETTING = Setting.listSetting("transport.tracer.include", Collections.emptyList(), Function.identity(), true, Setting.Scope.CLUSTER);
public static final Setting<List<String>> TRACE_LOG_EXCLUDE_SETTING = Setting.listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), Function.identity(), true, Setting.Scope.CLUSTER);
public static final Setting<List<String>> TRACE_LOG_INCLUDE_SETTING = listSetting("transport.tracer.include", emptyList(),
Function.identity(), true, Scope.CLUSTER);
public static final Setting<List<String>> TRACE_LOG_EXCLUDE_SETTING = listSetting("transport.tracer.exclude",
Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), Function.identity(), true, Scope.CLUSTER);
private final ESLogger tracerLog;
@ -757,7 +761,8 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
final TransportServiceAdapter adapter;
final ThreadPool threadPool;
public DirectResponseChannel(ESLogger logger, DiscoveryNode localNode, String action, long requestId, TransportServiceAdapter adapter, ThreadPool threadPool) {
public DirectResponseChannel(ESLogger logger, DiscoveryNode localNode, String action, long requestId,
TransportServiceAdapter adapter, ThreadPool threadPool) {
this.logger = logger;
this.localNode = localNode;
this.action = action;
View File
@ -19,11 +19,15 @@
package org.elasticsearch.transport;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Settings;
import java.util.List;
import static java.util.Collections.emptyList;
import static org.elasticsearch.common.settings.Setting.groupSetting;
import static org.elasticsearch.common.settings.Setting.intSetting;
import static org.elasticsearch.common.settings.Setting.listSetting;
/**
* a collection of settings related to transport components, which are also needed in org.elasticsearch.bootstrap.Security
@ -31,13 +35,13 @@ import static java.util.Collections.emptyList;
*/
final public class TransportSettings {
public static final Setting<List<String>> HOST = Setting.listSetting("transport.host", emptyList(), s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> PUBLISH_HOST = Setting.listSetting("transport.publish_host", HOST, s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> BIND_HOST = Setting.listSetting("transport.bind_host", HOST, s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<String> PORT = new Setting<>("transport.tcp.port", "9300-9400", s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> PUBLISH_PORT = Setting.intSetting("transport.publish_port", -1, -1, false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> HOST = listSetting("transport.host", emptyList(), s -> s, false, Scope.CLUSTER);
public static final Setting<List<String>> PUBLISH_HOST = listSetting("transport.publish_host", HOST, s -> s, false, Scope.CLUSTER);
public static final Setting<List<String>> BIND_HOST = listSetting("transport.bind_host", HOST, s -> s, false, Scope.CLUSTER);
public static final Setting<String> PORT = new Setting<>("transport.tcp.port", "9300-9400", s -> s, false, Scope.CLUSTER);
public static final Setting<Integer> PUBLISH_PORT = intSetting("transport.publish_port", -1, -1, false, Scope.CLUSTER);
public static final String DEFAULT_PROFILE = "default";
public static final Setting<Settings> TRANSPORT_PROFILES_SETTING = Setting.groupSetting("transport.profiles.", true, Setting.Scope.CLUSTER);
public static final Setting<Settings> TRANSPORT_PROFILES_SETTING = groupSetting("transport.profiles.", true, Scope.CLUSTER);
private TransportSettings() {
View File
@ -97,7 +97,8 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
int queueSize = this.settings.getAsInt(TRANSPORT_LOCAL_QUEUE, -1);
logger.debug("creating [{}] workers, queue_size [{}]", workerCount, queueSize);
final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(this.settings, LOCAL_TRANSPORT_THREAD_NAME_PREFIX);
this.workers = EsExecutors.newFixed(LOCAL_TRANSPORT_THREAD_NAME_PREFIX, workerCount, queueSize, threadFactory, threadPool.getThreadContext());
this.workers = EsExecutors.newFixed(LOCAL_TRANSPORT_THREAD_NAME_PREFIX, workerCount, queueSize, threadFactory,
threadPool.getThreadContext());
this.namedWriteableRegistry = namedWriteableRegistry;
}
@ -199,7 +200,8 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
}
@Override
public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request,
TransportRequestOptions options) throws IOException, TransportException {
final Version version = Version.smallest(node.version(), this.version);
try (BytesStreamOutput stream = new BytesStreamOutput()) {
@ -237,7 +239,8 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
return this.workers;
}
protected void messageReceived(byte[] data, String action, LocalTransport sourceTransport, Version version, @Nullable final Long sendRequestId) {
protected void messageReceived(byte[] data, String action, LocalTransport sourceTransport, Version version,
@Nullable final Long sendRequestId) {
Transports.assertTransportThread();
try {
transportServiceAdapter.received(data.length);
@ -278,7 +281,8 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
stream = new NamedWriteableAwareStreamInput(stream, namedWriteableRegistry);
final String action = stream.readString();
transportServiceAdapter.onRequestReceived(requestId, action);
final LocalTransportChannel transportChannel = new LocalTransportChannel(this, transportServiceAdapter, sourceTransport, action, requestId, version);
final LocalTransportChannel transportChannel = new LocalTransportChannel(this, transportServiceAdapter, sourceTransport, action,
requestId, version);
try {
final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action);
if (reg == null) {
@ -334,7 +338,8 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
try {
response.readFrom(buffer);
} catch (Throwable e) {
handleException(handler, new TransportSerializationException("Failed to deserialize response of type [" + response.getClass().getName() + "]", e));
handleException(handler, new TransportSerializationException(
"Failed to deserialize response of type [" + response.getClass().getName() + "]", e));
return;
}
handleParsedResponse(response, handler);
View File
@ -46,7 +46,8 @@ public class LocalTransportChannel implements TransportChannel {
private final long requestId;
private final Version version;
public LocalTransportChannel(LocalTransport sourceTransport, TransportServiceAdapter sourceTransportServiceAdapter, LocalTransport targetTransport, String action, long requestId, Version version) {
public LocalTransportChannel(LocalTransport sourceTransport, TransportServiceAdapter sourceTransportServiceAdapter,
LocalTransport targetTransport, String action, long requestId, Version version) {
this.sourceTransport = sourceTransport;
this.sourceTransportServiceAdapter = sourceTransportServiceAdapter;
this.targetTransport = targetTransport;
@ -94,7 +95,8 @@ public class LocalTransportChannel implements TransportChannel {
public void sendResponse(Throwable error) throws IOException {
BytesStreamOutput stream = new BytesStreamOutput();
writeResponseExceptionHeader(stream);
RemoteTransportException tx = new RemoteTransportException(targetTransport.nodeName(), targetTransport.boundAddress().boundAddresses()[0], action, error);
RemoteTransportException tx = new RemoteTransportException(targetTransport.nodeName(),
targetTransport.boundAddress().boundAddresses()[0], action, error);
stream.writeThrowable(tx);
final byte[] data = stream.bytes().toBytes();
View File
@ -116,7 +116,9 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
} catch (NotCompressedException ex) {
int maxToRead = Math.min(buffer.readableBytes(), 10);
int offset = buffer.readerIndex();
StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead).append("] content bytes out of [").append(buffer.readableBytes()).append("] readable bytes with message size [").append(size).append("] ").append("] are [");
StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead)
.append("] content bytes out of [").append(buffer.readableBytes())
.append("] readable bytes with message size [").append(size).append("] are [");
for (int i = 0; i < maxToRead; i++) {
sb.append(buffer.getByte(offset + i)).append(",");
}
@ -134,15 +136,17 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
final int nextByte = streamIn.read();
// calling read() is useful to make sure the message is fully read, even if there some kind of EOS marker
if (nextByte != -1) {
throw new IllegalStateException("Message not fully read (request) for requestId [" + requestId + "], action ["
+ action + "], readerIndex [" + buffer.readerIndex() + "] vs expected [" + expectedIndexReader + "]; resetting");
throw new IllegalStateException("Message not fully read (request) for requestId [" + requestId + "], action [" + action
+ "], readerIndex [" + buffer.readerIndex() + "] vs expected [" + expectedIndexReader + "]; resetting");
}
if (buffer.readerIndex() < expectedIndexReader) {
throw new IllegalStateException("Message is fully read (request), yet there are " + (expectedIndexReader - buffer.readerIndex()) + " remaining bytes; resetting");
throw new IllegalStateException("Message is fully read (request), yet there are "
+ (expectedIndexReader - buffer.readerIndex()) + " remaining bytes; resetting");
}
if (buffer.readerIndex() > expectedIndexReader) {
throw new IllegalStateException("Message read past expected size (request) for requestId [" + requestId + "], action ["
+ action + "], readerIndex [" + buffer.readerIndex() + "] vs expected [" + expectedIndexReader + "]; resetting");
throw new IllegalStateException(
"Message read past expected size (request) for requestId [" + requestId + "], action [" + action
+ "], readerIndex [" + buffer.readerIndex() + "] vs expected [" + expectedIndexReader + "]; resetting");
}
} else {
@ -163,11 +167,12 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
+ handler + "], error [" + TransportStatus.isError(status) + "]; resetting");
}
if (buffer.readerIndex() < expectedIndexReader) {
throw new IllegalStateException("Message is fully read (response), yet there are " + (expectedIndexReader - buffer.readerIndex()) + " remaining bytes; resetting");
throw new IllegalStateException("Message is fully read (response), yet there are "
+ (expectedIndexReader - buffer.readerIndex()) + " remaining bytes; resetting");
}
if (buffer.readerIndex() > expectedIndexReader) {
throw new IllegalStateException("Message read past expected size (response) for requestId [" + requestId + "], handler ["
+ handler + "], error [" + TransportStatus.isError(status) + "]; resetting");
throw new IllegalStateException("Message read past expected size (response) for requestId [" + requestId
+ "], handler [" + handler + "], error [" + TransportStatus.isError(status) + "]; resetting");
}
}
@ -193,7 +198,8 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
try {
response.readFrom(buffer);
} catch (Throwable e) {
handleException(handler, new TransportSerializationException("Failed to deserialize response of type [" + response.getClass().getName() + "]", e));
handleException(handler, new TransportSerializationException(
"Failed to deserialize response of type [" + response.getClass().getName() + "]", e));
return;
}
try {
@ -247,7 +253,8 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler {
buffer = new NamedWriteableAwareStreamInput(buffer, transport.namedWriteableRegistry);
final String action = buffer.readString();
transportServiceAdapter.onRequestReceived(requestId, action);
final NettyTransportChannel transportChannel = new NettyTransportChannel(transport, transportServiceAdapter, action, channel, requestId, version, profileName);
final NettyTransportChannel transportChannel = new NettyTransportChannel(transport, transportServiceAdapter, action, channel,
requestId, version, profileName);
try {
final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action);
if (reg == null) {
View File
@ -42,6 +42,7 @@ import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.network.NetworkService.TcpSettings;
import org.elasticsearch.common.network.NetworkUtils;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
@ -119,6 +120,10 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.settings.Setting.boolSetting;
import static org.elasticsearch.common.settings.Setting.byteSizeSetting;
import static org.elasticsearch.common.settings.Setting.intSetting;
import static org.elasticsearch.common.settings.Setting.timeSetting;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.transport.NetworkExceptionHelper.isCloseConnectionException;
import static org.elasticsearch.common.transport.NetworkExceptionHelper.isConnectException;
@ -143,21 +148,33 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
public static final String TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX = "transport_client_boss";
public static final Setting<Integer> WORKER_COUNT = new Setting<>("transport.netty.worker_count",
(s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2), (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"),
false, Setting.Scope.CLUSTER);
public static final Setting<Integer> CONNECTIONS_PER_NODE_RECOVERY = Setting.intSetting("transport.connections_per_node.recovery", 2, 1, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> CONNECTIONS_PER_NODE_BULK = Setting.intSetting("transport.connections_per_node.bulk", 3, 1, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> CONNECTIONS_PER_NODE_REG = Setting.intSetting("transport.connections_per_node.reg", 6, 1, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> CONNECTIONS_PER_NODE_STATE = Setting.intSetting("transport.connections_per_node.state", 1, 1, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> CONNECTIONS_PER_NODE_PING = Setting.intSetting("transport.connections_per_node.ping", 1, 1, false, Setting.Scope.CLUSTER);
(s) -> Integer.toString(EsExecutors.boundedNumberOfProcessors(s) * 2),
(s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), false, Setting.Scope.CLUSTER);
public static final Setting<Integer> CONNECTIONS_PER_NODE_RECOVERY = intSetting("transport.connections_per_node.recovery", 2, 1, false,
Scope.CLUSTER);
public static final Setting<Integer> CONNECTIONS_PER_NODE_BULK = intSetting("transport.connections_per_node.bulk", 3, 1, false,
Scope.CLUSTER);
public static final Setting<Integer> CONNECTIONS_PER_NODE_REG = intSetting("transport.connections_per_node.reg", 6, 1, false,
Scope.CLUSTER);
public static final Setting<Integer> CONNECTIONS_PER_NODE_STATE = intSetting("transport.connections_per_node.state", 1, 1, false,
Scope.CLUSTER);
public static final Setting<Integer> CONNECTIONS_PER_NODE_PING = intSetting("transport.connections_per_node.ping", 1, 1, false,
Scope.CLUSTER);
// the scheduled internal ping interval setting, defaults to disabled (-1)
public static final Setting<TimeValue> PING_SCHEDULE = Setting.timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING_CLIENT = Setting.boolSetting("transport.tcp.blocking_client", TcpSettings.TCP_BLOCKING_CLIENT, false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT = Setting.timeSetting("transport.tcp.connect_timeout", TcpSettings.TCP_CONNECT_TIMEOUT, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_NO_DELAY = Setting.boolSetting("transport.tcp_no_delay", TcpSettings.TCP_NO_DELAY, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_KEEP_ALIVE = Setting.boolSetting("transport.tcp.keep_alive", TcpSettings.TCP_KEEP_ALIVE, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING_SERVER = Setting.boolSetting("transport.tcp.blocking_server", TcpSettings.TCP_BLOCKING_SERVER, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_REUSE_ADDRESS = Setting.boolSetting("transport.tcp.reuse_address", TcpSettings.TCP_REUSE_ADDRESS, false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> PING_SCHEDULE = timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), false,
Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING_CLIENT = boolSetting("transport.tcp.blocking_client", TcpSettings.TCP_BLOCKING_CLIENT,
false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT = timeSetting("transport.tcp.connect_timeout",
TcpSettings.TCP_CONNECT_TIMEOUT, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_NO_DELAY = boolSetting("transport.tcp_no_delay", TcpSettings.TCP_NO_DELAY, false,
Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_KEEP_ALIVE = boolSetting("transport.tcp.keep_alive", TcpSettings.TCP_KEEP_ALIVE, false,
Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING_SERVER = boolSetting("transport.tcp.blocking_server", TcpSettings.TCP_BLOCKING_SERVER,
false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_REUSE_ADDRESS = boolSetting("transport.tcp.reuse_address", TcpSettings.TCP_REUSE_ADDRESS,
false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("transport.tcp.send_buffer_size", TcpSettings.TCP_SEND_BUFFER_SIZE, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("transport.tcp.receive_buffer_size", TcpSettings.TCP_RECEIVE_BUFFER_SIZE, false, Setting.Scope.CLUSTER);
@ -165,9 +182,9 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
public static final Setting<ByteSizeValue> NETTY_MAX_CUMULATION_BUFFER_CAPACITY = Setting.byteSizeSetting("transport.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
public static final Setting<Integer> NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = Setting.intSetting("transport.netty.max_composite_buffer_components", -1, -1, false, Setting.Scope.CLUSTER);
// See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one
public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting("transport.netty.receive_predictor_size",
public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting(
"transport.netty.receive_predictor_size",
settings -> {
long defaultReceiverPredictor = 512 * 1024;
if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
@ -177,10 +194,11 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
}
return new ByteSizeValue(defaultReceiverPredictor).toString();
}, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MIN = Setting.byteSizeSetting("transport.netty.receive_predictor_min", NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MAX = Setting.byteSizeSetting("transport.netty.receive_predictor_max", NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> NETTY_BOSS_COUNT = Setting.intSetting("transport.netty.boss_count", 1, 1, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MIN = byteSizeSetting("transport.netty.receive_predictor_min",
NETTY_RECEIVE_PREDICTOR_SIZE, false, Scope.CLUSTER);
public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting("transport.netty.receive_predictor_max",
NETTY_RECEIVE_PREDICTOR_SIZE, false, Scope.CLUSTER);
public static final Setting<Integer> NETTY_BOSS_COUNT = intSetting("transport.netty.boss_count", 1, 1, false, Scope.CLUSTER);
protected final NetworkService networkService;
protected final Version version;
@ -226,7 +244,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
final ScheduledPing scheduledPing;
@Inject
public NettyTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, Version version, NamedWriteableRegistry namedWriteableRegistry) {
public NettyTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, Version version,
NamedWriteableRegistry namedWriteableRegistry) {
super(settings);
this.threadPool = threadPool;
this.networkService = networkService;
@ -252,7 +271,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
if (receivePredictorMax.bytes() == receivePredictorMin.bytes()) {
receiveBufferSizePredictorFactory = new FixedReceiveBufferSizePredictorFactory((int) receivePredictorMax.bytes());
} else {
receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(), (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes());
receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(),
(int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes());
}
this.scheduledPing = new ScheduledPing();
@ -305,7 +325,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
String name = entry.getKey();
if (!Strings.hasLength(name)) {
logger.info("transport profile configured without a name. skipping profile with settings [{}]", profileSettings.toDelimitedString(','));
logger.info("transport profile configured without a name. skipping profile with settings [{}]",
profileSettings.toDelimitedString(','));
continue;
} else if (TransportSettings.DEFAULT_PROFILE.equals(name)) {
profileSettings = settingsBuilder()
@ -345,13 +366,16 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
private ClientBootstrap createClientBootstrap() {
if (blockingClient) {
clientBootstrap = new ClientBootstrap(new OioClientSocketChannelFactory(Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX))));
clientBootstrap = new ClientBootstrap(new OioClientSocketChannelFactory(
Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX))));
} else {
int bossCount = NETTY_BOSS_COUNT.get(settings);
clientBootstrap = new ClientBootstrap(new NioClientSocketChannelFactory(
Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX)),
bossCount,
new NioWorkerPool(Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX)), workerCount),
clientBootstrap = new ClientBootstrap(
new NioClientSocketChannelFactory(
Executors.newCachedThreadPool(daemonThreadFactory(settings, TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX)),
bossCount,
new NioWorkerPool(Executors.newCachedThreadPool(
daemonThreadFactory(settings, TRANSPORT_CLIENT_WORKER_THREAD_NAME_PREFIX)), workerCount),
new HashedWheelTimer(daemonThreadFactory(settings, "transport_client_timer"))));
}
clientBootstrap.setPipelineFactory(configureClientChannelPipelineFactory());
@ -403,12 +427,14 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
boolean fallbackReuseAddress = settings.getAsBoolean("transport.netty.reuse_address", TcpSettings.TCP_REUSE_ADDRESS.get(settings));
fallbackSettingsBuilder.put("reuse_address", fallbackReuseAddress);
ByteSizeValue fallbackTcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size", TcpSettings.TCP_SEND_BUFFER_SIZE.get(settings));
ByteSizeValue fallbackTcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size",
TCP_SEND_BUFFER_SIZE.get(settings));
if (fallbackTcpSendBufferSize.bytes() >= 0) {
fallbackSettingsBuilder.put("tcp_send_buffer_size", fallbackTcpSendBufferSize);
}
ByteSizeValue fallbackTcpBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size", TcpSettings.TCP_RECEIVE_BUFFER_SIZE.get(settings));
ByteSizeValue fallbackTcpBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size",
TCP_RECEIVE_BUFFER_SIZE.get(settings));
if (fallbackTcpBufferSize.bytes() >= 0) {
fallbackSettingsBuilder.put("tcp_receive_buffer_size", fallbackTcpBufferSize);
}
@ -485,7 +511,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
return boundSocket.get();
}
private BoundTransportAddress createBoundTransportAddress(String name, Settings profileSettings, List<InetSocketAddress> boundAddresses) {
private BoundTransportAddress createBoundTransportAddress(String name, Settings profileSettings,
List<InetSocketAddress> boundAddresses) {
String[] boundAddressesHostStrings = new String[boundAddresses.size()];
TransportAddress[] transportBoundAddresses = new TransportAddress[boundAddresses.size()];
for (int i = 0; i < boundAddresses.size(); i++) {
@@ -531,7 +558,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
// TODO: In case of DEFAULT_PROFILE we should probably fail here, as publish address does not match any bound address
// In case of a custom profile, we might use the publish address of the default profile
publishPort = boundAddresses.get(0).getPort();
logger.warn("Publish port not found by matching publish address [{}] to bound addresses [{}], falling back to port [{}] of first bound address", publishInetAddress, boundAddresses, publishPort);
logger.warn("Publish port not found by matching publish address [{}] to bound addresses [{}], "
+ "falling back to port [{}] of first bound address", publishInetAddress, boundAddresses, publishPort);
}
final TransportAddress publishAddress = new InetSocketTransportAddress(new InetSocketAddress(publishInetAddress, publishPort));
@@ -549,8 +577,13 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
ByteSizeValue tcpSendBufferSize = TCP_SEND_BUFFER_SIZE.getDefault(settings);
ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.getDefault(settings);
logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], connect_timeout[{}], connections_per_node[{}/{}/{}/{}/{}], receive_predictor[{}->{}]",
name, workerCount, port, bindHost, publishHost, compress, connectTimeout, connectionsPerNodeRecovery, connectionsPerNodeBulk, connectionsPerNodeReg, connectionsPerNodeState, connectionsPerNodePing, receivePredictorMin, receivePredictorMax);
if (logger.isDebugEnabled()) {
logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], "
+ "connect_timeout[{}], connections_per_node[{}/{}/{}/{}/{}], receive_predictor[{}->{}]",
name, workerCount, port, bindHost, publishHost, compress, connectTimeout, connectionsPerNodeRecovery,
connectionsPerNodeBulk, connectionsPerNodeReg, connectionsPerNodeState, connectionsPerNodePing, receivePredictorMin,
receivePredictorMax);
}
final ThreadFactory bossFactory = daemonThreadFactory(this.settings, HTTP_SERVER_BOSS_THREAD_NAME_PREFIX, name);
final ThreadFactory workerFactory = daemonThreadFactory(this.settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX, name);
@@ -739,7 +772,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
return;
}
if (isCloseConnectionException(e.getCause())) {
logger.trace("close connection exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(), ctx.getChannel());
logger.trace("close connection exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(),
ctx.getChannel());
// close the channel, which will cause a node to be disconnected if relevant
ctx.getChannel().close();
disconnectFromNodeChannel(ctx.getChannel(), e.getCause());
@@ -754,7 +788,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
ctx.getChannel().close();
disconnectFromNodeChannel(ctx.getChannel(), e.getCause());
} else if (e.getCause() instanceof CancelledKeyException) {
logger.trace("cancelled key exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(), ctx.getChannel());
logger.trace("cancelled key exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(),
ctx.getChannel());
// close the channel as safe measure, which will cause a node to be disconnected if relevant
ctx.getChannel().close();
disconnectFromNodeChannel(ctx.getChannel(), e.getCause());
@@ -800,7 +835,8 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
}
@Override
public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request,
TransportRequestOptions options) throws IOException, TransportException {
Channel targetChannel = nodeChannel(node, options);
@@ -902,7 +938,9 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
if (light) {
nodeChannels = connectToChannelsLight(node);
} else {
nodeChannels = new NodeChannels(new Channel[connectionsPerNodeRecovery], new Channel[connectionsPerNodeBulk], new Channel[connectionsPerNodeReg], new Channel[connectionsPerNodeState], new Channel[connectionsPerNodePing]);
nodeChannels = new NodeChannels(new Channel[connectionsPerNodeRecovery], new Channel[connectionsPerNodeBulk],
new Channel[connectionsPerNodeReg], new Channel[connectionsPerNodeState],
new Channel[connectionsPerNodePing]);
try {
connectToChannels(nodeChannels, node);
} catch (Throwable e) {

View File

@@ -53,7 +53,8 @@ public class NettyTransportChannel implements TransportChannel {
private final long requestId;
private final String profileName;
public NettyTransportChannel(NettyTransport transport, TransportServiceAdapter transportServiceAdapter, String action, Channel channel, long requestId, Version version, String profileName) {
public NettyTransportChannel(NettyTransport transport, TransportServiceAdapter transportServiceAdapter, String action, Channel channel,
long requestId, Version version, String profileName) {
this.transportServiceAdapter = transportServiceAdapter;
this.version = version;
this.transport = transport;
@@ -119,7 +120,8 @@ public class NettyTransportChannel implements TransportChannel {
public void sendResponse(Throwable error) throws IOException {
BytesStreamOutput stream = new BytesStreamOutput();
stream.skip(NettyHeader.HEADER_SIZE);
RemoteTransportException tx = new RemoteTransportException(transport.nodeName(), transport.wrapAddress(channel.getLocalAddress()), action, error);
RemoteTransportException tx = new RemoteTransportException(transport.nodeName(), transport.wrapAddress(channel.getLocalAddress()),
action, error);
stream.writeThrowable(tx);
byte status = 0;
status = TransportStatus.setResponse(status);

View File

@@ -80,8 +80,8 @@ public class SizeHeaderFrameDecoder extends FrameDecoder {
}
// safety against too large frames being sent
if (dataLen > NINETY_PER_HEAP_SIZE) {
throw new TooLongFrameException(
"transport content length received [" + new ByteSizeValue(dataLen) + "] exceeded [" + new ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]");
throw new TooLongFrameException("transport content length received [" + new ByteSizeValue(dataLen) + "] exceeded ["
+ new ByteSizeValue(NINETY_PER_HEAP_SIZE) + "]");
}
if (buffer.readableBytes() < dataLen + 6) {

View File

@@ -20,6 +20,7 @@
package org.elasticsearch.tribe;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -83,8 +84,10 @@ import static java.util.Collections.unmodifiableMap;
*/
public class TribeService extends AbstractLifecycleComponent<TribeService> {
public static final ClusterBlock TRIBE_METADATA_BLOCK = new ClusterBlock(10, "tribe node, metadata not allowed", false, false, RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.METADATA_READ, ClusterBlockLevel.METADATA_WRITE));
public static final ClusterBlock TRIBE_WRITE_BLOCK = new ClusterBlock(11, "tribe node, write not allowed", false, false, RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.WRITE));
public static final ClusterBlock TRIBE_METADATA_BLOCK = new ClusterBlock(10, "tribe node, metadata not allowed", false, false,
RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.METADATA_READ, ClusterBlockLevel.METADATA_WRITE));
public static final ClusterBlock TRIBE_WRITE_BLOCK = new ClusterBlock(11, "tribe node, write not allowed", false, false,
RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.WRITE));
public static Settings processSettings(Settings settings) {
if (TRIBE_NAME_SETTING.exists(settings)) {
@@ -106,7 +109,8 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
Settings.Builder sb = Settings.builder().put(settings);
sb.put(Node.NODE_CLIENT_SETTING.getKey(), true); // this node should just act as a node client
sb.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local"); // a tribe node should not use zen discovery
sb.put(DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0); // nothing is going to be discovered, since no master will be elected
// nothing is going to be discovered, since no master will be elected
sb.put(DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0);
if (sb.get("cluster.name") == null) {
sb.put("cluster.name", "tribe_" + Strings.randomBase64UUID()); // make sure it won't join other tribe nodes in the same JVM
}
@@ -114,7 +118,8 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
return sb.build();
}
private static final Setting<String> TRIBE_NAME_SETTING = Setting.simpleString("tribe.name", false, Setting.Scope.CLUSTER); // internal settings only
// internal settings only
private static final Setting<String> TRIBE_NAME_SETTING = Setting.simpleString("tribe.name", false, Setting.Scope.CLUSTER);
private final ClusterService clusterService;
private final String[] blockIndicesWrite;
private final String[] blockIndicesRead;
@@ -125,14 +130,20 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
if (ON_CONFLICT_ANY.equals(s) || ON_CONFLICT_DROP.equals(s) || s.startsWith(ON_CONFLICT_PREFER)) {
return s;
}
throw new IllegalArgumentException("Invalid value for [tribe.on_conflict] must be either [any, drop or start with prefer_] but was: " +s);
throw new IllegalArgumentException(
"Invalid value for [tribe.on_conflict] must be either [any, drop or start with prefer_] but was: " + s);
}, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> BLOCKS_METADATA_SETTING = Setting.boolSetting("tribe.blocks.metadata", false, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> BLOCKS_WRITE_SETTING = Setting.boolSetting("tribe.blocks.write", false, false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> BLOCKS_WRITE_INDICES_SETTING = Setting.listSetting("tribe.blocks.write.indices", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> BLOCKS_READ_INDICES_SETTING = Setting.listSetting("tribe.blocks.read.indices", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> BLOCKS_METADATA_INDICES_SETTING = Setting.listSetting("tribe.blocks.metadata.indices", Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> BLOCKS_METADATA_SETTING = Setting.boolSetting("tribe.blocks.metadata", false, false,
Setting.Scope.CLUSTER);
public static final Setting<Boolean> BLOCKS_WRITE_SETTING = Setting.boolSetting("tribe.blocks.write", false, false,
Setting.Scope.CLUSTER);
public static final Setting<List<String>> BLOCKS_WRITE_INDICES_SETTING = Setting.listSetting("tribe.blocks.write.indices",
Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> BLOCKS_READ_INDICES_SETTING = Setting.listSetting("tribe.blocks.read.indices",
Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> BLOCKS_METADATA_INDICES_SETTING = Setting.listSetting("tribe.blocks.metadata.indices",
Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);
private final String onConflict;
private final Set<String> droppedIndices = ConcurrentCollections.newConcurrentSet();
@@ -304,7 +315,8 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
tribeAttr.put(attr.key, attr.value);
}
tribeAttr.put(TRIBE_NAME_SETTING.getKey(), tribeName);
DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(), tribe.address(), unmodifiableMap(tribeAttr), tribe.version());
DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(),
tribe.address(), unmodifiableMap(tribeAttr), tribe.version());
clusterStateChanged = true;
logger.info("[{}] adding node [{}]", tribeName, discoNode);
nodes.put(discoNode);
@@ -328,7 +340,8 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
// always make sure to update the metadata and routing table, in case
// there are changes in them (new mapping, shards moving from initializing to started)
routingTable.add(tribeState.routingTable().index(index.getIndex()));
Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME_SETTING.getKey(), tribeName).build();
Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings())
.put(TRIBE_NAME_SETTING.getKey(), tribeName).build();
metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
}
}
@@ -357,7 +370,8 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
} else if (ON_CONFLICT_DROP.equals(onConflict)) {
// drop the indices, there is a conflict
clusterStateChanged = true;
logger.info("[{}] dropping index {} due to conflict with [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe);
logger.info("[{}] dropping index {} due to conflict with [{}]", tribeName, tribeIndex.getIndex(),
existingFromTribe);
removeIndex(blocks, metaData, routingTable, tribeIndex);
droppedIndices.add(tribeIndex.getIndex().getName());
} else if (onConflict.startsWith(ON_CONFLICT_PREFER)) {
@@ -366,7 +380,8 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
if (tribeName.equals(preferredTribeName)) {
// the new one is the preferred one, replace...
clusterStateChanged = true;
logger.info("[{}] adding index {}, preferred over [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe);
logger.info("[{}] adding index {}, preferred over [{}]", tribeName, tribeIndex.getIndex(),
existingFromTribe);
removeIndex(blocks, metaData, routingTable, tribeIndex);
addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex);
} // else: either the existing one is the preferred one, or we haven't seen one, carry on
@@ -378,17 +393,20 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
if (!clusterStateChanged) {
return currentState;
} else {
return ClusterState.builder(currentState).incrementVersion().blocks(blocks).nodes(nodes).metaData(metaData).routingTable(routingTable.build()).build();
return ClusterState.builder(currentState).incrementVersion().blocks(blocks).nodes(nodes).metaData(metaData)
.routingTable(routingTable.build()).build();
}
}
private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData index) {
private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable,
IndexMetaData index) {
metaData.remove(index.getIndex().getName());
routingTable.remove(index.getIndex().getName());
blocks.removeIndexBlocks(index.getIndex().getName());
}
private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData tribeIndex) {
private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData,
RoutingTable.Builder routingTable, IndexMetaData tribeIndex) {
Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME_SETTING.getKey(), tribeName).build();
metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
routingTable.add(tribeState.routingTable().index(tribeIndex.getIndex()));

View File

@@ -83,7 +83,8 @@ public class ResourceWatcherService extends AbstractLifecycleComponent<ResourceW
TimeValue interval = settings.getAsTime("resource.reload.interval.low", Frequency.LOW.interval);
lowMonitor = new ResourceMonitor(interval, Frequency.LOW);
interval = settings.getAsTime("resource.reload.interval.medium", settings.getAsTime("resource.reload.interval", Frequency.MEDIUM.interval));
interval = settings.getAsTime("resource.reload.interval.medium",
settings.getAsTime("resource.reload.interval", Frequency.MEDIUM.interval));
mediumMonitor = new ResourceMonitor(interval, Frequency.MEDIUM);
interval = settings.getAsTime("resource.reload.interval.high", Frequency.HIGH.interval);
highMonitor = new ResourceMonitor(interval, Frequency.HIGH);

View File

@@ -56,12 +56,12 @@ public class ClusterStateCreationUtils {
/**
* Creates cluster state with an index that has one shard and #(replicaStates) replicas
*
* @param index name of the index
* @param primaryLocal if primary should coincide with the local node in the cluster state
* @param primaryState state of primary
* @param replicaStates states of the replicas. length of this array determines also the number of replicas
* @param index name of the index
* @param activePrimaryLocal if active primary should coincide with the local node in the cluster state
* @param primaryState state of primary
* @param replicaStates states of the replicas. length of this array determines also the number of replicas
*/
public static ClusterState state(String index, boolean primaryLocal, ShardRoutingState primaryState, ShardRoutingState... replicaStates) {
public static ClusterState state(String index, boolean activePrimaryLocal, ShardRoutingState primaryState, ShardRoutingState... replicaStates) {
final int numberOfReplicas = replicaStates.length;
int numberOfNodes = numberOfReplicas + 1;
@@ -97,7 +97,7 @@ public class ClusterStateCreationUtils {
String relocatingNode = null;
UnassignedInfo unassignedInfo = null;
if (primaryState != ShardRoutingState.UNASSIGNED) {
if (primaryLocal) {
if (activePrimaryLocal) {
primaryNode = newNode(0).id();
unassignedNodes.remove(primaryNode);
} else {
@@ -173,13 +173,13 @@ public class ClusterStateCreationUtils {
* Creates cluster state with an index that has one shard and as many replicas as numberOfReplicas.
* Primary will be active (STARTED or RELOCATING) in cluster state but replicas will be one of UNASSIGNED, INITIALIZING, STARTED or RELOCATING.
*
* @param index name of the index
* @param primaryLocal if primary should coincide with the local node in the cluster state
* @param numberOfReplicas number of replicas
* @param index name of the index
* @param activePrimaryLocal if active primary should coincide with the local node in the cluster state
* @param numberOfReplicas number of replicas
*/
public static ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int numberOfReplicas) {
public static ClusterState stateWithActivePrimary(String index, boolean activePrimaryLocal, int numberOfReplicas) {
int assignedReplicas = randomIntBetween(0, numberOfReplicas);
return stateWithStartedPrimary(index, primaryLocal, assignedReplicas, numberOfReplicas - assignedReplicas);
return stateWithActivePrimary(index, activePrimaryLocal, assignedReplicas, numberOfReplicas - assignedReplicas);
}
/**
@@ -188,11 +188,11 @@ public class ClusterStateCreationUtils {
* some (assignedReplicas) will be one of INITIALIZING, STARTED or RELOCATING.
*
* @param index name of the index
* @param primaryLocal if primary should coincide with the local node in the cluster state
* @param activePrimaryLocal if active primary should coincide with the local node in the cluster state
* @param assignedReplicas number of replicas that should have INITIALIZING, STARTED or RELOCATING state
* @param unassignedReplicas number of replicas that should be unassigned
*/
public static ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int assignedReplicas, int unassignedReplicas) {
public static ClusterState stateWithActivePrimary(String index, boolean activePrimaryLocal, int assignedReplicas, int unassignedReplicas) {
ShardRoutingState[] replicaStates = new ShardRoutingState[assignedReplicas + unassignedReplicas];
// no point in randomizing - node assignment later on does it too.
for (int i = 0; i < assignedReplicas; i++) {
@@ -201,7 +201,7 @@ public class ClusterStateCreationUtils {
for (int i = assignedReplicas; i < replicaStates.length; i++) {
replicaStates[i] = ShardRoutingState.UNASSIGNED;
}
return state(index, primaryLocal, randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING), replicaStates);
return state(index, activePrimaryLocal, randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING), replicaStates);
}
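// A minimal usage sketch (illustrative only, not part of this commit) of the renamed helper above:
// a cluster state whose primary is active (STARTED or RELOCATING) on the local node, with two
// assigned and one unassigned replica.
private static ClusterState exampleStateWithActivePrimary() {
return stateWithActivePrimary("test", true, 2, 1);
}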
/**

View File

@@ -37,10 +37,13 @@ import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -52,6 +55,7 @@ import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ESAllocationTestCase;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.cluster.TestClusterService;
import org.elasticsearch.test.transport.CapturingTransport;
@@ -66,6 +70,7 @@ import org.junit.BeforeClass;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -75,9 +80,10 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state;
import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithStartedPrimary;
import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.Matchers.arrayWithSize;
import static org.hamcrest.Matchers.empty;
@@ -203,6 +209,56 @@ public class TransportReplicationActionTests extends ESTestCase {
assertIndexShardCounter(1);
}
/**
* When relocating a primary shard, there is a cluster state update at the end of relocation where the active primary is switched
* from the relocation source to the relocation target. If the relocation source receives and processes this cluster state
* before the relocation target does, there is a time span where the relocation source believes the active primary to be on the
* relocation target and the relocation target believes the active primary to be on the relocation source. This results in
* replication requests being sent back and forth.
*
* This test checks that a replication request is not routed back from the relocation target to the relocation source in case of
* a stale index routing table on the relocation target.
*/
public void testNoRerouteOnStaleClusterState() throws InterruptedException, ExecutionException {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
ClusterState state = state(index, true, ShardRoutingState.RELOCATING);
String relocationTargetNode = state.getRoutingTable().shardRoutingTable(shardId).primaryShard().relocatingNodeId();
state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(relocationTargetNode)).build();
clusterService.setState(state);
logger.debug("--> relocation ongoing state:\n{}", clusterService.state().prettyPrint());
Request request = new Request(shardId).timeout("1ms").routedBasedOnClusterVersion(clusterService.state().version() + 1);
PlainActionFuture<Response> listener = new PlainActionFuture<>();
TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(null, request, listener);
reroutePhase.run();
assertListenerThrows("cluster state too old didn't cause a timeout", listener, UnavailableShardsException.class);
request = new Request(shardId).routedBasedOnClusterVersion(clusterService.state().version() + 1);
listener = new PlainActionFuture<>();
reroutePhase = action.new ReroutePhase(null, request, listener);
reroutePhase.run();
assertFalse("cluster state too old didn't cause a retry", listener.isDone());
// finish relocation
ShardRouting relocationTarget = clusterService.state().getRoutingTable().shardRoutingTable(shardId).shardsWithState(ShardRoutingState.INITIALIZING).get(0);
AllocationService allocationService = ESAllocationTestCase.createAllocationService();
RoutingAllocation.Result result = allocationService.applyStartedShards(state, Arrays.asList(relocationTarget));
ClusterState updatedState = ClusterState.builder(clusterService.state()).routingResult(result).build();
clusterService.setState(updatedState);
logger.debug("--> relocation complete state:\n{}", clusterService.state().prettyPrint());
IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId();
final List<CapturingTransport.CapturedRequest> capturedRequests =
transport.getCapturedRequestsByTargetNodeAndClear().get(primaryNodeId);
assertThat(capturedRequests, notNullValue());
assertThat(capturedRequests.size(), equalTo(1));
assertThat(capturedRequests.get(0).action, equalTo("testAction[p]"));
assertIndexShardCounter(1);
}
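// A minimal sketch (hypothetical helper, not part of this commit) of the routing guard the test above
// exercises: a request stamped via routedBasedOnClusterVersion(version) must not be re-resolved against
// a cluster state older than that version; the node retries on a newer state instead of bouncing the
// request back to the relocation source.
private static boolean mustWaitForNewerClusterState(long localClusterStateVersion, long requestRoutedOnVersion) {
return localClusterStateVersion < requestRoutedOnVersion;
}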
public void testUnknownIndexOrShardOnReroute() throws InterruptedException {
final String index = "test";
// no replicas in order to skip the replication part
@@ -225,7 +281,7 @@ public class TransportReplicationActionTests extends ESTestCase {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
clusterService.setState(stateWithStartedPrimary(index, randomBoolean(), 3));
clusterService.setState(stateWithActivePrimary(index, randomBoolean(), 3));
logger.debug("using state: \n{}", clusterService.state().prettyPrint());
@@ -249,33 +305,73 @@ public class TransportReplicationActionTests extends ESTestCase {
assertIndexShardUninitialized();
}
public void testPrimaryPhaseExecutesRequest() throws InterruptedException, ExecutionException {
public void testPrimaryPhaseExecutesOrDelegatesRequestToRelocationTarget() throws InterruptedException, ExecutionException {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
clusterService.setState(state(index, true, ShardRoutingState.STARTED, ShardRoutingState.STARTED));
ClusterState state = stateWithActivePrimary(index, true, randomInt(5));
clusterService.setState(state);
Request request = new Request(shardId).timeout("1ms");
PlainActionFuture<Response> listener = new PlainActionFuture<>();
TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener));
AtomicBoolean movedToReplication = new AtomicBoolean();
TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener)) {
@Override
void finishAndMoveToReplication(TransportReplicationAction.ReplicationPhase replicationPhase) {
super.finishAndMoveToReplication(replicationPhase);
movedToReplication.set(true);
}
};
ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard();
boolean executeOnPrimary = true;
// randomly simulate that the shard has already been marked as relocated (i.e. relocation completed)
if (primaryShard.relocating() && randomBoolean()) {
isRelocated.set(true);
indexShardRouting.set(primaryShard);
executeOnPrimary = false;
}
primaryPhase.run();
assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
final String replicaNodeId = clusterService.state().getRoutingTable().shardRoutingTable(index, shardId.id()).replicaShards().get(0).currentNodeId();
final List<CapturingTransport.CapturedRequest> requests = transport.getCapturedRequestsByTargetNodeAndClear().get(replicaNodeId);
assertThat(requests, notNullValue());
assertThat(requests.size(), equalTo(1));
assertThat("replica request was not sent", requests.get(0).action, equalTo("testAction[r]"));
assertThat(request.processedOnPrimary.get(), equalTo(executeOnPrimary));
assertThat(movedToReplication.get(), equalTo(executeOnPrimary));
if (executeOnPrimary == false) {
final List<CapturingTransport.CapturedRequest> requests = transport.capturedRequestsByTargetNode().get(primaryShard.relocatingNodeId());
assertThat(requests, notNullValue());
assertThat(requests.size(), equalTo(1));
assertThat("primary request was not delegated to relocation target", requests.get(0).action, equalTo("testAction[p]"));
}
}
public void testPrimaryPhaseExecutesDelegatedRequestOnRelocationTarget() throws InterruptedException, ExecutionException {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
ClusterState state = state(index, true, ShardRoutingState.RELOCATING);
String primaryTargetNodeId = state.getRoutingTable().shardRoutingTable(shardId).primaryShard().relocatingNodeId();
// simulate execution of the primary phase on the relocation target node
state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryTargetNodeId)).build();
clusterService.setState(state);
Request request = new Request(shardId).timeout("1ms");
PlainActionFuture<Response> listener = new PlainActionFuture<>();
AtomicBoolean movedToReplication = new AtomicBoolean();
TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener)) {
@Override
void finishAndMoveToReplication(TransportReplicationAction.ReplicationPhase replicationPhase) {
super.finishAndMoveToReplication(replicationPhase);
movedToReplication.set(true);
}
};
primaryPhase.run();
assertThat("request was not processed on primary relocation target", request.processedOnPrimary.get(), equalTo(true));
assertThat(movedToReplication.get(), equalTo(true));
}
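// A minimal sketch (hypothetical helper, not part of this commit) of the decision the two tests above
// exercise: the primary phase executes locally unless the shard reference reports the relocation
// handoff as completed, in which case the request is delegated to the relocation target.
private static boolean shouldDelegateToRelocationTarget(TransportReplicationAction.IndexShardReference reference) {
// when delegating, the target node id comes from reference.routingEntry().relocatingNodeId()
return reference.isRelocated();
}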
public void testAddedReplicaAfterPrimaryOperation() {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
// start with no replicas
clusterService.setState(stateWithStartedPrimary(index, true, 0));
clusterService.setState(stateWithActivePrimary(index, true, 0));
logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
final ClusterState stateWithAddedReplicas = state(index, true, ShardRoutingState.STARTED, randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.STARTED);
final Action actionWithAddedReplicaAfterPrimaryOp = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable {
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception {
final Tuple<Response, Request> operationOnPrimary = super.shardOperationOnPrimary(metaData, shardRequest);
// add replicas after primary operation
((TestClusterService) clusterService).setState(stateWithAddedReplicas);
@@ -302,13 +398,13 @@ public class TransportReplicationActionTests extends ESTestCase {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
// start with a replica
clusterService.setState(state(index, true, ShardRoutingState.STARTED, randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.STARTED));
clusterService.setState(state(index, true, ShardRoutingState.STARTED, randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.STARTED));
logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
final ClusterState stateWithRelocatingReplica = state(index, true, ShardRoutingState.STARTED, ShardRoutingState.RELOCATING);
final Action actionWithRelocatingReplicasAfterPrimaryOp = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable {
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception {
final Tuple<Response, Request> operationOnPrimary = super.shardOperationOnPrimary(metaData, shardRequest);
// set replica to relocating
((TestClusterService) clusterService).setState(stateWithRelocatingReplica);
@@ -341,7 +437,7 @@ public class TransportReplicationActionTests extends ESTestCase {
final Action actionWithDeletedIndexAfterPrimaryOp = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable {
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception {
final Tuple<Response, Request> operationOnPrimary = super.shardOperationOnPrimary(metaData, shardRequest);
// delete index after primary op
((TestClusterService) clusterService).setState(stateWithDeletedIndex);
@@ -432,7 +528,13 @@ public class TransportReplicationActionTests extends ESTestCase {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5)));
ClusterState state = stateWithActivePrimary(index, true, randomInt(5));
ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard();
if (primaryShard.relocating() && randomBoolean()) {
// simulate execution of the replication phase on the relocation target node after relocation source was marked as relocated
state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryShard.relocatingNodeId())).build();
}
clusterService.setState(state);
final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
int assignedReplicas = 0;
@@ -455,12 +557,19 @@ public class TransportReplicationActionTests extends ESTestCase {
final String index = "test";
final ShardId shardId = new ShardId(index, "_na_", 0);
ClusterState state = stateWithStartedPrimary(index, true, randomInt(5));
ClusterState state = stateWithActivePrimary(index, true, randomInt(5));
MetaData.Builder metaData = MetaData.builder(state.metaData());
Settings.Builder settings = Settings.builder().put(metaData.get(index).getSettings());
settings.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true);
metaData.put(IndexMetaData.builder(metaData.get(index)).settings(settings));
clusterService.setState(ClusterState.builder(state).metaData(metaData));
state = ClusterState.builder(state).metaData(metaData).build();
ShardRouting primaryShard = state.getRoutingTable().shardRoutingTable(shardId).primaryShard();
if (primaryShard.relocating() && randomBoolean()) {
// simulate execution of the primary phase on the relocation target node
state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(primaryShard.relocatingNodeId())).build();
}
clusterService.setState(state);
final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
int assignedReplicas = 0;
@@ -507,8 +616,9 @@ public class TransportReplicationActionTests extends ESTestCase {
assertEquals(request.shardId, replicationRequest.shardId);
}
String localNodeId = clusterService.state().getNodes().localNodeId();
// no request was sent to the local node
assertThat(nodesSentTo.keySet(), not(hasItem(clusterService.state().getNodes().localNodeId())));
assertThat(nodesSentTo.keySet(), not(hasItem(localNodeId)));
// requests were sent to the correct shard copies
for (ShardRouting shard : clusterService.state().getRoutingTable().shardRoutingTable(shardId)) {
@@ -518,11 +628,11 @@ public class TransportReplicationActionTests extends ESTestCase {
if (shard.unassigned()) {
continue;
}
if (shard.primary() == false) {
nodesSentTo.remove(shard.currentNodeId());
if (localNodeId.equals(shard.currentNodeId()) == false) {
assertThat(nodesSentTo.remove(shard.currentNodeId()), notNullValue());
}
if (shard.relocating()) {
nodesSentTo.remove(shard.relocatingNodeId());
// for relocating primaries, we replicate from the relocation target to the source if the source is marked as relocated
if (shard.relocating() && localNodeId.equals(shard.relocatingNodeId()) == false) {
assertThat(nodesSentTo.remove(shard.relocatingNodeId()), notNullValue());
}
}
@@ -629,6 +739,7 @@ public class TransportReplicationActionTests extends ESTestCase {
// shard operation should be ongoing, so the counter is at 2
// we have to wait here because increment happens in thread
assertBusy(() -> assertIndexShardCounter(2));
assertThat(transport.capturedRequests().length, equalTo(0));
((ActionWithDelay) action).countDownLatch.countDown();
t.join();
@@ -726,12 +837,28 @@ public class TransportReplicationActionTests extends ESTestCase {
private final AtomicInteger count = new AtomicInteger(0);
private final AtomicBoolean isRelocated = new AtomicBoolean(false);
private final AtomicReference<ShardRouting> indexShardRouting = new AtomicReference<>();
/*
* Returns testIndexShardOperationsCounter, initializing it if it has not already been created in this test run.
*/
private synchronized Releasable getOrCreateIndexShardOperationsCounter() {
private synchronized TransportReplicationAction.IndexShardReference getOrCreateIndexShardOperationsCounter() {
count.incrementAndGet();
return new Releasable() {
return new TransportReplicationAction.IndexShardReference() {
@Override
public boolean isRelocated() {
return isRelocated.get();
}
@Override
public ShardRouting routingEntry() {
ShardRouting shardRouting = indexShardRouting.get();
assert shardRouting != null;
return shardRouting;
}
@Override
public void close() {
count.decrementAndGet();
@@ -783,7 +910,7 @@ }
}
@Override
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable {
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception {
boolean executedBefore = shardRequest.processedOnPrimary.getAndSet(true);
assert executedBefore == false : "request has already been executed on the primary";
return new Tuple<>(new Response(), shardRequest);
@@ -805,7 +932,11 @@ }
}
@Override
protected Releasable getIndexShardOperationsCounter(ShardId shardId) {
protected IndexShardReference getIndexShardReferenceOnPrimary(ShardId shardId) {
return getOrCreateIndexShardOperationsCounter();
}
protected IndexShardReference getIndexShardReferenceOnReplica(ShardId shardId) {
return getOrCreateIndexShardOperationsCounter();
}
}
@@ -832,7 +963,7 @@ }
}
@Override
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable {
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) {
return throwException(shardRequest.shardId());
}
@@ -870,7 +1001,7 @@ }
}
@Override
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable {
protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Exception {
awaitLatch();
return new Tuple<>(new Response(), shardRequest);
}

View File

@@ -279,6 +279,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
}
}
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/16373")
public void testOldIndexes() throws Exception {
setupCluster();

View File

@@ -20,6 +20,7 @@
package org.elasticsearch.cluster.action.shard;
import org.apache.lucene.index.CorruptIndexException;
import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
@@ -33,7 +34,6 @@ import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.cluster.TestClusterService;
import org.elasticsearch.test.transport.CapturingTransport;
@@ -55,7 +55,6 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.LongConsumer;
import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithStartedPrimary;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.Matchers.is;
@@ -127,7 +126,7 @@ public class ShardStateActionTests extends ESTestCase {
public void testSuccess() throws InterruptedException {
final String index = "test";
clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5)));
clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5)));
String indexUUID = clusterService.state().metaData().index(index).getIndexUUID();
@@ -169,7 +168,7 @@ public class ShardStateActionTests extends ESTestCase {
public void testNoMaster() throws InterruptedException {
final String index = "test";
clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5)));
clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5)));
DiscoveryNodes.Builder noMasterBuilder = DiscoveryNodes.builder(clusterService.state().nodes());
noMasterBuilder.masterNodeId(null);
@@ -207,7 +206,7 @@ public class ShardStateActionTests extends ESTestCase {
public void testMasterChannelException() throws InterruptedException {
final String index = "test";
clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5)));
clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5)));
String indexUUID = clusterService.state().metaData().index(index).getIndexUUID();
@@ -264,7 +263,7 @@ public class ShardStateActionTests extends ESTestCase {
public void testUnhandledFailure() {
final String index = "test";
clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5)));
clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5)));
String indexUUID = clusterService.state().metaData().index(index).getIndexUUID();
@@ -294,7 +293,7 @@ public class ShardStateActionTests extends ESTestCase {
public void testShardNotFound() throws InterruptedException {
final String index = "test";
clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5)));
clusterService.setState(ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5)));
String indexUUID = clusterService.state().metaData().index(index).getIndexUUID();

View File

@@ -19,9 +19,6 @@
package org.elasticsearch.common.cli;
import java.nio.file.NoSuchFileException;
import java.util.List;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasSize;
@@ -46,22 +43,9 @@ public class TerminalTests extends CliToolTestCase {
assertPrinted(terminal, Terminal.Verbosity.VERBOSE, "text");
}
public void testError() throws Exception {
try {
// actually throw so we have a stacktrace
throw new NoSuchFileException("/path/to/some/file");
} catch (NoSuchFileException e) {
CaptureOutputTerminal terminal = new CaptureOutputTerminal(Terminal.Verbosity.NORMAL);
terminal.printError(e);
List<String> output = terminal.getTerminalOutput();
assertFalse(output.isEmpty());
assertTrue(output.get(0), output.get(0).contains("NoSuchFileException")); // exception class
assertTrue(output.get(0), output.get(0).contains("/path/to/some/file")); // message
assertEquals(1, output.size());
// TODO: we should test stack trace is printed in debug mode...except debug is a sysprop instead of
// a command line param...maybe it should be VERBOSE instead of a separate debug prop?
}
public void testEscaping() throws Exception {
CaptureOutputTerminal terminal = new CaptureOutputTerminal(Terminal.Verbosity.NORMAL);
assertPrinted(terminal, Terminal.Verbosity.NORMAL, "This message contains percent like %20n");
}
private void assertPrinted(CaptureOutputTerminal logTerminal, Terminal.Verbosity verbosity, String text) {

View File

@@ -0,0 +1,115 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.util.concurrent;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;
public class SuspendableRefContainerTests extends ESTestCase {
public void testBasicAcquire() throws InterruptedException {
SuspendableRefContainer refContainer = new SuspendableRefContainer();
assertThat(refContainer.activeRefs(), equalTo(0));
Releasable lock1 = randomLockingMethod(refContainer);
assertThat(refContainer.activeRefs(), equalTo(1));
Releasable lock2 = randomLockingMethod(refContainer);
assertThat(refContainer.activeRefs(), equalTo(2));
lock1.close();
assertThat(refContainer.activeRefs(), equalTo(1));
lock1.close(); // check idempotence
assertThat(refContainer.activeRefs(), equalTo(1));
lock2.close();
assertThat(refContainer.activeRefs(), equalTo(0));
}
public void testAcquisitionBlockingBlocksNewAcquisitions() throws InterruptedException {
SuspendableRefContainer refContainer = new SuspendableRefContainer();
assertThat(refContainer.activeRefs(), equalTo(0));
try (Releasable block = refContainer.blockAcquisition()) {
assertThat(refContainer.activeRefs(), equalTo(0));
assertThat(refContainer.tryAcquire(), nullValue());
assertThat(refContainer.activeRefs(), equalTo(0));
}
try (Releasable lock = refContainer.tryAcquire()) {
assertThat(refContainer.activeRefs(), equalTo(1));
}
// same with blocking acquire
AtomicBoolean acquired = new AtomicBoolean();
Thread t = new Thread(() -> {
try (Releasable lock = randomBoolean() ? refContainer.acquire() : refContainer.acquireUninterruptibly()) {
acquired.set(true);
assertThat(refContainer.activeRefs(), equalTo(1));
} catch (InterruptedException e) {
fail("Interrupted");
}
});
try (Releasable block = refContainer.blockAcquisition()) {
assertThat(refContainer.activeRefs(), equalTo(0));
t.start();
// check that blocking acquire really blocks
assertThat(acquired.get(), equalTo(false));
assertThat(refContainer.activeRefs(), equalTo(0));
}
t.join();
assertThat(acquired.get(), equalTo(true));
assertThat(refContainer.activeRefs(), equalTo(0));
}
public void testAcquisitionBlockingWaitsOnExistingAcquisitions() throws InterruptedException {
SuspendableRefContainer refContainer = new SuspendableRefContainer();
AtomicBoolean acquired = new AtomicBoolean();
Thread t = new Thread(() -> {
try (Releasable block = refContainer.blockAcquisition()) {
acquired.set(true);
assertThat(refContainer.activeRefs(), equalTo(0));
}
});
try (Releasable lock = randomLockingMethod(refContainer)) {
assertThat(refContainer.activeRefs(), equalTo(1));
t.start();
assertThat(acquired.get(), equalTo(false));
assertThat(refContainer.activeRefs(), equalTo(1));
}
t.join();
assertThat(acquired.get(), equalTo(true));
assertThat(refContainer.activeRefs(), equalTo(0));
}
private Releasable randomLockingMethod(SuspendableRefContainer refContainer) throws InterruptedException {
switch (randomInt(2)) {
case 0: return refContainer.tryAcquire();
case 1: return refContainer.acquire();
case 2: return refContainer.acquireUninterruptibly();
}
throw new IllegalArgumentException("randomLockingMethod inconsistent");
}
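// A minimal usage sketch (illustrative only, not part of this commit) of the semantics exercised above:
// acquired references are Releasables, and blockAcquisition() waits for active references to be released
// while suppressing new ones until it is closed.
private void usageSketch(SuspendableRefContainer refContainer) throws InterruptedException {
try (Releasable ref = refContainer.acquire()) {
// do work while holding a reference; activeRefs() is at least 1 here
}
try (Releasable block = refContainer.blockAcquisition()) {
// no new references can be acquired here; tryAcquire() returns null
assert refContainer.tryAcquire() == null;
}
}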
}

View File

@@ -80,20 +80,6 @@ import static org.hamcrest.Matchers.sameInstance;
@ESIntegTestCase.SuppressLocalMode
@TestLogging("_root:DEBUG")
public class ZenDiscoveryIT extends ESIntegTestCase {
public void testChangeRejoinOnMasterOptionIsDynamic() throws Exception {
Settings nodeSettings = Settings.settingsBuilder()
.put("discovery.type", "zen") // <-- To override the local setting if set externally
.build();
String nodeName = internalCluster().startNode(nodeSettings);
ZenDiscovery zenDiscovery = (ZenDiscovery) internalCluster().getInstance(Discovery.class, nodeName);
assertThat(zenDiscovery.isRejoinOnMasterGone(), is(true));
client().admin().cluster().prepareUpdateSettings()
.setTransientSettings(Settings.builder().put(ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING.getKey(), false))
.get();
assertThat(zenDiscovery.isRejoinOnMasterGone(), is(false));
}
public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Exception {
Settings defaultSettings = Settings.builder()

View File

@@ -57,6 +57,8 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
@@ -108,6 +110,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -125,6 +128,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitC
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
/**
* Simple unit-test IndexShard related operations.
@@ -316,36 +320,41 @@ public class IndexShardTests extends ESSingleNodeTestCase {
}
public void testDeleteIndexDecreasesCounter() throws InterruptedException, ExecutionException, IOException {
public void testDeleteIndexPreventsNewOperations() throws InterruptedException, ExecutionException, IOException {
assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get());
ensureGreen("test");
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService indexService = indicesService.indexServiceSafe("test");
IndexShard indexShard = indexService.getShardOrNull(0);
client().admin().indices().prepareDelete("test").get();
assertThat(indexShard.getOperationsCount(), equalTo(0));
assertThat(indexShard.getActiveOperationsCount(), equalTo(0));
try {
indexShard.incrementOperationCounter();
indexShard.acquirePrimaryOperationLock();
fail("we should not be able to increment anymore");
} catch (IndexShardClosedException e) {
// expected
}
try {
indexShard.acquireReplicaOperationLock();
fail("we should not be able to increment anymore");
} catch (IndexShardClosedException e) {
// expected
}
}
public void testIndexShardCounter() throws InterruptedException, ExecutionException, IOException {
public void testIndexOperationsCounter() throws InterruptedException, ExecutionException, IOException {
assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get());
ensureGreen("test");
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService indexService = indicesService.indexServiceSafe("test");
IndexShard indexShard = indexService.getShardOrNull(0);
assertEquals(0, indexShard.getOperationsCount());
indexShard.incrementOperationCounter();
assertEquals(1, indexShard.getOperationsCount());
indexShard.incrementOperationCounter();
assertEquals(2, indexShard.getOperationsCount());
indexShard.decrementOperationCounter();
indexShard.decrementOperationCounter();
assertEquals(0, indexShard.getOperationsCount());
assertEquals(0, indexShard.getActiveOperationsCount());
Releasable operation1 = indexShard.acquirePrimaryOperationLock();
assertEquals(1, indexShard.getActiveOperationsCount());
Releasable operation2 = indexShard.acquirePrimaryOperationLock();
assertEquals(2, indexShard.getActiveOperationsCount());
Releasables.close(operation1, operation2);
assertEquals(0, indexShard.getActiveOperationsCount());
}
public void testMarkAsInactiveTriggersSyncedFlush() throws Exception {
@@ -777,6 +786,89 @@ public class IndexShardTests extends ESSingleNodeTestCase {
assertEquals(total + 1, shard.flushStats().getTotal());
}
public void testLockingBeforeAndAfterRelocated() throws Exception {
assertAcked(client().admin().indices().prepareCreate("test").setSettings(
Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)
).get());
ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test");
final IndexShard shard = test.getShardOrNull(0);
CountDownLatch latch = new CountDownLatch(1);
Thread recoveryThread = new Thread(() -> {
latch.countDown();
shard.relocated("simulated recovery");
});
try (Releasable ignored = shard.acquirePrimaryOperationLock()) {
// start finalization of recovery
recoveryThread.start();
latch.await();
// recovery can only be finalized after we release the current primaryOperationLock
assertThat(shard.state(), equalTo(IndexShardState.STARTED));
}
// recovery can now be finalized
recoveryThread.join();
assertThat(shard.state(), equalTo(IndexShardState.RELOCATED));
try (Releasable ignored = shard.acquirePrimaryOperationLock()) {
// lock can again be acquired
assertThat(shard.state(), equalTo(IndexShardState.RELOCATED));
}
}
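// A minimal sketch (illustrative only, not part of this commit) of the handoff contract verified above:
// relocated(...) blocks until in-flight operation locks are released, so an operation holding a lock
// never observes a concurrent transition to RELOCATED.
private void relocationHandoffSketch(IndexShard shard) throws InterruptedException {
Thread handoff = new Thread(() -> shard.relocated("sketch"));
try (Releasable ignored = shard.acquirePrimaryOperationLock()) {
handoff.start();
// relocated() cannot complete while this lock is held; the shard is still STARTED here
}
handoff.join(); // once the lock is released, the handoff completes and the state becomes RELOCATED
}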
public void testStressRelocated() throws Exception {
assertAcked(client().admin().indices().prepareCreate("test").setSettings(
Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)
).get());
ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test");
final IndexShard shard = test.getShardOrNull(0);
final int numThreads = randomIntBetween(2, 4);
Thread[] indexThreads = new Thread[numThreads];
CountDownLatch allPrimaryOperationLocksAcquired = new CountDownLatch(numThreads);
CyclicBarrier barrier = new CyclicBarrier(numThreads + 1);
for (int i = 0; i < indexThreads.length; i++) {
indexThreads[i] = new Thread() {
@Override
public void run() {
try (Releasable operationLock = shard.acquirePrimaryOperationLock()) {
allPrimaryOperationLocksAcquired.countDown();
barrier.await();
} catch (InterruptedException | BrokenBarrierException e) {
throw new RuntimeException(e);
}
}
};
indexThreads[i].start();
}
AtomicBoolean relocated = new AtomicBoolean();
final Thread recoveryThread = new Thread(() -> {
shard.relocated("simulated recovery");
relocated.set(true);
});
// ensure we wait for all primary operation locks to be acquired
allPrimaryOperationLocksAcquired.await();
// start recovery thread
recoveryThread.start();
assertThat(relocated.get(), equalTo(false));
assertThat(shard.getActiveOperationsCount(), greaterThan(0));
// ensure we only transition to RELOCATED state after pending operations have completed
assertThat(shard.state(), equalTo(IndexShardState.STARTED));
// complete pending operations
barrier.await();
// complete recovery/relocation
recoveryThread.join();
// ensure relocated successfully once pending operations are done
assertThat(relocated.get(), equalTo(true));
assertThat(shard.state(), equalTo(IndexShardState.RELOCATED));
assertThat(shard.getActiveOperationsCount(), equalTo(0));
for (Thread indexThread : indexThreads) {
indexThread.join();
}
}
public void testRecoverFromStore() throws IOException {
createIndex("test");
ensureGreen();
@@ -857,6 +949,27 @@ public class IndexShardTests extends ESSingleNodeTestCase {
assertHitCount(client().prepareSearch().get(), 1);
}
public void testRecoveryFailsAfterMovingToRelocatedState() throws InterruptedException {
createIndex("test");
ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test");
final IndexShard shard = test.getShardOrNull(0);
ShardRouting origRouting = shard.routingEntry();
assertThat(shard.state(), equalTo(IndexShardState.STARTED));
ShardRouting inRecoveryRouting = new ShardRouting(origRouting);
ShardRoutingHelper.relocate(inRecoveryRouting, "some_node");
shard.updateRoutingEntry(inRecoveryRouting, true);
shard.relocated("simulate mark as relocated");
assertThat(shard.state(), equalTo(IndexShardState.RELOCATED));
ShardRouting failedRecoveryRouting = new ShardRouting(origRouting);
try {
shard.updateRoutingEntry(failedRecoveryRouting, true);
fail("Expected IndexShardRelocatedException");
} catch (IndexShardRelocatedException expected) {
}
}
public void testRestoreShard() throws IOException {
createIndex("test");
createIndex("test_target");

View File

@@ -58,6 +58,7 @@ import static org.elasticsearch.index.shard.IndexShardState.CLOSED;
import static org.elasticsearch.index.shard.IndexShardState.CREATED;
import static org.elasticsearch.index.shard.IndexShardState.POST_RECOVERY;
import static org.elasticsearch.index.shard.IndexShardState.RECOVERING;
import static org.elasticsearch.index.shard.IndexShardState.RELOCATED;
import static org.elasticsearch.index.shard.IndexShardState.STARTED;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.CoreMatchers.equalTo;
@@ -181,7 +182,7 @@ public class IndicesLifecycleListenerIT extends ESIntegTestCase {
ensureGreen();
//the 3 relocated shards get closed on the first node
assertShardStatesMatch(stateChangeListenerNode1, 3, CLOSED);
assertShardStatesMatch(stateChangeListenerNode1, 3, RELOCATED, CLOSED);
//the 3 relocated shards get created on the second node
assertShardStatesMatch(stateChangeListenerNode2, 3, CREATED, RECOVERING, POST_RECOVERY, STARTED);

View File

@@ -23,6 +23,7 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;
@@ -110,8 +111,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase {
SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
final ShardId shardId = shard.shardId();
shard.incrementOperationCounter();
try {
try (Releasable operationLock = shard.acquirePrimaryOperationLock()) {
SyncedFlushUtil.LatchedListener<ShardsSyncedFlushResult> listener = new SyncedFlushUtil.LatchedListener<>();
flushService.attemptSyncedFlush(shardId, listener);
listener.latch.await();
@@ -121,8 +121,6 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase {
assertEquals(0, syncedFlushResult.successfulShards());
assertNotEquals(0, syncedFlushResult.totalShards());
assertEquals("[1] ongoing operations on primary", syncedFlushResult.failureReason());
} finally {
shard.decrementOperationCounter();
}
}

View File

@@ -0,0 +1,89 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.indices.recovery;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.junit.annotations.TestLogging;
import java.util.concurrent.atomic.AtomicBoolean;
import static org.hamcrest.Matchers.equalTo;
@TestLogging("_root:DEBUG")
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
public class IndexPrimaryRelocationIT extends ESIntegTestCase {
private static final int RELOCATION_COUNT = 25;
public void testPrimaryRelocationWhileIndexing() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(2, 3));
client().admin().indices().prepareCreate("test")
.setSettings(Settings.settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
.addMapping("type", "field", "type=string")
.get();
ensureGreen("test");
final AtomicBoolean finished = new AtomicBoolean(false);
Thread indexingThread = new Thread() {
@Override
public void run() {
while (finished.get() == false) {
IndexResponse indexResponse = client().prepareIndex("test", "type", "id").setSource("field", "value").get();
assertThat("deleted document was found", indexResponse.isCreated(), equalTo(true));
DeleteResponse deleteResponse = client().prepareDelete("test", "type", "id").get();
assertThat("indexed document was not found", deleteResponse.isFound(), equalTo(true));
}
}
};
indexingThread.start();
ClusterState initialState = client().admin().cluster().prepareState().get().getState();
DiscoveryNode[] dataNodes = initialState.getNodes().dataNodes().values().toArray(DiscoveryNode.class);
DiscoveryNode relocationSource = initialState.getNodes().dataNodes().get(initialState.getRoutingTable().shardRoutingTable("test", 0).primaryShard().currentNodeId());
for (int i = 0; i < RELOCATION_COUNT; i++) {
DiscoveryNode relocationTarget = randomFrom(dataNodes);
while (relocationTarget.equals(relocationSource)) {
relocationTarget = randomFrom(dataNodes);
}
logger.info("--> [iteration {}] relocating from {} to {} ", i, relocationSource.getName(), relocationTarget.getName());
client().admin().cluster().prepareReroute()
.add(new MoveAllocationCommand("test", 0, relocationSource.getId(), relocationTarget.getId()))
.execute().actionGet();
ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForRelocatingShards(0).execute().actionGet();
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
logger.info("--> [iteration {}] relocation complete", i);
relocationSource = relocationTarget;
if (indexingThread.isAlive() == false) { // indexing process aborted early, no need for more relocations as test has already failed
break;
}
}
finished.set(true);
indexingThread.join();
}
}

View File

@@ -286,7 +286,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
assertRecoveryState(nodeARecoveryStates.get(0), 0, Type.STORE, Stage.DONE, nodeA, nodeA, false);
validateIndexRecoveryState(nodeARecoveryStates.get(0).getIndex());
assertOnGoingRecoveryState(nodeBRecoveryStates.get(0), 0, Type.RELOCATION, nodeA, nodeB, false);
assertOnGoingRecoveryState(nodeBRecoveryStates.get(0), 0, Type.PRIMARY_RELOCATION, nodeA, nodeB, false);
validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex());
logger.info("--> request node recovery stats");
@@ -339,7 +339,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
assertThat(recoveryStates.size(), equalTo(1));
assertRecoveryState(recoveryStates.get(0), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false);
assertRecoveryState(recoveryStates.get(0), 0, Type.PRIMARY_RELOCATION, Stage.DONE, nodeA, nodeB, false);
validateIndexRecoveryState(recoveryStates.get(0).getIndex());
statsResponse = client().admin().cluster().prepareNodesStats().clear().setIndices(new CommonStatsFlags(CommonStatsFlags.Flag.Recovery)).get();
@@ -400,7 +400,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
assertRecoveryState(nodeARecoveryStates.get(0), 0, Type.REPLICA, Stage.DONE, nodeB, nodeA, false);
validateIndexRecoveryState(nodeARecoveryStates.get(0).getIndex());
assertRecoveryState(nodeBRecoveryStates.get(0), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false);
assertRecoveryState(nodeBRecoveryStates.get(0), 0, Type.PRIMARY_RELOCATION, Stage.DONE, nodeA, nodeB, false);
validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex());
// relocations of replicas are marked as REPLICA and the source node is the node holding the primary (B)
@@ -421,7 +421,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
nodeCRecoveryStates = findRecoveriesForTargetNode(nodeC, recoveryStates);
assertThat(nodeCRecoveryStates.size(), equalTo(1));
assertRecoveryState(nodeBRecoveryStates.get(0), 0, Type.RELOCATION, Stage.DONE, nodeA, nodeB, false);
assertRecoveryState(nodeBRecoveryStates.get(0), 0, Type.PRIMARY_RELOCATION, Stage.DONE, nodeA, nodeB, false);
validateIndexRecoveryState(nodeBRecoveryStates.get(0).getIndex());
// relocations of replicas are marked as REPLICA and the source node is the node holding the primary (B)
@@ -503,7 +503,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
final IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex(INDEX_NAME, INDEX_TYPE).
docs[i] = client().prepareIndex(name, INDEX_TYPE).
setSource("foo-int", randomInt(),
"foo-string", randomAsciiOfLength(32),
"foo-float", randomFloat());
@@ -511,8 +511,8 @@ public class IndexRecoveryIT extends ESIntegTestCase {
indexRandom(true, docs);
flush();
assertThat(client().prepareSearch(INDEX_NAME).setSize(0).get().getHits().totalHits(), equalTo((long) numDocs));
return client().admin().indices().prepareStats(INDEX_NAME).execute().actionGet();
assertThat(client().prepareSearch(name).setSize(0).get().getHits().totalHits(), equalTo((long) numDocs));
return client().admin().indices().prepareStats(name).execute().actionGet();
}
private void validateIndexRecoveryState(RecoveryState.Index indexState) {

View File

@@ -69,7 +69,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
StartRecoveryRequest request = new StartRecoveryRequest(shardId,
new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
randomBoolean(), null, RecoveryState.Type.STORE, randomLong());
null, RecoveryState.Type.STORE, randomLong());
Store store = newStore(createTempDir());
RecoverySourceHandler handler = new RecoverySourceHandler(null, request, recoverySettings, null, logger);
Directory dir = store.directory();
@@ -118,7 +118,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
StartRecoveryRequest request = new StartRecoveryRequest(shardId,
new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
randomBoolean(), null, RecoveryState.Type.STORE, randomLong());
null, RecoveryState.Type.STORE, randomLong());
Path tempDir = createTempDir();
Store store = newStore(tempDir, false);
AtomicBoolean failedEngine = new AtomicBoolean(false);
@@ -181,7 +181,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
StartRecoveryRequest request = new StartRecoveryRequest(shardId,
new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
new DiscoveryNode("b", DummyTransportAddress.INSTANCE, Version.CURRENT),
randomBoolean(), null, RecoveryState.Type.STORE, randomLong());
null, RecoveryState.Type.STORE, randomLong());
Path tempDir = createTempDir();
Store store = newStore(tempDir, false);
AtomicBoolean failedEngine = new AtomicBoolean(false);

View File

@@ -43,11 +43,9 @@ public class StartRecoveryRequestTests extends ESTestCase {
new ShardId("test", "_na_", 0),
new DiscoveryNode("a", new LocalTransportAddress("1"), targetNodeVersion),
new DiscoveryNode("b", new LocalTransportAddress("1"), targetNodeVersion),
true,
Store.MetadataSnapshot.EMPTY,
RecoveryState.Type.RELOCATION,
RecoveryState.Type.PRIMARY_RELOCATION,
1L
);
ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
@@ -63,7 +61,6 @@ public class StartRecoveryRequestTests extends ESTestCase {
assertThat(outRequest.shardId(), equalTo(inRequest.shardId()));
assertThat(outRequest.sourceNode(), equalTo(inRequest.sourceNode()));
assertThat(outRequest.targetNode(), equalTo(inRequest.targetNode()));
assertThat(outRequest.markAsRelocated(), equalTo(inRequest.markAsRelocated()));
assertThat(outRequest.metadataSnapshot().asMap(), equalTo(inRequest.metadataSnapshot().asMap()));
assertThat(outRequest.recoveryId(), equalTo(inRequest.recoveryId()));
assertThat(outRequest.recoveryType(), equalTo(inRequest.recoveryType()));

View File

@@ -62,9 +62,9 @@ public class JvmGcMonitorServiceSettingsTests extends ESTestCase {
public void testMissingSetting() throws InterruptedException {
String collector = randomAsciiOfLength(5);
Set<AbstractMap.SimpleEntry<String, String>> entries = new HashSet<>();
entries.add(new AbstractMap.SimpleEntry<>("monitor.jvm.gc.collector." + collector + ".warn", randomTimeValue()));
entries.add(new AbstractMap.SimpleEntry<>("monitor.jvm.gc.collector." + collector + ".info", randomTimeValue()));
entries.add(new AbstractMap.SimpleEntry<>("monitor.jvm.gc.collector." + collector + ".debug", randomTimeValue()));
entries.add(new AbstractMap.SimpleEntry<>("monitor.jvm.gc.collector." + collector + ".warn", randomPositiveTimeValue()));
entries.add(new AbstractMap.SimpleEntry<>("monitor.jvm.gc.collector." + collector + ".info", randomPositiveTimeValue()));
entries.add(new AbstractMap.SimpleEntry<>("monitor.jvm.gc.collector." + collector + ".debug", randomPositiveTimeValue()));
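// note: randomTimeValue() can yield a zero duration such as "0s"; the switch to
// randomPositiveTimeValue() (added to ESTestCase in this commit) presumably
// guards against zero thresholds for these warn/info/debug settings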
Settings.Builder builder = Settings.builder();
// drop a random setting or two

View File

@@ -19,21 +19,15 @@
package org.elasticsearch.plugins;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliToolTestCase;
import java.io.IOException;
import java.net.MalformedURLException;
import java.nio.file.Path;
import static org.elasticsearch.common.cli.CliTool.ExitStatus.IO_ERROR;
import static org.elasticsearch.common.cli.CliTool.ExitStatus.OK_AND_EXIT;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.is;
public class PluginCliTests extends CliToolTestCase {
public void testHelpWorks() throws IOException {
public void testHelpWorks() throws Exception {
CliToolTestCase.CaptureOutputTerminal terminal = new CliToolTestCase.CaptureOutputTerminal();
assertThat(new PluginCli(terminal).execute(args("--help")), is(OK_AND_EXIT));
assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin.help");
@@ -53,12 +47,4 @@ public class PluginCliTests extends CliToolTestCase {
assertThat(new PluginCli(terminal).execute(args("list -h")), is(OK_AND_EXIT));
assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin-list.help");
}
public void testUrlSpacesInPath() throws MalformedURLException {
CliToolTestCase.CaptureOutputTerminal terminal = new CliToolTestCase.CaptureOutputTerminal();
Path tmpDir = createTempDir().resolve("foo deps");
String finalDir = tmpDir.toAbsolutePath().toUri().toURL().toString();
CliTool.ExitStatus execute = new PluginCli(terminal).execute("install", finalDir);
assertThat(execute.status(), is(IO_ERROR.status()));
}
}

View File

@@ -151,7 +151,7 @@ public class FullRollingRestartIT extends ESIntegTestCase {
ClusterState state = client().admin().cluster().prepareState().get().getState();
RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").get();
for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) {
assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode() + "\n" + state.prettyPrint(), recoveryState.getType() != RecoveryState.Type.RELOCATION);
assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode() + "\n" + state.prettyPrint(), recoveryState.getType() != RecoveryState.Type.PRIMARY_RELOCATION);
}
internalCluster().restartRandomDataNode();
ensureGreen();
@@ -159,7 +159,7 @@ public class FullRollingRestartIT extends ESIntegTestCase {
recoveryResponse = client().admin().indices().prepareRecoveries("test").get();
for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) {
assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode()+ "-- \nbefore: \n" + state.prettyPrint() + "\nafter: \n" + afterState.prettyPrint(), recoveryState.getType() != RecoveryState.Type.RELOCATION);
assertTrue("relocated from: " + recoveryState.getSourceNode() + " to: " + recoveryState.getTargetNode()+ "-- \nbefore: \n" + state.prettyPrint() + "\nafter: \n" + afterState.prettyPrint(), recoveryState.getType() != RecoveryState.Type.PRIMARY_RELOCATION);
}
}
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -22,7 +22,7 @@ parameter to indicate the paths to the required metrics. The syntax for defining
Pipeline aggregations cannot have sub-aggregations but depending on the type it can reference another pipeline in the `buckets_path`
allowing pipeline aggregations to be chained. For example, you can chain together two derivatives to calculate the second derivative
(e.g. a derivative of a derivative).
(i.e. a derivative of a derivative).
NOTE: Because pipeline aggregations only add to the output, when chaining pipeline aggregations the output of each pipeline aggregation
will be included in the final output.
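For illustration, a hedged sketch of such a chain via the Java client API of this era (builder names such as `PipelineAggregatorBuilders.derivative` and `setBucketsPaths` are assumptions and may differ by version; the index and field names are made up):

import org.elasticsearch.client.Client;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders;

public class SecondDerivativeExample {
    // "deriv" differentiates the monthly "sales" sum; "second_deriv"
    // differentiates "deriv", i.e. a derivative of a derivative
    public static void search(Client client) {
        client.prepareSearch("sales_index")
            .addAggregation(AggregationBuilders.dateHistogram("histo")
                .field("date")
                .interval(DateHistogramInterval.MONTH)
                .subAggregation(AggregationBuilders.sum("sales").field("price"))
                .subAggregation(PipelineAggregatorBuilders.derivative("deriv")
                    .setBucketsPaths("sales"))
                .subAggregation(PipelineAggregatorBuilders.derivative("second_deriv")
                    .setBucketsPaths("deriv")))
            .get();
    }
}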

View File

@@ -126,6 +126,10 @@ to prevent clashes with the watcher plugin
* `watcher.interval.medium` is now `resource.reload.interval.medium`
* `watcher.interval.high` is now `resource.reload.interval.high`
==== index.gateway setting renamed
* `index.gateway.local.sync` is now `index.translog.sync_interval`
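A hedged sketch of the renamed keys with the Java `Settings` builder (the duration values are illustrative only):

import org.elasticsearch.common.settings.Settings;

Settings settings = Settings.builder()
        .put("resource.reload.interval.medium", "25s") // was watcher.interval.medium
        .put("index.translog.sync_interval", "5s")     // was index.gateway.local.sync
        .build();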
==== Hunspell dictionary configuration
The parameter `indices.analysis.hunspell.dictionary.location` has been

View File

@@ -48,3 +48,8 @@ Proxy settings have been deprecated and renamed:
If you are using proxy settings, update your settings as deprecated ones will
be removed in the next major version.
[float]
=== Multicast plugin deprecated
The `discovery-multicast` plugin has been deprecated in 2.2.0 and has
been removed in 3.0.0.

View File

@@ -166,7 +166,7 @@ with `_parent` field mapping created before version `2.0.0`. The data of these i
The format of the join between parent and child documents has changed with the `2.0.0` release. The old
format can't be read from version `3.0.0` onwards. The new format allows for a much more efficient and
scalable join between parent and child documents and the join data structures are stored on on disk
scalable join between parent and child documents and the join data structures are stored on disk
data structures, as opposed to before, when the join data structures were stored in the JVM heap space.
==== `score_type` has been removed

View File

@@ -36,7 +36,6 @@ import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;
import org.elasticsearch.index.mapper.ParseContext;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
@@ -119,7 +118,7 @@ public class StandaloneRunner extends CliTool {
terminal.println("## Extracted text");
terminal.println("--------------------- BEGIN -----------------------");
terminal.println("%s", doc.get("file.content"));
terminal.println(doc.get("file.content"));
terminal.println("---------------------- END ------------------------");
terminal.println("## Metadata");
printMetadataContent(doc, AttachmentMapper.FieldNames.AUTHOR);
@@ -135,18 +134,14 @@ public class StandaloneRunner extends CliTool {
}
private void printMetadataContent(ParseContext.Document doc, String field) {
terminal.println("- %s: %s", field, doc.get(docMapper.mappers().getMapper("file." + field).fieldType().name()));
terminal.println("- " + field + ":" + doc.get(docMapper.mappers().getMapper("file." + field).fieldType().name()));
}
public static byte[] copyToBytes(Path path) throws IOException {
try (InputStream is = Files.newInputStream(path)) {
if (is == null) {
throw new FileNotFoundException("Resource [" + path + "] not found in classpath");
}
try (BytesStreamOutput out = new BytesStreamOutput()) {
copy(is, out);
return out.bytes().toBytes();
}
try (InputStream is = Files.newInputStream(path);
BytesStreamOutput out = new BytesStreamOutput()) {
copy(is, out);
return out.bytes().toBytes();
}
}
@@ -177,7 +172,7 @@ public class StandaloneRunner extends CliTool {
}
public static void main(String[] args) {
public static void main(String[] args) throws Exception {
StandaloneRunner pluginManager = new StandaloneRunner();
pluginManager.execute(args);
}

View File

@@ -24,6 +24,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool.ExitStatus;
import org.elasticsearch.common.cli.CliToolTestCase;
import org.elasticsearch.common.cli.UserError;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.hamcrest.Matcher;
@@ -167,7 +168,7 @@ public class BootstrapCliParserTests extends CliToolTestCase {
assertThatTerminalOutput(containsString("Parameter [network.host] needs value"));
}
public void testParsingErrors() {
public void testParsingErrors() throws Exception {
BootstrapCLIParser parser = new BootstrapCLIParser(terminal);
// unknown params
@@ -229,12 +230,10 @@ public class BootstrapCliParserTests extends CliToolTestCase {
public void testThatHelpfulErrorMessageIsGivenWhenParametersAreOutOfOrder() throws Exception {
BootstrapCLIParser parser = new BootstrapCLIParser(terminal);
try {
parser.parse("start", new String[]{"--foo=bar", "-Dbaz=qux"});
fail("expected IllegalArgumentException for out-of-order parameters");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("must be before any parameters starting with --"));
}
UserError e = expectThrows(UserError.class, () -> {
parser.parse("start", new String[]{"--foo=bar", "-Dbaz=qux"});
});
assertThat(e.getMessage(), containsString("must be before any parameters starting with --"));
}
private void registerProperties(String ... systemProperties) {

View File

@@ -71,9 +71,9 @@ public class CliToolTests extends CliToolTestCase {
final AtomicReference<Boolean> executed = new AtomicReference<>(false);
final NamedCommand cmd = new NamedCommand("cmd", terminal) {
@Override
public CliTool.ExitStatus execute(Settings settings, Environment env) {
public CliTool.ExitStatus execute(Settings settings, Environment env) throws UserError {
executed.set(true);
return CliTool.ExitStatus.USAGE;
throw new UserError(CliTool.ExitStatus.USAGE, "bad usage");
}
};
SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd);
@@ -82,39 +82,7 @@ public class CliToolTests extends CliToolTestCase {
assertCommandHasBeenExecuted(executed);
}
public void testIOError() throws Exception {
Terminal terminal = new MockTerminal();
final AtomicReference<Boolean> executed = new AtomicReference<>(false);
final NamedCommand cmd = new NamedCommand("cmd", terminal) {
@Override
public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
executed.set(true);
throw new IOException("io error");
}
};
SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd);
CliTool.ExitStatus status = tool.execute();
assertStatus(status, CliTool.ExitStatus.IO_ERROR);
assertCommandHasBeenExecuted(executed);
}
public void testCodeError() throws Exception {
Terminal terminal = new MockTerminal();
final AtomicReference<Boolean> executed = new AtomicReference<>(false);
final NamedCommand cmd = new NamedCommand("cmd", terminal) {
@Override
public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
executed.set(true);
throw new Exception("random error");
}
};
SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd);
CliTool.ExitStatus status = tool.execute();
assertStatus(status, CliTool.ExitStatus.CODE_ERROR);
assertCommandHasBeenExecuted(executed);
}
public void testMultiCommand() {
public void testMultiCommand() throws Exception {
Terminal terminal = new MockTerminal();
int count = randomIntBetween(2, 7);
List<AtomicReference<Boolean>> executed = new ArrayList<>(count);
@@ -141,7 +109,7 @@ public class CliToolTests extends CliToolTestCase {
}
}
public void testMultiCommandUnknownCommand() {
public void testMultiCommandUnknownCommand() throws Exception {
Terminal terminal = new MockTerminal();
int count = randomIntBetween(2, 7);
List<AtomicReference<Boolean>> executed = new ArrayList<>(count);
@@ -184,7 +152,7 @@ public class CliToolTests extends CliToolTestCase {
assertThat(terminal.getTerminalOutput(), hasItem(containsString("cmd1 help")));
}
public void testMultiCommandToolHelp() {
public void testMultiCommandToolHelp() throws Exception {
CaptureOutputTerminal terminal = new CaptureOutputTerminal();
NamedCommand[] cmds = new NamedCommand[2];
cmds[0] = new NamedCommand("cmd0", terminal) {
@@ -206,7 +174,7 @@ public class CliToolTests extends CliToolTestCase {
assertThat(terminal.getTerminalOutput(), hasItem(containsString("tool help")));
}
public void testMultiCommandCmdHelp() {
public void testMultiCommandCmdHelp() throws Exception {
CaptureOutputTerminal terminal = new CaptureOutputTerminal();
NamedCommand[] cmds = new NamedCommand[2];
cmds[0] = new NamedCommand("cmd0", terminal) {
@@ -228,31 +196,19 @@ public class CliToolTests extends CliToolTestCase {
assertThat(terminal.getTerminalOutput(), hasItem(containsString("cmd1 help")));
}
public void testThatThrowExceptionCanBeLogged() throws Exception {
public void testNonUserErrorPropagates() throws Exception {
CaptureOutputTerminal terminal = new CaptureOutputTerminal();
NamedCommand cmd = new NamedCommand("cmd", terminal) {
@Override
public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
throw new ElasticsearchException("error message");
throw new IOException("error message");
}
};
SingleCmdTool tool = new SingleCmdTool("tool", terminal, cmd);
assertStatus(tool.execute(), CliTool.ExitStatus.CODE_ERROR);
assertThat(terminal.getTerminalOutput(), hasSize(1));
assertThat(terminal.getTerminalOutput(), hasItem(containsString("error message")));
// set env... and log stack trace
try {
System.setProperty(Terminal.DEBUG_SYSTEM_PROPERTY, "true");
terminal = new CaptureOutputTerminal();
assertStatus(new SingleCmdTool("tool", terminal, cmd).execute(), CliTool.ExitStatus.CODE_ERROR);
assertThat(terminal.getTerminalOutput(), hasSize(2));
assertThat(terminal.getTerminalOutput(), hasItem(containsString("error message")));
// This class must be part of the stack trace
assertThat(terminal.getTerminalOutput(), hasItem(containsString(getClass().getName())));
} finally {
System.clearProperty(Terminal.DEBUG_SYSTEM_PROPERTY);
}
IOException e = expectThrows(IOException.class, () -> {
tool.execute();
});
assertEquals("error message", e.getMessage());
}
public void testMultipleLaunch() throws Exception {

View File

@@ -19,8 +19,8 @@
package org.elasticsearch.plugins;
import java.io.File;
import java.io.IOException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.nio.file.DirectoryStream;
import java.nio.file.FileAlreadyExistsException;
@@ -28,6 +28,7 @@ import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.PosixFileAttributeView;
import java.nio.file.attribute.PosixFileAttributes;
@@ -43,6 +44,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliToolTestCase;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.cli.UserError;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
@@ -193,6 +195,16 @@ public class InstallPluginCommandTests extends ESTestCase {
assertPlugin("fake", pluginDir, env);
}
public void testSpaceInUrl() throws Exception {
Environment env = createEnv();
Path pluginDir = createTempDir();
String pluginZip = createPlugin("fake", pluginDir);
Path pluginZipWithSpaces = createTempFile("foo bar", ".zip");
Files.copy(new URL(pluginZip).openStream(), pluginZipWithSpaces, StandardCopyOption.REPLACE_EXISTING);
installPlugin(pluginZipWithSpaces.toUri().toURL().toString(), env);
assertPlugin("fake", pluginDir, env);
}
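// Why toUri().toURL() above: Path.toUri() percent-encodes the space in the file
// name, so the resulting "file:" URL (e.g. ".../foo%20bar.zip") parses where a
// raw path string with a literal space would not. A minimal sketch:
private static URL urlForPathWithSpaces(Path zip) throws Exception {
    return zip.toUri().toURL(); // the space becomes %20
}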
public void testPluginsDirMissing() throws Exception {
Environment env = createEnv();
Files.delete(env.pluginsFile());
@@ -211,7 +223,7 @@ public class InstallPluginCommandTests extends ESTestCase {
IOException e = expectThrows(IOException.class, () -> {
installPlugin(pluginZip, env);
});
assertTrue(e.getMessage(), e.getMessage().contains("Plugins directory is read only"));
assertTrue(e.getMessage(), e.getMessage().contains(env.pluginsFile().toString()));
}
assertInstallCleaned(env);
}
@@ -219,7 +231,7 @@ public class InstallPluginCommandTests extends ESTestCase {
public void testBuiltinModule() throws Exception {
Environment env = createEnv();
String pluginZip = createPlugin("lang-groovy", createTempDir());
IOException e = expectThrows(IOException.class, () -> {
UserError e = expectThrows(UserError.class, () -> {
installPlugin(pluginZip, env);
});
assertTrue(e.getMessage(), e.getMessage().contains("is a system module"));
@@ -288,7 +300,7 @@ public class InstallPluginCommandTests extends ESTestCase {
Environment env = createEnv();
String pluginZip = createPlugin("fake", createTempDir());
installPlugin(pluginZip, env);
IOException e = expectThrows(IOException.class, () -> {
UserError e = expectThrows(UserError.class, () -> {
installPlugin(pluginZip, env);
});
assertTrue(e.getMessage(), e.getMessage().contains("already exists"));
@@ -312,7 +324,7 @@ public class InstallPluginCommandTests extends ESTestCase {
Path binDir = pluginDir.resolve("bin");
Files.createFile(binDir);
String pluginZip = createPlugin("fake", pluginDir);
IOException e = expectThrows(IOException.class, () -> {
UserError e = expectThrows(UserError.class, () -> {
installPlugin(pluginZip, env);
});
assertTrue(e.getMessage(), e.getMessage().contains("not a directory"));
@@ -326,7 +338,7 @@ public class InstallPluginCommandTests extends ESTestCase {
Files.createDirectories(dirInBinDir);
Files.createFile(dirInBinDir.resolve("somescript"));
String pluginZip = createPlugin("fake", pluginDir);
IOException e = expectThrows(IOException.class, () -> {
UserError e = expectThrows(UserError.class, () -> {
installPlugin(pluginZip, env);
});
assertTrue(e.getMessage(), e.getMessage().contains("Directories not allowed in bin dir for plugin"));
@@ -401,7 +413,7 @@ public class InstallPluginCommandTests extends ESTestCase {
Path configDir = pluginDir.resolve("config");
Files.createFile(configDir);
String pluginZip = createPlugin("fake", pluginDir);
IOException e = expectThrows(IOException.class, () -> {
UserError e = expectThrows(UserError.class, () -> {
installPlugin(pluginZip, env);
});
assertTrue(e.getMessage(), e.getMessage().contains("not a directory"));
@@ -415,7 +427,7 @@ public class InstallPluginCommandTests extends ESTestCase {
Files.createDirectories(dirInConfigDir);
Files.createFile(dirInConfigDir.resolve("myconfig.yml"));
String pluginZip = createPlugin("fake", pluginDir);
IOException e = expectThrows(IOException.class, () -> {
UserError e = expectThrows(UserError.class, () -> {
installPlugin(pluginZip, env);
});
assertTrue(e.getMessage(), e.getMessage().contains("Directories not allowed in config dir for plugin"));

View File

@@ -28,6 +28,7 @@ import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliToolTestCase;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.cli.UserError;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
@@ -66,7 +67,7 @@ public class RemovePluginCommandTests extends ESTestCase {
public void testMissing() throws Exception {
Environment env = createEnv();
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> {
UserError e = expectThrows(UserError.class, () -> {
removePlugin("dne", env);
});
assertTrue(e.getMessage(), e.getMessage().contains("Plugin dne not found"));
@@ -101,9 +102,10 @@ public class RemovePluginCommandTests extends ESTestCase {
public void testBinNotDir() throws Exception {
Environment env = createEnv();
Files.createDirectories(env.pluginsFile().resolve("elasticsearch"));
IllegalStateException e = expectThrows(IllegalStateException.class, () -> {
UserError e = expectThrows(UserError.class, () -> {
removePlugin("elasticsearch", env);
});
assertTrue(e.getMessage(), e.getMessage().contains("not a directory"));
assertTrue(Files.exists(env.pluginsFile().resolve("elasticsearch"))); // did not remove
assertTrue(Files.exists(env.binFile().resolve("elasticsearch")));
assertRemoveCleaned(env);

View File

@@ -28,11 +28,8 @@ import org.junit.After;
import org.junit.Before;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.Writer;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.greaterThan;
@@ -73,7 +70,7 @@ public abstract class CliToolTestCase extends ESTestCase {
}
@Override
protected void doPrint(String msg, Object... args) {
protected void doPrint(String msg) {
}
@Override
@@ -87,7 +84,7 @@ public abstract class CliToolTestCase extends ESTestCase {
}
@Override
public void print(String msg, Object... args) {
public void print(String msg) {
}
@Override
@@ -99,7 +96,7 @@ public abstract class CliToolTestCase extends ESTestCase {
*/
public static class CaptureOutputTerminal extends MockTerminal {
List<String> terminalOutput = new ArrayList();
List<String> terminalOutput = new ArrayList<>();
public CaptureOutputTerminal() {
super(Verbosity.NORMAL);
@@ -110,13 +107,13 @@ public abstract class CliToolTestCase extends ESTestCase {
}
@Override
protected void doPrint(String msg, Object... args) {
terminalOutput.add(String.format(Locale.ROOT, msg, args));
protected void doPrint(String msg) {
terminalOutput.add(msg);
}
@Override
public void print(String msg, Object... args) {
doPrint(msg, args);
public void print(String msg) {
doPrint(msg);
}
@Override

View File

@@ -1435,11 +1435,8 @@ public abstract class ESIntegTestCase extends ESTestCase {
if (!bogusIds.isEmpty()) {
// delete the bogus types again - it might trigger merges or at least holes in the segments and enforces deleted docs!
for (Tuple<String, String> doc : bogusIds) {
// see https://github.com/elasticsearch/elasticsearch/issues/8706
final DeleteResponse deleteResponse = client().prepareDelete(doc.v1(), RANDOM_BOGUS_TYPE, doc.v2()).get();
if (deleteResponse.isFound() == false) {
logger.warn("failed to delete a dummy doc [{}][{}]", doc.v1(), doc.v2());
}
assertTrue("failed to delete a dummy doc [" + doc.v1() + "][" + doc.v2() + "]",
client().prepareDelete(doc.v1(), RANDOM_BOGUS_TYPE, doc.v2()).get().isFound());
}
}
if (forceRefresh) {

View File

@@ -382,9 +382,18 @@ public abstract class ESTestCase extends LuceneTestCase {
return generateRandomStringArray(maxArraySize, maxStringSize, allowNull, true);
}
private static final String[] TIME_SUFFIXES = new String[]{"d", "H", "ms", "s", "S", "w"};
private static String randomTimeValue(int lower, int upper) {
return randomIntBetween(lower, upper) + randomFrom(TIME_SUFFIXES);
}
public static String randomTimeValue() {
final String[] values = new String[]{"d", "H", "ms", "s", "S", "w"};
return randomIntBetween(0, 1000) + randomFrom(values);
return randomTimeValue(0, 1000);
}
public static String randomPositiveTimeValue() {
return randomTimeValue(1, 1000);
}
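// A minimal usage sketch, assuming a duration setting that must not be zero
// (the key is the one exercised by JvmGcMonitorServiceSettingsTests in this
// commit; the collector name "old" is illustrative, and
// org.elasticsearch.common.settings.Settings is assumed to be imported):
public static Settings randomGcWarnThresholdSetting() {
    return Settings.builder()
            .put("monitor.jvm.gc.collector.old.warn", randomPositiveTimeValue())
            .build();
}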
/**

View File

@@ -1036,7 +1036,7 @@ public final class InternalTestCluster extends TestCluster {
IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
for (IndexService indexService : indexServices) {
for (IndexShard indexShard : indexService) {
assertThat("index shard counter on shard " + indexShard.shardId() + " on node " + nodeAndClient.name + " not 0", indexShard.getOperationsCount(), equalTo(0));
assertThat("index shard counter on shard " + indexShard.shardId() + " on node " + nodeAndClient.name + " not 0", indexShard.getActiveOperationsCount(), equalTo(0));
}
}
}