Merge branch 'master' into feature/rank-eval

This commit is contained in:
Christoph Büscher 2016-08-05 12:09:07 +02:00
commit 3895aa470a
147 changed files with 1692 additions and 1369 deletions

View File

@@ -145,7 +145,7 @@ public class AllocationBenchmark {
RoutingTable routingTable = rb.build();
DiscoveryNodes.Builder nb = DiscoveryNodes.builder();
for (int i = 1; i <= numNodes; i++) {
nb.put(Allocators.newNode("node" + i, Collections.singletonMap("tag", "tag_" + (i % numTags))));
nb.add(Allocators.newNode("node" + i, Collections.singletonMap("tag", "tag_" + (i % numTags))));
}
initialClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData).routingTable(routingTable).nodes

View File

@@ -119,6 +119,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
current.println(" reason: $test.skipTest")
}
if (test.setup != null) {
// Insert a setup defined outside of the docs
String setup = setups[test.setup]
if (setup == null) {
throw new InvalidUserDataException("Couldn't find setup "
@@ -136,13 +137,23 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
response.contents.eachLine { current.println(" $it") }
}
void emitDo(String method, String pathAndQuery,
String body, String catchPart, boolean inSetup) {
void emitDo(String method, String pathAndQuery, String body,
String catchPart, List warnings, boolean inSetup) {
def (String path, String query) = pathAndQuery.tokenize('?')
current.println(" - do:")
if (catchPart != null) {
current.println(" catch: $catchPart")
}
if (false == warnings.isEmpty()) {
current.println(" warnings:")
for (String warning in warnings) {
// Escape " because we're going to quote the warning
String escaped = warning.replaceAll('"', '\\\\"')
/* Quote the warning in case it starts with [ which makes
* it look too much like an array. */
current.println(" - \"$escaped\"")
}
}
current.println(" raw:")
current.println(" method: $method")
current.println(" path: \"$path\"")
@@ -200,7 +211,8 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
// Leading '/'s break the generated paths
pathAndQuery = pathAndQuery.substring(1)
}
emitDo(method, pathAndQuery, body, catchPart, inSetup)
emitDo(method, pathAndQuery, body, catchPart, snippet.warnings,
inSetup)
}
}
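A minimal Java sketch (the input string is hypothetical) of the quoting rule the Groovy code above applies: quotes inside the warning are escaped, and the warning is emitted quoted so a leading [ is not parsed as a YAML array.

public class WarningQuotingSketch {
    public static void main(String[] args) {
        String warning = "[deprecation] the \"foo\" parameter is deprecated";
        // Escape " because the warning is emitted inside quotes
        String escaped = warning.replace("\"", "\\\"");
        // Quote the warning so a leading [ doesn't look like an array
        System.out.println("          - \"" + escaped + "\"");
    }
}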

View File

@@ -37,8 +37,9 @@ public class SnippetsTask extends DefaultTask {
private static final String CATCH = /catch:\s*((?:\/[^\/]+\/)|[^ \]]+)/
private static final String SKIP = /skip:([^\]]+)/
private static final String SETUP = /setup:([^ \]]+)/
private static final String WARNING = /warning:(.+)/
private static final String TEST_SYNTAX =
/(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP) ?/
/(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP|$WARNING) ?/
/**
* Action to take on each snippet. Called with a single parameter, an
@@ -158,6 +159,10 @@ public class SnippetsTask extends DefaultTask {
snippet.setup = it.group(6)
return
}
if (it.group(7) != null) {
snippet.warnings.add(it.group(7))
return
}
throw new InvalidUserDataException(
"Invalid test marker: $line")
}
@@ -230,6 +235,7 @@ public class SnippetsTask extends DefaultTask {
String language = null
String catchPart = null
String setup = null
List warnings = new ArrayList()
@Override
public String toString() {
@@ -254,6 +260,9 @@ public class SnippetsTask extends DefaultTask {
if (setup) {
result += "[setup:$setup]"
}
for (String warning in warnings) {
result += "[warning:$warning]"
}
}
if (testResponse) {
result += '// TESTRESPONSE'
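A short, self-contained sketch (plain Java regex, hypothetical marker text) of how the new warning: pattern captures its payload inside a // TEST[...] marker:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class WarningMarkerSketch {
    public static void main(String[] args) {
        Pattern warning = Pattern.compile("warning:(.+)");
        Matcher m = warning.matcher("warning:some deprecation message");
        if (m.matches()) {
            System.out.println(m.group(1)); // the captured warning text
        }
    }
}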

View File

@@ -34,6 +34,7 @@ dependencies {
compile "org.elasticsearch.plugin:percolator-client:${version}"
testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
testCompile "junit:junit:${versions.junit}"
testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
}
dependencyLicenses {

View File

@@ -19,6 +19,9 @@
package org.elasticsearch.transport.client;
import io.netty.util.ThreadDeathWatcher;
import io.netty.util.concurrent.GlobalEventExecutor;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Setting;
@@ -34,6 +37,7 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
/**
* A builder to create an instance of {@link TransportClient}
@@ -79,13 +83,28 @@ public class PreBuiltTransportClient extends TransportClient {
@Override
public Settings additionalSettings() {
return Settings.builder()
.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME)
.put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME)
.put("netty.assert.buglevel", true)
return Settings.builder().put("netty.assert.buglevel", true)
.build();
}
}
@Override
public void close() {
super.close();
if (NetworkModule.TRANSPORT_TYPE_SETTING.exists(settings) == false
|| NetworkModule.TRANSPORT_TYPE_SETTING.get(settings).equals(Netty4Plugin.NETTY_TRANSPORT_NAME)) {
try {
GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
try {
ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
}
}
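A hedged usage sketch of the new close() behavior: when the netty4 transport is in use (or no explicit transport type is set), closing the client now also waits, up to five seconds each, for netty's GlobalEventExecutor and ThreadDeathWatcher to go inactive.

import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

public class ClientCloseSketch {
    public static void main(String[] args) {
        // try-with-resources invokes the overridden close() shown above,
        // which may block briefly while netty's shared threads wind down
        try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)) {
            // use the client here
        }
    }
}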

View File

@@ -27,12 +27,13 @@ import org.elasticsearch.index.reindex.ReindexPlugin;
import org.elasticsearch.percolator.PercolatorPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.mustache.MustachePlugin;
import org.elasticsearch.transport.Netty3Plugin;
import org.elasticsearch.transport.Netty4Plugin;
import org.junit.Test;
import java.util.Arrays;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
public class PreBuiltTransportClientTests extends RandomizedTest {
@@ -41,7 +42,8 @@ public class PreBuiltTransportClientTests extends RandomizedTest {
public void testPluginInstalled() {
try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)) {
Settings settings = client.settings();
assertEquals(Netty3Plugin.NETTY_TRANSPORT_NAME, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
assertEquals(Netty4Plugin.NETTY_TRANSPORT_NAME, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
assertEquals(Netty4Plugin.NETTY_TRANSPORT_NAME, NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.get(settings));
}
}
@@ -54,7 +56,8 @@ public class PreBuiltTransportClientTests extends RandomizedTest {
new PreBuiltTransportClient(Settings.EMPTY, plugin);
fail("exception expected");
} catch (IllegalArgumentException ex) {
assertEquals("plugin is already installed", ex.getMessage());
assertTrue("Expected message to start with [plugin already exists: ] but was instead [" + ex.getMessage() + "]",
ex.getMessage().startsWith("plugin already exists: "));
}
}
}

View File

@@ -199,10 +199,14 @@ public class ClusterChangedEvent {
return nodesRemoved() || nodesAdded();
}
// Determines whether or not the current cluster state represents an entirely
// different cluster from the previous cluster state, which will happen when a
// master node is elected that has never been part of the cluster before.
private boolean isNewCluster() {
/**
* Determines whether or not the current cluster state represents an entirely
* new cluster, either when a node joins a cluster for the first time or when
* the node receives a cluster state update from a brand new cluster (different
* UUID from the previous cluster), which will happen when a master node is
* elected that has never been part of the cluster before.
*/
public boolean isNewCluster() {
final String prevClusterUUID = previousState.metaData().clusterUUID();
final String currClusterUUID = state.metaData().clusterUUID();
return prevClusterUUID.equals(currClusterUUID) == false;
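Since isNewCluster() is now public, other cluster-state consumers can react to a cluster-UUID change; a hypothetical listener sketch:

import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterStateListener;

public class NewClusterListenerSketch implements ClusterStateListener {
    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        if (event.isNewCluster()) {
            // the cluster UUID changed between states: treat local indices
            // as candidates for dangling-index import rather than deletion
        }
    }
}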

View File

@@ -280,6 +280,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
public String prettyPrint() {
StringBuilder sb = new StringBuilder();
sb.append("cluster uuid: ").append(metaData.clusterUUID()).append("\n");
sb.append("version: ").append(version).append("\n");
sb.append("state uuid: ").append(stateUUID).append("\n");
sb.append("from_diff: ").append(wasReadFromDiff).append("\n");
@@ -624,6 +625,10 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
return this;
}
public DiscoveryNodes nodes() {
return nodes;
}
public Builder routingResult(RoutingAllocation.Result routingResult) {
this.routingTable = routingResult.routingTable();
this.metaData = routingResult.metaData();
@@ -722,7 +727,6 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException {
return PROTO.readFrom(in, localNode);
}
}
@Override

View File

@@ -357,14 +357,14 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
Builder builder = new Builder().masterNodeId(masterNodeId).localNodeId(localNodeId);
for (DiscoveryNode node : this) {
if (newNodes.contains(node.getId())) {
builder.put(node);
builder.add(node);
}
}
return builder.build();
}
public DiscoveryNodes newNode(DiscoveryNode node) {
return new Builder(this).put(node).build();
return new Builder(this).add(node).build();
}
/**
@@ -554,8 +554,8 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
node = localNode;
}
// someone already built this and validated it's OK, skip the n^2 scans
assert builder.validatePut(node) == null : "building disco nodes from network doesn't pass preflight: "
+ builder.validatePut(node);
assert builder.validateAdd(node) == null : "building disco nodes from network doesn't pass preflight: "
+ builder.validateAdd(node);
builder.putUnsafe(node);
}
return builder.build();
@@ -592,10 +592,10 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
/**
* adds a disco node to the builder. Will throw an {@link IllegalArgumentException} if
* the supplied node doesn't pass the pre-flight checks performed by {@link #validatePut(DiscoveryNode)}
* the supplied node doesn't pass the pre-flight checks performed by {@link #validateAdd(DiscoveryNode)}
*/
public Builder put(DiscoveryNode node) {
final String preflight = validatePut(node);
public Builder add(DiscoveryNode node) {
final String preflight = validateAdd(node);
if (preflight != null) {
throw new IllegalArgumentException(preflight);
}
@@ -603,6 +603,16 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
return this;
}
/**
* Get a node by its id
*
* @param nodeId id of the wanted node
* @return wanted node if it exists. Otherwise <code>null</code>
*/
@Nullable public DiscoveryNode get(String nodeId) {
return nodes.get(nodeId);
}
private void putUnsafe(DiscoveryNode node) {
nodes.put(node.getId(), node);
}
@@ -635,10 +645,10 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
*
* @return null if all is OK or an error message explaining why a node can not be added.
*
* Note: if this method returns a non-null value, calling {@link #put(DiscoveryNode)} will fail with an
* Note: if this method returns a non-null value, calling {@link #add(DiscoveryNode)} will fail with an
* exception
*/
private String validatePut(DiscoveryNode node) {
private String validateAdd(DiscoveryNode node) {
for (ObjectCursor<DiscoveryNode> cursor : nodes.values()) {
final DiscoveryNode existingNode = cursor.value;
if (node.getAddress().equals(existingNode.getAddress()) &&
@@ -646,9 +656,9 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
return "can't add node " + node + ", found existing node " + existingNode + " with same address";
}
if (node.getId().equals(existingNode.getId()) &&
node.getAddress().equals(existingNode.getAddress()) == false) {
node.equals(existingNode) == false) {
return "can't add node " + node + ", found existing node " + existingNode
+ " with the same id, but a different address";
+ " with the same id but is a different node instance";
}
}
return null;
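A minimal sketch of the renamed builder API (node construction elided): put(...) becomes add(...), validation moves to validateAdd(...), and the builder gains a get(nodeId) lookup.

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;

public class DiscoveryNodesBuilderSketch {
    static DiscoveryNodes buildNodes(DiscoveryNode n1, DiscoveryNode n2) {
        DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
        builder.add(n1); // throws IllegalArgumentException on validateAdd(...) conflicts
        builder.add(n2);
        assert builder.get(n1.getId()) != null; // new lookup by node id
        return builder.localNodeId(n1.getId()).build();
    }
}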

View File

@@ -249,11 +249,36 @@ public class AllocationService extends AbstractComponent {
applyFailedShard(allocation, failedShard, unassignedInfo);
}
gatewayAllocator.applyFailedShards(allocation);
reroute(allocation);
String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.routingEntry.shardId().toString());
return buildResultAndLogHealthChange(allocation, "shards failed [" + failedShardsAsString + "] ...");
}
/**
* Unassigns any shards that are associated with nodes that are no longer part of the cluster, potentially promoting replicas
* if needed.
*/
public RoutingAllocation.Result deassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) {
RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle();
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
clusterInfoService.getClusterInfo(), currentNanoTime(), false);
// first, clear from the shards any node id they used to belong to that is now dead
boolean changed = deassociateDeadNodes(allocation);
if (reroute) {
changed |= reroute(allocation);
}
if (!changed) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
}
return buildResultAndLogHealthChange(allocation, reason);
}
/**
* Removes delay markers from unassigned shards based on current time stamp. Returns true if markers were removed.
*/
@@ -352,13 +377,9 @@
}
private boolean reroute(RoutingAllocation allocation) {
boolean changed = false;
// first, clear from the shards any node id they used to belong to that is now dead
changed |= deassociateDeadNodes(allocation);
assert deassociateDeadNodes(allocation) == false : "dead nodes should be explicitly cleaned up. See deassociateDeadNodes";
// elect primaries *before* allocating unassigned, so backups of primaries that failed
// will be moved to primary state and not wait for primaries to be allocated and recovered (*from gateway*)
changed |= electPrimariesAndUnassignedDanglingReplicas(allocation);
boolean changed = electPrimariesAndUnassignedDanglingReplicas(allocation);
// now allocate all the unassigned to available nodes
if (allocation.routingNodes().unassigned().size() > 0) {
@@ -390,8 +411,8 @@
if (candidate != null) {
shardEntry = unassignedIterator.demotePrimaryToReplicaShard();
ShardRouting primarySwappedCandidate = routingNodes.promoteAssignedReplicaShardToPrimary(candidate);
changed = true;
if (primarySwappedCandidate.relocatingNodeId() != null) {
changed = true;
// its also relocating, make sure to move the other routing to primary
RoutingNode node = routingNodes.node(primarySwappedCandidate.relocatingNodeId());
if (node != null) {
@@ -406,7 +427,6 @@
IndexMetaData index = allocation.metaData().getIndexSafe(primarySwappedCandidate.index());
if (IndexMetaData.isIndexUsingShadowReplicas(index.getSettings())) {
routingNodes.reinitShadowPrimary(primarySwappedCandidate);
changed = true;
}
}
}
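A hypothetical caller sketch of the new split: code paths that remove nodes must now call deassociateDeadNodes(...) explicitly, since reroute(...) asserts that dead nodes were already cleaned up.

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;

public class DeadNodeCleanupSketch {
    static ClusterState onNodesLeft(AllocationService allocationService,
                                    ClusterState state, String reason) {
        // clear dead node ids from shards and reroute in one step
        RoutingAllocation.Result result =
                allocationService.deassociateDeadNodes(state, true, reason);
        return ClusterState.builder(state).routingResult(result).build();
    }
}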

View File

@@ -156,7 +156,7 @@ public class ClusterService extends AbstractLifecycleComponent {
public synchronized void setLocalNode(DiscoveryNode localNode) {
assert clusterState.nodes().getLocalNodeId() == null : "local node is already set";
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).put(localNode).localNodeId(localNode.getId());
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).add(localNode).localNodeId(localNode.getId());
this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build();
}

View File

@@ -33,7 +33,6 @@ import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationComman
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.util.Providers;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Setting;
@@ -57,9 +56,12 @@ public class NetworkModule extends AbstractModule {
public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type";
public static final String HTTP_TYPE_KEY = "http.type";
public static final String LOCAL_TRANSPORT = "local";
public static final String HTTP_TYPE_DEFAULT_KEY = "http.type.default";
public static final String TRANSPORT_TYPE_DEFAULT_KEY = "transport.type.default";
public static final Setting<String> TRANSPORT_DEFAULT_TYPE_SETTING = Setting.simpleString("transport.type.default", Property.NodeScope);
public static final Setting<String> HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString("http.type.default", Property.NodeScope);
public static final Setting<String> TRANSPORT_DEFAULT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_DEFAULT_KEY,
Property.NodeScope);
public static final Setting<String> HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_DEFAULT_KEY, Property.NodeScope);
public static final Setting<String> HTTP_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_KEY, Property.NodeScope);
public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope);
public static final Setting<String> TRANSPORT_SERVICE_TYPE_SETTING =

View File

@@ -19,10 +19,13 @@
package org.elasticsearch.common.rounding;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.TimeValue;
import org.joda.time.DateTimeField;
import org.joda.time.DateTimeZone;
import org.joda.time.IllegalInstantException;
import java.io.IOException;
import java.util.Objects;
@@ -54,103 +57,67 @@ public abstract class Rounding implements Streamable {
@Override
public abstract int hashCode();
/**
* Rounding strategy which is based on an interval
*
* {@code rounded = value - (value % interval) }
*/
public static class Interval extends Rounding {
public static Builder builder(DateTimeUnit unit) {
return new Builder(unit);
}
static final byte ID = 0;
public static Builder builder(TimeValue interval) {
return new Builder(interval);
}
public static final ParseField INTERVAL_FIELD = new ParseField("interval");
public static class Builder {
private long interval;
private final DateTimeUnit unit;
private final long interval;
public Interval() { // for serialization
private DateTimeZone timeZone = DateTimeZone.UTC;
public Builder(DateTimeUnit unit) {
this.unit = unit;
this.interval = -1;
}
/**
* Creates a new interval rounding.
*
* @param interval The interval
*/
public Interval(long interval) {
this.interval = interval;
public Builder(TimeValue interval) {
this.unit = null;
if (interval.millis() < 1)
throw new IllegalArgumentException("Zero or negative time interval not supported");
this.interval = interval.millis();
}
@Override
public byte id() {
return ID;
public Builder timeZone(DateTimeZone timeZone) {
if (timeZone == null) {
throw new IllegalArgumentException("Setting null as timezone is not supported");
}
this.timeZone = timeZone;
return this;
}
public static long roundKey(long value, long interval) {
if (value < 0) {
return (value - interval + 1) / interval;
public Rounding build() {
Rounding timeZoneRounding;
if (unit != null) {
timeZoneRounding = new TimeUnitRounding(unit, timeZone);
} else {
return value / interval;
timeZoneRounding = new TimeIntervalRounding(interval, timeZone);
}
}
public static long roundValue(long key, long interval) {
return key * interval;
}
@Override
public long round(long value) {
return roundKey(value, interval) * interval;
}
@Override
public long nextRoundingValue(long value) {
assert value == round(value);
return value + interval;
}
@Override
public void readFrom(StreamInput in) throws IOException {
interval = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(interval);
}
@Override
public int hashCode() {
return Objects.hash(interval);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
Interval other = (Interval) obj;
return Objects.equals(interval, other.interval);
return timeZoneRounding;
}
}
public static class FactorRounding extends Rounding {
static class TimeUnitRounding extends Rounding {
static final byte ID = 7;
static final byte ID = 1;
public static final ParseField FACTOR_FIELD = new ParseField("factor");
private DateTimeUnit unit;
private DateTimeField field;
private DateTimeZone timeZone;
private Rounding rounding;
private float factor;
FactorRounding() { // for serialization
TimeUnitRounding() { // for serialization
}
FactorRounding(Rounding rounding, float factor) {
this.rounding = rounding;
this.factor = factor;
TimeUnitRounding(DateTimeUnit unit, DateTimeZone timeZone) {
this.unit = unit;
this.field = unit.field(timeZone);
this.timeZone = timeZone;
}
@Override
@@ -160,31 +127,51 @@ public abstract class Rounding implements Streamable {
@Override
public long round(long utcMillis) {
return rounding.round((long) (factor * utcMillis));
long rounded = field.roundFloor(utcMillis);
if (timeZone.isFixed() == false && timeZone.getOffset(utcMillis) != timeZone.getOffset(rounded)) {
// in this case, we crossed a time zone transition. In some edge
// cases this will result in a value that is not a rounded value
// itself. We need to round again to make sure. This will have no
// effect in cases where 'rounded' was already a proper rounded value
rounded = field.roundFloor(rounded);
}
assert rounded == field.roundFloor(rounded);
return rounded;
}
@Override
public long nextRoundingValue(long value) {
return rounding.nextRoundingValue(value);
public long nextRoundingValue(long utcMillis) {
long floor = round(utcMillis);
// add one unit and round to get to next rounded value
long next = round(field.add(floor, 1));
if (next == floor) {
// in rare case we need to add more than one unit
next = round(field.add(floor, 2));
}
return next;
}
@Override
public void readFrom(StreamInput in) throws IOException {
rounding = Rounding.Streams.read(in);
factor = in.readFloat();
unit = DateTimeUnit.resolve(in.readByte());
timeZone = DateTimeZone.forID(in.readString());
field = unit.field(timeZone);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
Rounding.Streams.write(rounding, out);
out.writeFloat(factor);
out.writeByte(unit.id());
out.writeString(timeZone.getID());
}
@Override
public int hashCode() {
return Objects.hash(rounding, factor);
return Objects.hash(unit, timeZone);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
@@ -193,28 +180,31 @@ public abstract class Rounding implements Streamable {
if (getClass() != obj.getClass()) {
return false;
}
FactorRounding other = (FactorRounding) obj;
return Objects.equals(rounding, other.rounding)
&& Objects.equals(factor, other.factor);
TimeUnitRounding other = (TimeUnitRounding) obj;
return Objects.equals(unit, other.unit) && Objects.equals(timeZone, other.timeZone);
}
@Override
public String toString() {
return "[" + timeZone + "][" + unit + "]";
}
}
public static class OffsetRounding extends Rounding {
static class TimeIntervalRounding extends Rounding {
static final byte ID = 8;
static final byte ID = 2;
public static final ParseField OFFSET_FIELD = new ParseField("offset");
private long interval;
private DateTimeZone timeZone;
private Rounding rounding;
private long offset;
OffsetRounding() { // for serialization
TimeIntervalRounding() { // for serialization
}
public OffsetRounding(Rounding intervalRounding, long offset) {
this.rounding = intervalRounding;
this.offset = offset;
TimeIntervalRounding(long interval, DateTimeZone timeZone) {
if (interval < 1)
throw new IllegalArgumentException("Zero or negative time interval not supported");
this.interval = interval;
this.timeZone = timeZone;
}
@Override
@@ -223,32 +213,100 @@ public abstract class Rounding implements Streamable {
}
@Override
public long round(long value) {
return rounding.round(value - offset) + offset;
public long round(long utcMillis) {
long timeLocal = timeZone.convertUTCToLocal(utcMillis);
long rounded = roundKey(timeLocal, interval) * interval;
long roundedUTC;
if (isInDSTGap(rounded) == false) {
roundedUTC = timeZone.convertLocalToUTC(rounded, true, utcMillis);
// check if we crossed DST transition, in this case we want the
// last rounded value before the transition
long transition = timeZone.previousTransition(utcMillis);
if (transition != utcMillis && transition > roundedUTC) {
roundedUTC = round(transition - 1);
}
} else {
/*
* Edge case where the rounded local time is illegal and landed
* in a DST gap. In this case, we choose 1ms tick after the
* transition date. We don't want the transition date itself
* because those dates, when rounded themselves, fall into the
* previous interval. This would violate the invariant that the
* rounding operation should be idempotent.
*/
roundedUTC = timeZone.previousTransition(utcMillis) + 1;
}
return roundedUTC;
}
private static long roundKey(long value, long interval) {
if (value < 0) {
return (value - interval + 1) / interval;
} else {
return value / interval;
}
}
/**
* Determine whether the local instant is a valid instant in the given
* time zone. The logic for this is taken from
* {@link DateTimeZone#convertLocalToUTC(long, boolean)} for the
* `strict` mode case, but instead of throwing an
* {@link IllegalInstantException}, which is costly, we want to return a
* flag indicating that the value is illegal in that time zone.
*/
private boolean isInDSTGap(long instantLocal) {
if (timeZone.isFixed()) {
return false;
}
// get the offset at instantLocal (first estimate)
int offsetLocal = timeZone.getOffset(instantLocal);
// adjust instantLocal using the estimate and recalc the offset
int offset = timeZone.getOffset(instantLocal - offsetLocal);
// if the offsets differ, we must be near a DST boundary
if (offsetLocal != offset) {
// determine if we are in the DST gap
long nextLocal = timeZone.nextTransition(instantLocal - offsetLocal);
if (nextLocal == (instantLocal - offsetLocal)) {
nextLocal = Long.MAX_VALUE;
}
long nextAdjusted = timeZone.nextTransition(instantLocal - offset);
if (nextAdjusted == (instantLocal - offset)) {
nextAdjusted = Long.MAX_VALUE;
}
if (nextLocal != nextAdjusted) {
// we are in the DST gap
return true;
}
}
return false;
}
@Override
public long nextRoundingValue(long value) {
return rounding.nextRoundingValue(value - offset) + offset;
public long nextRoundingValue(long time) {
long timeLocal = time;
timeLocal = timeZone.convertUTCToLocal(time);
long next = timeLocal + interval;
return timeZone.convertLocalToUTC(next, false);
}
@Override
public void readFrom(StreamInput in) throws IOException {
rounding = Rounding.Streams.read(in);
offset = in.readLong();
interval = in.readVLong();
timeZone = DateTimeZone.forID(in.readString());
}
@Override
public void writeTo(StreamOutput out) throws IOException {
Rounding.Streams.write(rounding, out);
out.writeLong(offset);
out.writeVLong(interval);
out.writeString(timeZone.getID());
}
@Override
public int hashCode() {
return Objects.hash(rounding, offset);
return Objects.hash(interval, timeZone);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
@@ -257,9 +315,8 @@ public abstract class Rounding implements Streamable {
if (getClass() != obj.getClass()) {
return false;
}
OffsetRounding other = (OffsetRounding) obj;
return Objects.equals(rounding, other.rounding)
&& Objects.equals(offset, other.offset);
TimeIntervalRounding other = (TimeIntervalRounding) obj;
return Objects.equals(interval, other.interval) && Objects.equals(timeZone, other.timeZone);
}
}
@@ -274,11 +331,8 @@ public abstract class Rounding implements Streamable {
Rounding rounding = null;
byte id = in.readByte();
switch (id) {
case Interval.ID: rounding = new Interval(); break;
case TimeZoneRounding.TimeUnitRounding.ID: rounding = new TimeZoneRounding.TimeUnitRounding(); break;
case TimeZoneRounding.TimeIntervalRounding.ID: rounding = new TimeZoneRounding.TimeIntervalRounding(); break;
case TimeZoneRounding.FactorRounding.ID: rounding = new FactorRounding(); break;
case OffsetRounding.ID: rounding = new OffsetRounding(); break;
case TimeUnitRounding.ID: rounding = new TimeUnitRounding(); break;
case TimeIntervalRounding.ID: rounding = new TimeIntervalRounding(); break;
default: throw new ElasticsearchException("unknown rounding id [" + id + "]");
}
rounding.readFrom(in);
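With Interval, FactorRounding and OffsetRounding gone, the builder (moved here from TimeZoneRounding) is the single construction path. A small sketch, assuming the DateTimeUnit constants keep their current names:

import org.elasticsearch.common.rounding.DateTimeUnit;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.common.unit.TimeValue;
import org.joda.time.DateTimeZone;

public class RoundingBuilderSketch {
    public static void main(String[] args) {
        // calendar-unit rounding, DST-aware in the given zone
        Rounding byDay = Rounding.builder(DateTimeUnit.DAY_OF_MONTH)
                .timeZone(DateTimeZone.forID("Europe/Berlin")).build();
        // fixed-interval rounding (ten minutes)
        Rounding byTenMinutes = Rounding.builder(TimeValue.timeValueMinutes(10)).build();
        long now = System.currentTimeMillis();
        System.out.println(byDay.round(now) + " / " + byTenMinutes.round(now));
    }
}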

View File

@@ -1,314 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.rounding;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.joda.time.DateTimeField;
import org.joda.time.DateTimeZone;
import org.joda.time.IllegalInstantException;
import java.io.IOException;
import java.util.Objects;
/**
* A rounding strategy for dates. It is typically used to group together dates
* that are part of the same hour/day/month, taking into account time zones and
* daylight saving times.
*/
public abstract class TimeZoneRounding extends Rounding {
public static final ParseField INTERVAL_FIELD = new ParseField("interval");
public static final ParseField TIME_ZONE_FIELD = new ParseField("time_zone");
public static Builder builder(DateTimeUnit unit) {
return new Builder(unit);
}
public static Builder builder(TimeValue interval) {
return new Builder(interval);
}
public static class Builder {
private final DateTimeUnit unit;
private final long interval;
private DateTimeZone timeZone = DateTimeZone.UTC;
private float factor = 1.0f;
private long offset;
public Builder(DateTimeUnit unit) {
this.unit = unit;
this.interval = -1;
}
public Builder(TimeValue interval) {
this.unit = null;
if (interval.millis() < 1)
throw new IllegalArgumentException("Zero or negative time interval not supported");
this.interval = interval.millis();
}
public Builder timeZone(DateTimeZone timeZone) {
if (timeZone == null) {
throw new IllegalArgumentException("Setting null as timezone is not supported");
}
this.timeZone = timeZone;
return this;
}
public Builder offset(long offset) {
this.offset = offset;
return this;
}
public Builder factor(float factor) {
this.factor = factor;
return this;
}
public Rounding build() {
Rounding timeZoneRounding;
if (unit != null) {
timeZoneRounding = new TimeUnitRounding(unit, timeZone);
} else {
timeZoneRounding = new TimeIntervalRounding(interval, timeZone);
}
if (offset != 0) {
timeZoneRounding = new OffsetRounding(timeZoneRounding, offset);
}
if (factor != 1.0f) {
timeZoneRounding = new FactorRounding(timeZoneRounding, factor);
}
return timeZoneRounding;
}
}
static class TimeUnitRounding extends TimeZoneRounding {
static final byte ID = 1;
private DateTimeUnit unit;
private DateTimeField field;
private DateTimeZone timeZone;
TimeUnitRounding() { // for serialization
}
TimeUnitRounding(DateTimeUnit unit, DateTimeZone timeZone) {
this.unit = unit;
this.field = unit.field(timeZone);
this.timeZone = timeZone;
}
@Override
public byte id() {
return ID;
}
@Override
public long round(long utcMillis) {
long rounded = field.roundFloor(utcMillis);
if (timeZone.isFixed() == false && timeZone.getOffset(utcMillis) != timeZone.getOffset(rounded)) {
// in this case, we crossed a time zone transition. In some edge cases this will
// result in a value that is not a rounded value itself. We need to round again
// to make sure. This will have no effect in cases where 'rounded' was already a proper
// rounded value
rounded = field.roundFloor(rounded);
}
assert rounded == field.roundFloor(rounded);
return rounded;
}
@Override
public long nextRoundingValue(long utcMillis) {
long floor = round(utcMillis);
// add one unit and round to get to next rounded value
long next = round(field.add(floor, 1));
if (next == floor) {
// in rare case we need to add more than one unit
next = round(field.add(floor, 2));
}
return next;
}
@Override
public void readFrom(StreamInput in) throws IOException {
unit = DateTimeUnit.resolve(in.readByte());
timeZone = DateTimeZone.forID(in.readString());
field = unit.field(timeZone);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeByte(unit.id());
out.writeString(timeZone.getID());
}
@Override
public int hashCode() {
return Objects.hash(unit, timeZone);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
TimeUnitRounding other = (TimeUnitRounding) obj;
return Objects.equals(unit, other.unit)
&& Objects.equals(timeZone, other.timeZone);
}
@Override
public String toString() {
return "[" + timeZone + "][" + unit +"]";
}
}
static class TimeIntervalRounding extends TimeZoneRounding {
static final byte ID = 2;
private long interval;
private DateTimeZone timeZone;
TimeIntervalRounding() { // for serialization
}
TimeIntervalRounding(long interval, DateTimeZone timeZone) {
if (interval < 1)
throw new IllegalArgumentException("Zero or negative time interval not supported");
this.interval = interval;
this.timeZone = timeZone;
}
@Override
public byte id() {
return ID;
}
@Override
public long round(long utcMillis) {
long timeLocal = timeZone.convertUTCToLocal(utcMillis);
long rounded = Rounding.Interval.roundValue(Rounding.Interval.roundKey(timeLocal, interval), interval);
long roundedUTC;
if (isInDSTGap(rounded) == false) {
roundedUTC = timeZone.convertLocalToUTC(rounded, true, utcMillis);
// check if we crossed DST transition, in this case we want the last rounded value before the transition
long transition = timeZone.previousTransition(utcMillis);
if (transition != utcMillis && transition > roundedUTC) {
roundedUTC = round(transition - 1);
}
} else {
/*
* Edge case where the rounded local time is illegal and landed
* in a DST gap. In this case, we choose 1ms tick after the
* transition date. We don't want the transition date itself
* because those dates, when rounded themselves, fall into the
* previous interval. This would violate the invariant that the
* rounding operation should be idempotent.
*/
roundedUTC = timeZone.previousTransition(utcMillis) + 1;
}
return roundedUTC;
}
/**
* Determine whether the local instant is a valid instant in the given
* time zone. The logic for this is taken from
* {@link DateTimeZone#convertLocalToUTC(long, boolean)} for the
* `strict` mode case, but instead of throwing an
* {@link IllegalInstantException}, which is costly, we want to return a
* flag indicating that the value is illegal in that time zone.
*/
private boolean isInDSTGap(long instantLocal) {
if (timeZone.isFixed()) {
return false;
}
// get the offset at instantLocal (first estimate)
int offsetLocal = timeZone.getOffset(instantLocal);
// adjust instantLocal using the estimate and recalc the offset
int offset = timeZone.getOffset(instantLocal - offsetLocal);
// if the offsets differ, we must be near a DST boundary
if (offsetLocal != offset) {
// determine if we are in the DST gap
long nextLocal = timeZone.nextTransition(instantLocal - offsetLocal);
if (nextLocal == (instantLocal - offsetLocal)) {
nextLocal = Long.MAX_VALUE;
}
long nextAdjusted = timeZone.nextTransition(instantLocal - offset);
if (nextAdjusted == (instantLocal - offset)) {
nextAdjusted = Long.MAX_VALUE;
}
if (nextLocal != nextAdjusted) {
// we are in the DST gap
return true;
}
}
return false;
}
@Override
public long nextRoundingValue(long time) {
long timeLocal = time;
timeLocal = timeZone.convertUTCToLocal(time);
long next = timeLocal + interval;
return timeZone.convertLocalToUTC(next, false);
}
@Override
public void readFrom(StreamInput in) throws IOException {
interval = in.readVLong();
timeZone = DateTimeZone.forID(in.readString());
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVLong(interval);
out.writeString(timeZone.getID());
}
@Override
public int hashCode() {
return Objects.hash(interval, timeZone);
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
TimeIntervalRounding other = (TimeIntervalRounding) obj;
return Objects.equals(interval, other.interval)
&& Objects.equals(timeZone, other.timeZone);
}
}
}
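For the DST-gap handling above, a self-contained Joda-Time sketch (zone and date chosen for illustration) of the kind of local instant isInDSTGap guards against:

import org.joda.time.DateTimeZone;

public class DstGapSketch {
    public static void main(String[] args) {
        // Europe/Berlin, 2016-03-27: local clocks jump from 02:00 to 03:00,
        // so local instants in [02:00, 03:00) never exist ("DST gap")
        DateTimeZone tz = DateTimeZone.forID("Europe/Berlin");
        long utc = 1459040400000L; // 2016-03-27T01:00:00Z == 03:00 local time
        long local = tz.convertUTCToLocal(utc);
        long inGap = local - 30 * 60 * 1000L; // 02:30 local: inside the gap
        // lenient conversion silently adjusts; strict conversion would throw
        // IllegalInstantException, which is the cost isInDSTGap avoids paying
        System.out.println(tz.convertLocalToUTC(inGap, false));
    }
}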

View File

@@ -134,7 +134,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
nodesBuilder.put(discovery.localNode());
nodesBuilder.add(discovery.localNode());
}
nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId());
// remove the NO_MASTER block in this case
@@ -160,7 +160,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
nodesBuilder.put(discovery.localNode());
nodesBuilder.add(discovery.localNode());
}
nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId());
currentState = ClusterState.builder(currentState).nodes(nodesBuilder).build();
@@ -231,8 +231,8 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov
}
// reroute here, so we eagerly remove dead nodes from the routing
ClusterState updatedState = ClusterState.builder(currentState).nodes(newNodes).build();
RoutingAllocation.Result routingResult = master.allocationService.reroute(
ClusterState.builder(updatedState).build(), "elected as master");
RoutingAllocation.Result routingResult = master.allocationService.deassociateDeadNodes(
ClusterState.builder(updatedState).build(), true, "node stopped");
return ClusterState.builder(updatedState).routingResult(routingResult).build();
}

View File

@@ -413,8 +413,7 @@ public class NodeJoinController extends AbstractComponent {
final DiscoveryNodes currentNodes = currentState.nodes();
boolean nodesChanged = false;
ClusterState.Builder newState = ClusterState.builder(currentState);
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentNodes);
ClusterState.Builder newState;
if (joiningNodes.size() == 1 && joiningNodes.get(0).equals(FINISH_ELECTION_TASK)) {
return results.successes(joiningNodes).build(currentState);
@@ -423,16 +422,17 @@
// use these joins to try and become the master.
// Note that we don't have to do any validation of the amount of joining nodes - the commit
// during the cluster state publishing guarantees that we have enough
nodesBuilder.masterNodeId(currentNodes.getLocalNodeId());
ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks())
.removeGlobalBlock(discoverySettings.getNoMasterBlock()).build();
newState.blocks(clusterBlocks);
newState = becomeMasterAndTrimConflictingNodes(currentState, joiningNodes);
nodesChanged = true;
} else if (nodesBuilder.isLocalNodeElectedMaster() == false) {
} else if (currentNodes.isLocalNodeElectedMaster() == false) {
logger.trace("processing node joins, but we are not the master. current master: {}", currentNodes.getMasterNode());
throw new NotMasterException("Node [" + currentNodes.getLocalNode() + "] not master for join request");
} else {
newState = ClusterState.builder(currentState);
}
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes());
assert nodesBuilder.isLocalNodeElectedMaster();
// processing any joins
@@ -443,7 +443,7 @@
logger.debug("received a join request for an existing node [{}]", node);
} else {
try {
nodesBuilder.put(node);
nodesBuilder.add(node);
nodesChanged = true;
} catch (IllegalArgumentException e) {
results.failure(node, e);
@@ -468,6 +468,28 @@
return results.build(newState.build());
}
private ClusterState.Builder becomeMasterAndTrimConflictingNodes(ClusterState currentState, List<DiscoveryNode> joiningNodes) {
assert currentState.nodes().getMasterNodeId() == null : currentState.prettyPrint();
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentState.nodes());
nodesBuilder.masterNodeId(currentState.nodes().getLocalNodeId());
ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks())
.removeGlobalBlock(discoverySettings.getNoMasterBlock()).build();
for (final DiscoveryNode joiningNode : joiningNodes) {
final DiscoveryNode existingNode = nodesBuilder.get(joiningNode.getId());
if (existingNode != null && existingNode.equals(joiningNode) == false) {
logger.debug("removing existing node [{}], which conflicts with incoming join from [{}]", existingNode, joiningNode);
nodesBuilder.remove(existingNode.getId());
}
}
// now trim any left over dead nodes - either left there when the previous master stepped down
// or removed by us above
ClusterState tmpState = ClusterState.builder(currentState).nodes(nodesBuilder).blocks(clusterBlocks).build();
RoutingAllocation.Result result = allocationService.deassociateDeadNodes(tmpState, false,
"removed dead nodes on election");
return ClusterState.builder(tmpState).routingResult(result);
}
@Override
public boolean runOnlyOnMaster() {
// we validate that we are allowed to change the cluster state during cluster state processing

View File

@@ -570,7 +570,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
if (!electMasterService.hasEnoughMasterNodes(remainingNodesClusterState.nodes())) {
return resultBuilder.build(rejoin.apply(remainingNodesClusterState, "not enough master nodes"));
} else {
final RoutingAllocation.Result routingResult = allocationService.reroute(remainingNodesClusterState, describeTasks(tasks));
final RoutingAllocation.Result routingResult =
allocationService.deassociateDeadNodes(remainingNodesClusterState, true, describeTasks(tasks));
return resultBuilder.build(ClusterState.builder(remainingNodesClusterState).routingResult(routingResult).build());
}
}

View File

@@ -18,16 +18,19 @@
*/
package org.elasticsearch.index.translog;
import org.apache.lucene.store.ByteArrayDataOutput;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.DataInput;
import org.apache.lucene.store.DataOutput;
import org.apache.lucene.store.InputStreamDataInput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.OutputStreamIndexOutput;
import org.apache.lucene.store.SimpleFSDirectory;
import org.elasticsearch.common.io.Channels;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.OpenOption;
import java.nio.file.Path;
@@ -35,69 +38,117 @@ import java.nio.file.Path;
*/
class Checkpoint {
static final int BUFFER_SIZE = Integer.BYTES // ops
+ Long.BYTES // offset
+ Long.BYTES;// generation
final long offset;
final int numOps;
final long generation;
private static final int INITIAL_VERSION = 1; // start with 1, just to recognize there was some magic serialization logic before
private static final String CHECKPOINT_CODEC = "ckp";
static final int FILE_SIZE = CodecUtil.headerLength(CHECKPOINT_CODEC)
+ Integer.BYTES // ops
+ Long.BYTES // offset
+ Long.BYTES // generation
+ CodecUtil.footerLength();
static final int LEGACY_NON_CHECKSUMMED_FILE_LENGTH = Integer.BYTES // ops
+ Long.BYTES // offset
+ Long.BYTES; // generation
Checkpoint(long offset, int numOps, long generation) {
this.offset = offset;
this.numOps = numOps;
this.generation = generation;
}
Checkpoint(DataInput in) throws IOException {
offset = in.readLong();
numOps = in.readInt();
generation = in.readLong();
}
private void write(FileChannel channel) throws IOException {
byte[] buffer = new byte[BUFFER_SIZE];
final ByteArrayDataOutput out = new ByteArrayDataOutput(buffer);
write(out);
Channels.writeToChannel(buffer, channel);
}
void write(DataOutput out) throws IOException {
private void write(DataOutput out) throws IOException {
out.writeLong(offset);
out.writeInt(numOps);
out.writeLong(generation);
}
// reads a checksummed checkpoint introduced in ES 5.0.0
static Checkpoint readChecksummedV1(DataInput in) throws IOException {
return new Checkpoint(in.readLong(), in.readInt(), in.readLong());
}
// reads checkpoint from ES < 5.0.0
static Checkpoint readNonChecksummed(DataInput in) throws IOException {
return new Checkpoint(in.readLong(), in.readInt(), in.readLong());
}
@Override
public String toString() {
return "Checkpoint{" +
"offset=" + offset +
", numOps=" + numOps +
", translogFileGeneration= " + generation +
'}';
"offset=" + offset +
", numOps=" + numOps +
", translogFileGeneration= " + generation +
'}';
}
public static Checkpoint read(Path path) throws IOException {
try (InputStream in = Files.newInputStream(path)) {
return new Checkpoint(new InputStreamDataInput(in));
try (Directory dir = new SimpleFSDirectory(path.getParent())) {
try (final IndexInput indexInput = dir.openInput(path.getFileName().toString(), IOContext.DEFAULT)) {
if (indexInput.length() == LEGACY_NON_CHECKSUMMED_FILE_LENGTH) {
// OLD unchecksummed file that was written < ES 5.0.0
return Checkpoint.readNonChecksummed(indexInput);
}
// We checksum the entire file before we even go and parse it. If it's corrupted we barf right here.
CodecUtil.checksumEntireFile(indexInput);
final int fileVersion = CodecUtil.checkHeader(indexInput, CHECKPOINT_CODEC, INITIAL_VERSION, INITIAL_VERSION);
return Checkpoint.readChecksummedV1(indexInput);
}
}
}
public static void write(ChannelFactory factory, Path checkpointFile, Checkpoint checkpoint, OpenOption... options) throws IOException {
final ByteArrayOutputStream byteOutputStream = new ByteArrayOutputStream(FILE_SIZE) {
@Override
public synchronized byte[] toByteArray() {
// don't clone
return buf;
}
};
final String resourceDesc = "checkpoint(path=\"" + checkpointFile + "\", gen=" + checkpoint + ")";
try (final OutputStreamIndexOutput indexOutput =
new OutputStreamIndexOutput(resourceDesc, checkpointFile.toString(), byteOutputStream, FILE_SIZE)) {
CodecUtil.writeHeader(indexOutput, CHECKPOINT_CODEC, INITIAL_VERSION);
checkpoint.write(indexOutput);
CodecUtil.writeFooter(indexOutput);
assert indexOutput.getFilePointer() == FILE_SIZE :
"get you number straights. Bytes written: " + indexOutput.getFilePointer() + " buffer size: " + FILE_SIZE;
assert indexOutput.getFilePointer() < 512 :
"checkpoint files have to be smaller 512b for atomic writes. size: " + indexOutput.getFilePointer();
}
// now go and write to the channel, in one go.
try (FileChannel channel = factory.open(checkpointFile, options)) {
checkpoint.write(channel);
Channels.writeToChannel(byteOutputStream.toByteArray(), channel);
// no need to force metadata, file size stays the same and we did the full fsync
// when we first created the file, so the directory entry doesn't change as well
channel.force(false);
}
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Checkpoint that = (Checkpoint) o;
if (offset != that.offset) return false;
if (numOps != that.numOps) return false;
if (offset != that.offset) {
return false;
}
if (numOps != that.numOps) {
return false;
}
return generation == that.generation;
}
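The checkpoint is now a fixed-size, Lucene-checksummed blob. A sketch (using Lucene's CodecUtil as above) of how the two on-disk sizes relate:

import org.apache.lucene.codecs.CodecUtil;

public class CheckpointSizeSketch {
    public static void main(String[] args) {
        // header + ops (int) + offset (long) + generation (long) + footer
        int checksummed = CodecUtil.headerLength("ckp")
                + Integer.BYTES + Long.BYTES + Long.BYTES
                + CodecUtil.footerLength();
        // pre-5.0.0 files carry only the three raw fields
        int legacy = Integer.BYTES + Long.BYTES + Long.BYTES;
        System.out.println(checksummed + " bytes vs legacy " + legacy + " bytes");
    }
}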

View File

@@ -200,7 +200,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
Files.createDirectories(location);
final long generation = 1;
Checkpoint checkpoint = new Checkpoint(0, 0, generation);
Checkpoint.write(getChannelFactory(), location.resolve(CHECKPOINT_FILE_NAME), checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME);
Checkpoint.write(getChannelFactory(), checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
IOUtils.fsync(checkpointFile, false);
current = createWriter(generation);
this.lastCommittedTranslogFileGeneration = NOT_SET_GENERATION;

View File

@@ -36,11 +36,9 @@ import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cli.SettingCommand;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.translog.Checkpoint;
import java.io.IOException;
import java.nio.channels.Channels;
@@ -168,12 +166,11 @@ public class TruncateTranslogCommand extends SettingCommand {
/** Write a checkpoint file to the given location with the given generation */
public static void writeEmptyCheckpoint(Path filename, int translogLength, long translogGeneration) throws IOException {
try (FileChannel fc = FileChannel.open(filename, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
OutputStreamDataOutput out = new OutputStreamDataOutput(Channels.newOutputStream(fc))) {
Checkpoint emptyCheckpoint = new Checkpoint(translogLength, 0, translogGeneration);
emptyCheckpoint.write(out);
fc.force(true);
}
Checkpoint emptyCheckpoint = new Checkpoint(translogLength, 0, translogGeneration);
Checkpoint.write(FileChannel::open, filename, emptyCheckpoint,
StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
// fsync with metadata here to make sure.
IOUtils.fsync(filename, false);
}
/**

View File

@@ -177,7 +177,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
deleteIndices(event); // also deletes shards of deleted indices
removeUnallocatedIndices(state); // also removes shards of removed indices
removeUnallocatedIndices(event); // also removes shards of removed indices
failMissingShards(state);
@@ -286,28 +286,16 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
});
}
}
// delete local indices that neither exist in the previous cluster state nor are part of the tombstones
for (AllocatedIndex<? extends Shard> indexService : indicesService) {
Index index = indexService.index();
IndexMetaData indexMetaData = event.state().metaData().index(index);
if (indexMetaData == null) {
assert false : "index" + index + " exists locally, doesn't have a metadata but is not part"
+ " of the delete index list. \nprevious state: " + event.previousState().prettyPrint()
+ "\n current state:\n" + event.state().prettyPrint();
logger.warn("[{}] isn't part of metadata but is part of in memory structures. removing", index);
indicesService.deleteIndex(index, "isn't part of metadata (explicit check)");
}
}
}
/**
* Removes indices that have no shards allocated to this node. This does not delete the shard data as we wait for enough
* shard copies to exist in the cluster before deleting shard data (triggered by {@link org.elasticsearch.indices.store.IndicesStore}).
*
* @param state new cluster state
* @param event the cluster changed event
*/
private void removeUnallocatedIndices(final ClusterState state) {
private void removeUnallocatedIndices(final ClusterChangedEvent event) {
final ClusterState state = event.state();
final String localNodeId = state.nodes().getLocalNodeId();
assert localNodeId != null;
@@ -322,6 +310,13 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
for (AllocatedIndex<? extends Shard> indexService : indicesService) {
Index index = indexService.index();
if (indicesWithShards.contains(index) == false) {
// if the cluster change indicates a brand new cluster, we only want
// to remove the in-memory structures for the index and not delete the
// contents on disk because the index will later be re-imported as a
// dangling index
assert state.metaData().index(index) != null || event.isNewCluster() :
"index " + index + " does not exist in the cluster state, it should either " +
"have been deleted or the cluster must be new";
logger.debug("{} removing index, no shards allocated", index);
indicesService.removeIndex(index, "removing index (no shards allocated)");
}

View File

@@ -24,7 +24,6 @@ import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.common.rounding.TimeZoneRounding;
import org.elasticsearch.common.util.LongHash;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.Aggregator;
@@ -45,8 +44,9 @@ import java.util.Map;
/**
* An aggregator for date values. Every date is rounded down using a configured
* {@link TimeZoneRounding}.
* @see TimeZoneRounding
* {@link Rounding}.
*
* @see Rounding
*/
class DateHistogramAggregator extends BucketsAggregator {
@@ -60,14 +60,17 @@ class DateHistogramAggregator extends BucketsAggregator {
private final ExtendedBounds extendedBounds;
private final LongHash bucketOrds;
private long offset;
public DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, InternalOrder order, boolean keyed,
public DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, long offset, InternalOrder order,
boolean keyed,
long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Numeric valuesSource,
DocValueFormat formatter, AggregationContext aggregationContext,
Aggregator parent, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
super(name, factories, aggregationContext, parent, pipelineAggregators, metaData);
this.rounding = rounding;
this.offset = offset;
this.order = order;
this.keyed = keyed;
this.minDocCount = minDocCount;
@@ -100,7 +103,7 @@ class DateHistogramAggregator extends BucketsAggregator {
long previousRounded = Long.MIN_VALUE;
for (int i = 0; i < valuesCount; ++i) {
long value = values.valueAt(i);
long rounded = rounding.round(value);
long rounded = rounding.round(value - offset) + offset;
assert rounded >= previousRounded;
if (rounded == previousRounded) {
continue;
@@ -133,7 +136,7 @@ class DateHistogramAggregator extends BucketsAggregator {
InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0
? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds)
: null;
return new InternalDateHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed,
return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, formatter, keyed,
pipelineAggregators(), metaData());
}
@@ -142,7 +145,7 @@ class DateHistogramAggregator extends BucketsAggregator {
InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0
? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds)
: null;
return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed,
return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, offset, emptyBucketInfo, formatter, keyed,
pipelineAggregators(), metaData());
}
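
The substantive change in this aggregator is the shifted rounding on the hot path: `rounding.round(value - offset) + offset`. A minimal standalone sketch of that arithmetic, with a fixed millisecond interval standing in for the real `Rounding` (which also handles calendar units and time zones):

public class OffsetRoundingSketch {
    // Mirrors rounding.round(value - offset) + offset above, with a
    // fixed interval as a stand-in for the Rounding implementation.
    static long round(long value, long intervalMillis, long offsetMillis) {
        long shifted = Math.floorDiv(value - offsetMillis, intervalMillis) * intervalMillis;
        return shifted + offsetMillis;
    }

    public static void main(String[] args) {
        long day = 24L * 60 * 60 * 1000;
        long sixHours = 6L * 60 * 60 * 1000;
        long t = day + 7L * 60 * 60 * 1000;          // 07:00 on day 2
        System.out.println(round(t, day, 0));        // midnight, day 2
        System.out.println(round(t, day, sixHours)); // 06:00, day 2
    }
}

With a +6h offset the daily bucket boundary moves from midnight to 06:00, which is exactly what the subtract-round-add step buys now that the offset is no longer baked into the Rounding itself.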

View File

@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.bucket.histogram;
import org.elasticsearch.common.rounding.DateTimeUnit;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.common.rounding.TimeZoneRounding;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
@ -95,24 +94,24 @@ public final class DateHistogramAggregatorFactory
}
private Rounding createRounding() {
TimeZoneRounding.Builder tzRoundingBuilder;
Rounding.Builder tzRoundingBuilder;
if (dateHistogramInterval != null) {
DateTimeUnit dateTimeUnit = DATE_FIELD_UNITS.get(dateHistogramInterval.toString());
if (dateTimeUnit != null) {
tzRoundingBuilder = TimeZoneRounding.builder(dateTimeUnit);
tzRoundingBuilder = Rounding.builder(dateTimeUnit);
} else {
// the interval is a time value?
tzRoundingBuilder = TimeZoneRounding.builder(
tzRoundingBuilder = Rounding.builder(
TimeValue.parseTimeValue(dateHistogramInterval.toString(), null, getClass().getSimpleName() + ".interval"));
}
} else {
// the interval is an integer time value in millis?
tzRoundingBuilder = TimeZoneRounding.builder(TimeValue.timeValueMillis(interval));
tzRoundingBuilder = Rounding.builder(TimeValue.timeValueMillis(interval));
}
if (timeZone() != null) {
tzRoundingBuilder.timeZone(timeZone());
}
Rounding rounding = tzRoundingBuilder.offset(offset).build();
Rounding rounding = tzRoundingBuilder.build();
return rounding;
}
@ -138,7 +137,7 @@ public final class DateHistogramAggregatorFactory
// parse any string bounds to longs and round them
roundedBounds = extendedBounds.parseAndValidate(name, context.searchContext(), config.format()).round(rounding);
}
return new DateHistogramAggregator(name, factories, rounding, order, keyed, minDocCount, roundedBounds, valuesSource,
return new DateHistogramAggregator(name, factories, rounding, offset, order, keyed, minDocCount, roundedBounds, valuesSource,
config.format(), context, parent, pipelineAggregators, metaData);
}
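
The `createRounding()` rewrite above keeps the same two-step resolution: a named calendar unit takes precedence, and anything else is parsed as a fixed duration. A small illustrative sketch of that decision, using a toy unit table and duration parser as stand-ins for `DATE_FIELD_UNITS` and `TimeValue.parseTimeValue` (the names and parsing here are assumptions, not the real Elasticsearch code):

import java.util.HashMap;
import java.util.Map;

public class IntervalResolutionSketch {
    static final Map<String, String> DATE_FIELD_UNITS = new HashMap<>();
    static {
        DATE_FIELD_UNITS.put("day", "DAY_OF_MONTH");
        DATE_FIELD_UNITS.put("month", "MONTH_OF_YEAR");
    }

    static String resolve(String interval) {
        String unit = DATE_FIELD_UNITS.get(interval);
        if (unit != null) {
            return "calendar rounding on " + unit;   // named unit wins
        }
        // otherwise treat the string as a fixed duration, e.g. "90m"
        return "fixed rounding every " + parseMillis(interval) + "ms";
    }

    static long parseMillis(String s) {
        long value = Long.parseLong(s.substring(0, s.length() - 1));
        switch (s.charAt(s.length() - 1)) {
            case 's': return value * 1000L;
            case 'm': return value * 60_000L;
            case 'h': return value * 3_600_000L;
            default: throw new IllegalArgumentException(s);
        }
    }

    public static void main(String[] args) {
        System.out.println(resolve("month")); // calendar rounding
        System.out.println(resolve("90m"));   // fixed: 5400000ms
    }
}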

View File

@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.bucket.histogram;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.rounding.Rounding;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser;
@ -45,7 +44,7 @@ public class DateHistogramParser extends NumericValuesSourceParser {
protected DateHistogramAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType,
ValueType targetValueType, Map<ParseField, Object> otherOptions) {
DateHistogramAggregationBuilder factory = new DateHistogramAggregationBuilder(aggregationName);
Object interval = otherOptions.get(Rounding.Interval.INTERVAL_FIELD);
Object interval = otherOptions.get(Histogram.INTERVAL_FIELD);
if (interval == null) {
throw new ParsingException(null, "Missing required field [interval] for histogram aggregation [" + aggregationName + "]");
} else if (interval instanceof Long) {
@ -55,7 +54,7 @@ public class DateHistogramParser extends NumericValuesSourceParser {
} else {
throw new IllegalStateException("Unexpected interval class: " + interval.getClass());
}
Long offset = (Long) otherOptions.get(Rounding.OffsetRounding.OFFSET_FIELD);
Long offset = (Long) otherOptions.get(Histogram.OFFSET_FIELD);
if (offset != null) {
factory.offset(offset);
}
@ -83,12 +82,12 @@ public class DateHistogramParser extends NumericValuesSourceParser {
protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser,
ParseFieldMatcher parseFieldMatcher, Map<ParseField, Object> otherOptions) throws IOException {
if (token.isValue()) {
if (parseFieldMatcher.match(currentFieldName, Rounding.Interval.INTERVAL_FIELD)) {
if (parseFieldMatcher.match(currentFieldName, Histogram.INTERVAL_FIELD)) {
if (token == XContentParser.Token.VALUE_STRING) {
otherOptions.put(Rounding.Interval.INTERVAL_FIELD, new DateHistogramInterval(parser.text()));
otherOptions.put(Histogram.INTERVAL_FIELD, new DateHistogramInterval(parser.text()));
return true;
} else {
otherOptions.put(Rounding.Interval.INTERVAL_FIELD, parser.longValue());
otherOptions.put(Histogram.INTERVAL_FIELD, parser.longValue());
return true;
}
} else if (parseFieldMatcher.match(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) {
@ -97,13 +96,13 @@ public class DateHistogramParser extends NumericValuesSourceParser {
} else if (parseFieldMatcher.match(currentFieldName, Histogram.KEYED_FIELD)) {
otherOptions.put(Histogram.KEYED_FIELD, parser.booleanValue());
return true;
} else if (parseFieldMatcher.match(currentFieldName, Rounding.OffsetRounding.OFFSET_FIELD)) {
} else if (parseFieldMatcher.match(currentFieldName, Histogram.OFFSET_FIELD)) {
if (token == XContentParser.Token.VALUE_STRING) {
otherOptions.put(Rounding.OffsetRounding.OFFSET_FIELD,
otherOptions.put(Histogram.OFFSET_FIELD,
DateHistogramAggregationBuilder.parseStringOffset(parser.text()));
return true;
} else {
otherOptions.put(Rounding.OffsetRounding.OFFSET_FIELD, parser.longValue());
otherOptions.put(Histogram.OFFSET_FIELD, parser.longValue());
return true;
}
} else {

View File

@ -178,14 +178,17 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
private final DocValueFormat format;
private final boolean keyed;
private final long minDocCount;
private final long offset;
private final EmptyBucketInfo emptyBucketInfo;
InternalDateHistogram(String name, List<Bucket> buckets, InternalOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo,
InternalDateHistogram(String name, List<Bucket> buckets, InternalOrder order, long minDocCount, long offset,
EmptyBucketInfo emptyBucketInfo,
DocValueFormat formatter, boolean keyed, List<PipelineAggregator> pipelineAggregators,
Map<String, Object> metaData) {
super(name, pipelineAggregators, metaData);
this.buckets = buckets;
this.order = order;
this.offset = offset;
assert (minDocCount == 0) == (emptyBucketInfo != null);
this.minDocCount = minDocCount;
this.emptyBucketInfo = emptyBucketInfo;
@ -205,6 +208,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
} else {
emptyBucketInfo = null;
}
offset = in.readLong();
format = in.readNamedWriteable(DocValueFormat.class);
keyed = in.readBoolean();
buckets = in.readList(stream -> new Bucket(stream, keyed, format));
@ -217,6 +221,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
if (minDocCount == 0) {
emptyBucketInfo.writeTo(out);
}
out.writeLong(offset);
out.writeNamedWriteable(format);
out.writeBoolean(keyed);
out.writeList(buckets);
@ -234,7 +239,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
@Override
public InternalDateHistogram create(List<Bucket> buckets) {
return new InternalDateHistogram(name, buckets, order, minDocCount, emptyBucketInfo, format,
return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, format,
keyed, pipelineAggregators(), metaData);
}
@ -328,7 +333,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
long max = bounds.getMax();
while (key <= max) {
iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
key = emptyBucketInfo.rounding.nextRoundingValue(key);
key = nextKey(key).longValue();
}
}
} else {
@ -337,7 +342,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
if (key < firstBucket.key) {
while (key < firstBucket.key) {
iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
key = emptyBucketInfo.rounding.nextRoundingValue(key);
key = nextKey(key).longValue();
}
}
}
@ -349,10 +354,10 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
while (iter.hasNext()) {
Bucket nextBucket = list.get(iter.nextIndex());
if (lastBucket != null) {
long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key);
long key = nextKey(lastBucket.key).longValue();
while (key < nextBucket.key) {
iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs));
key = emptyBucketInfo.rounding.nextRoundingValue(key);
key = nextKey(key).longValue();
}
assert key == nextBucket.key;
}
@ -393,7 +398,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
CollectionUtil.introSort(reducedBuckets, order.comparator());
}
return new InternalDateHistogram(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo,
return new InternalDateHistogram(getName(), reducedBuckets, order, minDocCount, offset, emptyBucketInfo,
format, keyed, pipelineAggregators(), getMetaData());
}
@ -424,7 +429,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
@Override
public Number nextKey(Number key) {
return emptyBucketInfo.rounding.nextRoundingValue(key.longValue());
return emptyBucketInfo.rounding.nextRoundingValue(key.longValue() - offset) + offset;
}
@Override
@ -435,7 +440,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation<
buckets2.add((Bucket) b);
}
buckets2 = Collections.unmodifiableList(buckets2);
return new InternalDateHistogram(name, buckets2, order, minDocCount, emptyBucketInfo, format,
return new InternalDateHistogram(name, buckets2, order, minDocCount, offset, emptyBucketInfo, format,
keyed, pipelineAggregators(), getMetaData());
}
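
The empty-bucket loops above now step through keys via `nextKey`, which applies the same offset trick as the aggregator: `nextRoundingValue(key.longValue() - offset) + offset`. A minimal sketch of the gap-filling, assuming a fixed interval so the next rounding value is simply `key + interval`:

import java.util.ArrayList;
import java.util.List;

public class EmptyBucketSketch {
    // Mirrors nextRoundingValue(key - offset) + offset above.
    static long nextKey(long key, long interval, long offset) {
        return ((key - offset) + interval) + offset;
    }

    public static void main(String[] args) {
        long interval = 10, offset = 3;
        long[] observed = {3, 33, 43};       // buckets with documents
        List<Long> filled = new ArrayList<>();
        long key = observed[0];
        for (long next : observed) {
            while (key < next) {             // insert zero-count buckets
                filled.add(key);
                key = nextKey(key, interval, offset);
            }
            filled.add(next);
            key = nextKey(next, interval, offset);
        }
        System.out.println(filled);          // [3, 13, 23, 33, 43]
    }
}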

View File

@ -136,14 +136,8 @@ public class InternalSearchResponse implements Streamable, ToXContent {
suggest = Suggest.readSuggest(in);
}
timedOut = in.readBoolean();
terminatedEarly = in.readOptionalBoolean();
if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) {
profileResults = new SearchProfileShardResults(in);
} else {
profileResults = null;
}
profileResults = in.readOptionalWriteable(SearchProfileShardResults::new);
}
@Override
@ -162,16 +156,7 @@ public class InternalSearchResponse implements Streamable, ToXContent {
suggest.writeTo(out);
}
out.writeBoolean(timedOut);
out.writeOptionalBoolean(terminatedEarly);
if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
if (profileResults == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
profileResults.writeTo(out);
}
}
out.writeOptionalWriteable(profileResults);
}
}
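
The serialization change above replaces a hand-rolled presence flag plus version check with `readOptionalWriteable`/`writeOptionalWriteable`. The underlying wire pattern is simply a boolean followed by the payload when present; a self-contained sketch using plain `java.io` streams as stand-ins for Elasticsearch's `StreamInput`/`StreamOutput`:

import java.io.*;

public class OptionalWriteableSketch {
    static void writeOptionalString(DataOutputStream out, String v) throws IOException {
        out.writeBoolean(v != null);   // presence flag
        if (v != null) {
            out.writeUTF(v);           // payload only when present
        }
    }

    static String readOptionalString(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        writeOptionalString(out, null);
        writeOptionalString(out, "profile");
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readOptionalString(in)); // null
        System.out.println(readOptionalString(in)); // profile
    }
}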

View File

@ -93,7 +93,6 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Supplier;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@ -768,12 +767,7 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
threadPool.generic().execute(() -> {
globalLock.writeLock().lock();
try {
for (Iterator<NodeChannels> it = connectedNodes.values().iterator(); it.hasNext(); ) {
NodeChannels nodeChannels = it.next();
it.remove();
IOUtils.closeWhileHandlingException(nodeChannels);
}
// first stop to accept any incoming connections so nobody can connect to this transport
for (Map.Entry<String, List<Channel>> entry : serverChannels.entrySet()) {
try {
closeChannels(entry.getValue());
@ -781,16 +775,13 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
logger.debug("Error closing serverChannel for profile [{}]", e, entry.getKey());
}
}
try {
stopInternal();
} finally {
for (Iterator<NodeChannels> it = connectedNodes.values().iterator(); it.hasNext(); ) {
NodeChannels nodeChannels = it.next();
it.remove();
IOUtils.closeWhileHandlingException(nodeChannels);
}
}
for (Iterator<NodeChannels> it = connectedNodes.values().iterator(); it.hasNext(); ) {
NodeChannels nodeChannels = it.next();
it.remove();
IOUtils.closeWhileHandlingException(nodeChannels);
}
stopInternal();
} finally {
globalLock.writeLock().unlock();
latch.countDown();
@ -800,7 +791,7 @@ public abstract class TcpTransport<Channel> extends AbstractLifecycleComponent i
try {
latch.await(30, TimeUnit.SECONDS);
} catch (InterruptedException e) {
Thread.interrupted();
Thread.currentThread().interrupt();
// ignore
}
}
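
The one-line fix at the bottom of this hunk matters more than it looks: `Thread.interrupted()` *clears* the thread's interrupt flag, while `Thread.currentThread().interrupt()` restores it so code further up the stack can still observe the interruption. A runnable sketch of the difference:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class InterruptSketch {
    public static void main(String[] args) throws Exception {
        Thread waiter = new Thread(() -> {
            CountDownLatch latch = new CountDownLatch(1);
            try {
                latch.await(30, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                // Thread.interrupted() would clear the flag here;
                // re-interrupting preserves it for the caller.
                Thread.currentThread().interrupt();
            }
            System.out.println("interrupted flag: "
                + Thread.currentThread().isInterrupted()); // true
        });
        waiter.start();
        waiter.interrupt();
        waiter.join();
    }
}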

View File

@ -382,7 +382,7 @@ public class TribeService extends AbstractLifecycleComponent {
clusterStateChanged = true;
logger.info("[{}] adding node [{}]", tribeName, discoNode);
nodes.remove(tribe.getId()); // remove any existing node with the same id but different ephemeral id
nodes.put(discoNode);
nodes.add(discoNode);
}
}
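
The `put` to `add` rename also touches the tribe node logic above, which first removes any stale entry with the same node id before adding the fresh one (a restarted node keeps its id but gets a new ephemeral id). A toy sketch of that remove-then-add step, with a plain map standing in for `DiscoveryNodes.Builder`:

import java.util.LinkedHashMap;
import java.util.Map;

public class NodeReplaceSketch {
    static final class Node {
        final String id;
        final String ephemeralId;
        Node(String id, String ephemeralId) { this.id = id; this.ephemeralId = ephemeralId; }
    }

    public static void main(String[] args) {
        Map<String, Node> nodes = new LinkedHashMap<>();
        nodes.put("n1", new Node("n1", "eph-old"));
        Node restarted = new Node("n1", "eph-new");
        nodes.remove(restarted.id);          // drop the stale entry
        nodes.put(restarted.id, restarted);  // then add the new one
        System.out.println(nodes.get("n1").ephemeralId); // eph-new
    }
}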

View File

@ -739,7 +739,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase {
// First group by node
DiscoveryNodes.Builder discoNodes = DiscoveryNodes.builder();
for (TestNode testNode : this.testNodes) {
discoNodes.put(testNode.discoveryNode);
discoNodes.add(testNode.discoveryNode);
}
response.setDiscoveryNodes(discoNodes.build());
Map<String, Object> byNodes = serialize(response, new ToXContent.MapParams(Collections.singletonMap("group_by", "nodes")));

View File

@ -166,7 +166,7 @@ public class ClusterRerouteTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
.getDefault(Settings.EMPTY))
.metaData(metaData).routingTable(routingTable).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")))
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
.build();
RoutingTable prevRoutingTable = routingTable;
routingTable = service.reroute(clusterState, "reroute").routingTable();

View File

@ -93,7 +93,7 @@ public class TransportShrinkActionTests extends ESTestCase {
// create one that won't fail
ClusterState clusterState = ClusterState.builder(createClusterState("source", randomIntBetween(2, 10), 0,
Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().put(newNode("node1")))
Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1")))
.build();
AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,
Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
@ -116,7 +116,7 @@ public class TransportShrinkActionTests extends ESTestCase {
ClusterState clusterState = ClusterState.builder(createClusterState(indexName, randomIntBetween(2, 10), 0,
Settings.builder()
.put("index.blocks.write", true)
.build())).nodes(DiscoveryNodes.builder().put(newNode("node1")))
.build())).nodes(DiscoveryNodes.builder().add(newNode("node1")))
.build();
AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,
Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),

View File

@ -79,7 +79,7 @@ public class IngestProxyActionFilterTests extends ESTestCase {
roles.add(DiscoveryNode.Role.INGEST);
}
DiscoveryNode node = new DiscoveryNode(nodeId, nodeId, LocalTransportAddress.buildUnique(), attributes, roles, VersionUtils.randomVersion(random()));
builder.put(node);
builder.add(node);
if (i == totalNodes - 1) {
localNode = node;
}

View File

@ -37,16 +37,12 @@ import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.sameInstance;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@ -117,12 +113,12 @@ public class TransportMultiSearchActionTests extends ESTestCase {
int numDataNodes = randomIntBetween(1, 10);
DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
for (int i = 0; i < numDataNodes; i++) {
builder.put(new DiscoveryNode("_id" + i, new LocalTransportAddress("_id" + i), Collections.emptyMap(),
builder.add(new DiscoveryNode("_id" + i, new LocalTransportAddress("_id" + i), Collections.emptyMap(),
Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT));
}
builder.put(new DiscoveryNode("master", new LocalTransportAddress("mater"), Collections.emptyMap(),
builder.add(new DiscoveryNode("master", new LocalTransportAddress("mater"), Collections.emptyMap(),
Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT));
builder.put(new DiscoveryNode("ingest", new LocalTransportAddress("ingest"), Collections.emptyMap(),
builder.add(new DiscoveryNode("ingest", new LocalTransportAddress("ingest"), Collections.emptyMap(),
Collections.singleton(DiscoveryNode.Role.INGEST), Version.CURRENT));
ClusterState state = ClusterState.builder(new ClusterName("_name")).nodes(builder).build();

View File

@ -220,7 +220,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
int totalIndexShards = 0;
for (int i = 0; i < numberOfNodes; i++) {
final DiscoveryNode node = newNode(i);
discoBuilder = discoBuilder.put(node);
discoBuilder = discoBuilder.add(node);
int numberOfShards = randomIntBetween(1, 10);
totalIndexShards += numberOfShards;
for (int j = 0; j < numberOfShards; j++) {

View File

@ -190,7 +190,7 @@ public class TransportNodesActionTests extends ESTestCase {
attributes.put("custom", randomBoolean() ? "match" : randomAsciiOfLengthBetween(3, 5));
}
final DiscoveryNode node = newNode(i, attributes, roles);
discoBuilder = discoBuilder.put(node);
discoBuilder = discoBuilder.add(node);
discoveryNodes.add(node);
}
discoBuilder.localNodeId(randomFrom(discoveryNodes).getId());

View File

@ -81,7 +81,7 @@ public class ClusterStateCreationUtils {
Set<String> unassignedNodes = new HashSet<>();
for (int i = 0; i < numberOfNodes + 1; i++) {
final DiscoveryNode node = newNode(i);
discoBuilder = discoBuilder.put(node);
discoBuilder = discoBuilder.add(node);
unassignedNodes.add(node.getId());
}
discoBuilder.localNodeId(newNode(0).getId());
@ -153,7 +153,7 @@ public class ClusterStateCreationUtils {
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
for (int i = 0; i < numberOfNodes + 1; i++) {
final DiscoveryNode node = newNode(i);
discoBuilder = discoBuilder.put(node);
discoBuilder = discoBuilder.add(node);
}
discoBuilder.localNodeId(newNode(0).getId());
discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local master to test shard failures
@ -241,7 +241,7 @@ public class ClusterStateCreationUtils {
public static ClusterState state(DiscoveryNode localNode, DiscoveryNode masterNode, DiscoveryNode... allNodes) {
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
for (DiscoveryNode node : allNodes) {
discoBuilder.put(node);
discoBuilder.add(node);
}
if (masterNode != null) {
discoBuilder.masterNodeId(masterNode.getId());

View File

@ -534,7 +534,7 @@ public class TransportReplicationActionTests extends ESTestCase {
AtomicReference<Throwable> failure = new AtomicReference<>();
AtomicReference<Throwable> ignoredFailure = new AtomicReference<>();
AtomicBoolean success = new AtomicBoolean();
proxy.failShard(replica, randomIntBetween(0, 10), "test", new ElasticsearchException("simulated"),
proxy.failShard(replica, randomIntBetween(1, 10), "test", new ElasticsearchException("simulated"),
() -> success.set(true), failure::set, ignoredFailure::set
);
CapturingTransport.CapturedRequest[] shardFailedRequests = transport.getCapturedRequestsAndClear();

View File

@ -131,7 +131,7 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTestCase {
ClusterName cluster1 = new ClusterName("cluster1");
ClusterState.Builder builder = ClusterState.builder(cluster1);
//the sniffer detects only data nodes
builder.nodes(DiscoveryNodes.builder().put(new DiscoveryNode("node_id", address, Collections.emptyMap(),
builder.nodes(DiscoveryNodes.builder().add(new DiscoveryNode("node_id", address, Collections.emptyMap(),
Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT)));
((TransportResponseHandler<ClusterStateResponse>) handler)
.handleResponse(new ClusterStateResponse(cluster1, builder.build()));

View File

@ -310,7 +310,7 @@ public class ClusterChangedEventTests extends ESTestCase {
}
}
final DiscoveryNode node = newNode(nodeId, roles);
builder.put(node);
builder.add(node);
if (i == localNodeIndex) {
builder.localNodeId(nodeId);
}

View File

@ -78,7 +78,7 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
emptyMap(), emptySet(), Version.CURRENT);
DiscoveryNode otherNode = new DiscoveryNode("other", new LocalTransportAddress("other"),
emptyMap(), emptySet(), Version.CURRENT);
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(masterNode).put(otherNode).localNodeId(masterNode.getId()).build();
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(masterNode).add(otherNode).localNodeId(masterNode.getId()).build();
ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
ClusterState clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), otherNode);
@ -193,14 +193,14 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
if (nodeId.startsWith("node-")) {
nodes.remove(nodeId);
if (randomBoolean()) {
nodes.put(new DiscoveryNode(nodeId, new LocalTransportAddress(randomAsciiOfLength(10)), emptyMap(),
nodes.add(new DiscoveryNode(nodeId, new LocalTransportAddress(randomAsciiOfLength(10)), emptyMap(),
emptySet(), randomVersion(random())));
}
}
}
int additionalNodeCount = randomIntBetween(1, 20);
for (int i = 0; i < additionalNodeCount; i++) {
nodes.put(new DiscoveryNode("node-" + randomAsciiOfLength(10), new LocalTransportAddress(randomAsciiOfLength(10)),
nodes.add(new DiscoveryNode("node-" + randomAsciiOfLength(10), new LocalTransportAddress(randomAsciiOfLength(10)),
emptyMap(), emptySet(), randomVersion(random())));
}
return ClusterState.builder(clusterState).nodes(nodes);

View File

@ -35,7 +35,7 @@ public class ClusterStateTests extends ESTestCase {
final Version version = Version.CURRENT;
final DiscoveryNode node1 = new DiscoveryNode("node1", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), version);
final DiscoveryNode node2 = new DiscoveryNode("node2", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), version);
final DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).put(node2).build();
final DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).build();
ClusterName name = ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY);
ClusterState noMaster1 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build();
ClusterState noMaster2 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build();

View File

@ -73,7 +73,7 @@ public class NodeConnectionsServiceTests extends ESTestCase {
private ClusterState clusterStateFromNodes(List<DiscoveryNode> nodes) {
final DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
for (DiscoveryNode node : nodes) {
builder.put(node);
builder.add(node);
}
return ClusterState.builder(new ClusterName("test")).nodes(builder).build();
}

View File

@ -154,7 +154,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa
private ClusterState createClusterStateWithStartedShards(String reason) {
int numberOfNodes = 1 + numberOfReplicas;
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
IntStream.rangeClosed(1, numberOfNodes).mapToObj(node -> newNode("node" + node)).forEach(nodes::put);
IntStream.rangeClosed(1, numberOfNodes).mapToObj(node -> newNode("node" + node)).forEach(nodes::add);
ClusterState stateAfterAddingNode =
ClusterState.builder(clusterState).nodes(nodes).build();
RoutingTable afterReroute =

View File

@ -129,7 +129,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
// create one that won't fail
ClusterState clusterState = ClusterState.builder(createClusterState("source", numShards, 0,
Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().put(newNode("node1")))
Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1")))
.build();
AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,
Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
@ -157,7 +157,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
.put("index.blocks.write", true)
.put("index.similarity.default.type", "BM25")
.put("index.analysis.analyzer.my_analyzer.tokenizer", "keyword")
.build())).nodes(DiscoveryNodes.builder().put(newNode("node1")))
.build())).nodes(DiscoveryNodes.builder().add(newNode("node1")))
.build();
AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,
Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),

View File

@ -123,13 +123,13 @@ public class DiscoveryNodesTests extends ESTestCase {
DiscoveryNode masterB = randomBoolean() ? null : RandomPicks.randomFrom(random(), nodesB);
DiscoveryNodes.Builder builderA = DiscoveryNodes.builder();
nodesA.stream().forEach(builderA::put);
nodesA.stream().forEach(builderA::add);
final String masterAId = masterA == null ? null : masterA.getId();
builderA.masterNodeId(masterAId);
builderA.localNodeId(RandomPicks.randomFrom(random(), nodesA).getId());
DiscoveryNodes.Builder builderB = DiscoveryNodes.builder();
nodesB.stream().forEach(builderB::put);
nodesB.stream().forEach(builderB::add);
final String masterBId = masterB == null ? null : masterB.getId();
builderB.masterNodeId(masterBId);
builderB.localNodeId(RandomPicks.randomFrom(random(), nodesB).getId());
@ -186,7 +186,7 @@ public class DiscoveryNodesTests extends ESTestCase {
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
List<DiscoveryNode> nodesList = randomNodes(numNodes);
for (DiscoveryNode node : nodesList) {
discoBuilder = discoBuilder.put(node);
discoBuilder = discoBuilder.add(node);
}
discoBuilder.localNodeId(randomFrom(nodesList).getId());
discoBuilder.masterNodeId(randomFrom(nodesList).getId());

View File

@ -21,7 +21,6 @@ package org.elasticsearch.cluster.routing;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
@ -30,12 +29,9 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.test.ESAllocationTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
@ -45,8 +41,6 @@ import org.junit.Before;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import static java.util.Collections.singleton;
@ -96,7 +90,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
.metaData(metaData)
.routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build();
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).localNodeId("node1").masterNodeId("node1"))
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).localNodeId("node1").masterNodeId("node1"))
.build();
clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build();
// starting primaries
@ -113,10 +107,11 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()).remove("node2");
boolean nodeAvailableForAllocation = randomBoolean();
if (nodeAvailableForAllocation) {
nodes.put(newNode("node3"));
nodes.add(newNode("node3"));
}
clusterState = ClusterState.builder(clusterState).nodes(nodes).build();
clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build();
clusterState = ClusterState.builder(clusterState).routingResult(
allocationService.deassociateDeadNodes(clusterState, true, "reroute")).build();
ClusterState newState = clusterState;
List<ShardRouting> unassignedShards = newState.getRoutingTable().shardsWithState(ShardRoutingState.UNASSIGNED);
if (nodeAvailableForAllocation) {
@ -142,7 +137,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
.metaData(metaData)
.routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build();
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).localNodeId("node1").masterNodeId("node1"))
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).localNodeId("node1").masterNodeId("node1"))
.build();
final long baseTimestampNanos = System.nanoTime();
allocationService.setNanoTimeOverride(baseTimestampNanos);
@ -169,7 +164,8 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
// remove node that has replica and reroute
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(nodeId)).build();
clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build();
clusterState = ClusterState.builder(clusterState).routingResult(
allocationService.deassociateDeadNodes(clusterState, true, "reroute")).build();
ClusterState stateWithDelayedShard = clusterState;
// make sure the replica is marked as delayed (i.e. not reallocated)
assertEquals(1, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShard));
@ -239,8 +235,8 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData)
.routingTable(RoutingTable.builder().addAsNew(metaData.index("short_delay")).addAsNew(metaData.index("long_delay")).build())
.nodes(DiscoveryNodes.builder()
.put(newNode("node0", singleton(DiscoveryNode.Role.MASTER))).localNodeId("node0").masterNodeId("node0")
.put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build();
.add(newNode("node0", singleton(DiscoveryNode.Role.MASTER))).localNodeId("node0").masterNodeId("node0")
.add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build();
// allocate shards
clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build();
// start primaries
@ -284,7 +280,8 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
.build();
// make sure both replicas are marked as delayed (i.e. not reallocated)
allocationService.setNanoTimeOverride(baseTimestampNanos);
clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build();
clusterState = ClusterState.builder(clusterState).routingResult(
allocationService.deassociateDeadNodes(clusterState, true, "reroute")).build();
final ClusterState stateWithDelayedShards = clusterState;
assertEquals(2, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShards));
RoutingNodes.UnassignedShards.UnassignedIterator iter = stateWithDelayedShards.getRoutingNodes().unassigned().iterator();
@ -398,7 +395,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
.build()).build();
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder()
.put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))
.add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))
.localNodeId("node1").masterNodeId("node1"))
.build();
final long nodeLeftTimestampNanos = System.nanoTime();
@ -425,7 +422,8 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
// remove node that has replica and reroute
clusterState = ClusterState.builder(clusterState).nodes(
DiscoveryNodes.builder(clusterState.nodes()).remove(nodeIdOfFooReplica)).build();
clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "fake node left")).build();
clusterState = ClusterState.builder(clusterState).routingResult(
allocationService.deassociateDeadNodes(clusterState, true, "fake node left")).build();
ClusterState stateWithDelayedShard = clusterState;
// make sure the replica is marked as delayed (i.e. not reallocated)
assertEquals(1, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShard));
@ -469,7 +467,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(stateWithDelayedShard).nodes(
DiscoveryNodes.builder(stateWithDelayedShard.nodes()).remove(nodeIdOfBarReplica)).build();
ClusterState stateWithShorterDelay = ClusterState.builder(clusterState).routingResult(
allocationService.reroute(clusterState, "fake node left")).build();
allocationService.deassociateDeadNodes(clusterState, true, "fake node left")).build();
delayedAllocationService.setNanoTimeOverride(clusterChangeEventTimestampNanos);
delayedAllocationService.clusterChanged(
new ClusterChangedEvent("fake node left", stateWithShorterDelay, stateWithDelayedShard));
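
Several hunks in this file swap `reroute` for the new `deassociateDeadNodes` call once nodes have been removed from the cluster state. Conceptually, shards still assigned to departed nodes are marked unassigned before any rebalancing runs; a toy illustration of that step (all names here are illustrative stand-ins, not the real allocation code):

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class DeadNodeSketch {
    public static void main(String[] args) {
        Map<String, String> shardToNode = new LinkedHashMap<>();
        shardToNode.put("shard-0", "node1");
        shardToNode.put("shard-1", "node2");
        Set<String> liveNodes = new HashSet<>(Collections.singleton("node1"));

        List<String> unassigned = new ArrayList<>();
        for (Map.Entry<String, String> e : shardToNode.entrySet()) {
            if (!liveNodes.contains(e.getValue())) {
                unassigned.add(e.getKey()); // node2 left: shard-1 unassigned
            }
        }
        System.out.println(unassigned); // [shard-1]
    }
}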

View File

@ -90,7 +90,7 @@ public class PrimaryTermsTests extends ESAllocationTestCase {
logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1);
Builder discoBuilder = DiscoveryNodes.builder();
for (int i = 0; i < this.numberOfReplicas + 1; i++) {
discoBuilder = discoBuilder.put(newNode("node" + i));
discoBuilder = discoBuilder.add(newNode("node" + i));
}
this.clusterState = ClusterState.builder(clusterState).nodes(discoBuilder).build();
RoutingAllocation.Result rerouteResult = allocationService.reroute(clusterState, "reroute");
@ -161,7 +161,7 @@ public class PrimaryTermsTests extends ESAllocationTestCase {
final int newNodes = randomInt(10);
logger.info("adding [{}] nodes", newNodes);
for (int i = 0; i < newNodes; i++) {
nodesBuilder.put(newNode("extra_" + i));
nodesBuilder.add(newNode("extra_" + i));
}
this.clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
RoutingAllocation.Result rerouteResult = allocationService.reroute(this.clusterState, "nodes added");

View File

@ -83,7 +83,7 @@ public class RoutingTableTests extends ESAllocationTestCase {
logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1);
Builder discoBuilder = DiscoveryNodes.builder();
for (int i = 0; i < this.numberOfReplicas + 1; i++) {
discoBuilder = discoBuilder.put(newNode("node" + i));
discoBuilder = discoBuilder.add(newNode("node" + i));
}
this.clusterState = ClusterState.builder(clusterState).nodes(discoBuilder).build();
RoutingAllocation.Result rerouteResult = ALLOCATION_SERVICE.reroute(clusterState, "reroute");

View File

@ -26,18 +26,18 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.test.ESAllocationTestCase;
import java.io.IOException;
@ -175,7 +175,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData)
.routingTable(RoutingTable.builder().addAsNew(metaData.index(index)).build()).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build();
// starting primaries
clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build();
@ -215,7 +215,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData)
.routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build();
// starting primaries
clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build();
@ -224,7 +224,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false));
// remove node2 and reroute
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build();
clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build();
clusterState = ClusterState.builder(clusterState).routingResult(allocation.deassociateDeadNodes(clusterState, true, "reroute")).build();
// verify that NODE_LEAVE is the reason for meta
assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(true));
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1));
@ -244,7 +244,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData)
.routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build();
// starting primaries
clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build();
@ -294,7 +294,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData)
.routingTable(RoutingTable.builder().addAsNew(metaData.index("test1")).addAsNew(metaData.index("test2")).build()).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build();
assertThat(UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(0));
// starting primaries
@ -305,7 +305,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
// remove node2 and reroute
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build();
// make sure both replicas are marked as delayed (i.e. not reallocated)
clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build();
clusterState = ClusterState.builder(clusterState).routingResult(allocation.deassociateDeadNodes(clusterState, true, "reroute")).build();
assertThat(clusterState.prettyPrint(), UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(2));
}
@ -322,7 +322,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData)
.routingTable(RoutingTable.builder().addAsNew(metaData.index("test1")).addAsNew(metaData.index("test2")).build()).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build();
assertThat(UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(0));
// starting primaries
@ -334,7 +334,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
final long baseTime = System.nanoTime();
allocation.setNanoTimeOverride(baseTime);
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build();
clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build();
clusterState = ClusterState.builder(clusterState).routingResult(allocation.deassociateDeadNodes(clusterState, true, "reroute")).build();
final long delta = randomBoolean() ? 0 : randomInt((int) expectMinDelaySettingsNanos - 1);

View File

@ -55,8 +55,8 @@ public class ActiveAllocationIdTests extends ESAllocationTestCase {
.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("adding three nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(
newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(
newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build();
RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
@ -83,7 +83,7 @@ public class ActiveAllocationIdTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.remove("node1"))
.build();
rerouteResult = allocation.reroute(clusterState, "reroute");
rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute");
clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
assertThat(clusterState.metaData().index("test").activeAllocationIds(0).size(), equalTo(2));
@ -92,7 +92,7 @@ public class ActiveAllocationIdTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.remove("node2").remove("node3"))
.build();
rerouteResult = allocation.reroute(clusterState, "reroute");
rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute");
clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
// active allocation ids should not be updated

View File

@ -108,7 +108,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
logger.info("now, start one more node, check that rebalancing will happen because we set it to always");
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
nodes.put(newNode("node2"));
nodes.add(newNode("node2"));
clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
@ -178,7 +178,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
logger.info("now, start one more node, check that rebalancing will happen because we set it to always");
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
nodes.put(newNode("node2"));
nodes.add(newNode("node2"));
clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
@ -257,7 +257,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
logger.info("now, start [{}] more node, check that rebalancing will happen because we set it to always", numNodes);
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
for (int i = 0; i < numNodes; i++) {
nodes.put(newNode("node" + (i + nodeOffset)));
nodes.add(newNode("node" + (i + nodeOffset)));
}
clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
@ -304,7 +304,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
logger.info("start {} nodes", numberOfNodes);
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
for (int i = 0; i < numberOfNodes; i++) {
nodes.put(newNode("node" + i));
nodes.add(newNode("node" + i));
}
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
routingTable = service.reroute(clusterState, "reroute").routingTable();
@ -397,6 +397,8 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
}
clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
clusterState = ClusterState.builder(clusterState)
.routingResult(service.deassociateDeadNodes(clusterState, true, "reroute")).build();
RoutingNodes routingNodes = clusterState.getRoutingNodes();
logger.info("start all the primary shards, replicas will start initializing");

View File

@ -51,11 +51,8 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.plugins.DiscoveryPlugin;
import org.elasticsearch.test.ESAllocationTestCase;
import java.util.Collections;
import static java.util.Collections.singleton;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
@ -82,7 +79,7 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
@ -141,10 +138,10 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
logger.info("--> adding 3 nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.put(newNode("node3"))
.put(newNode("node4", singleton(DiscoveryNode.Role.MASTER)))
.add(newNode("node1"))
.add(newNode("node2"))
.add(newNode("node3"))
.add(newNode("node4", singleton(DiscoveryNode.Role.MASTER)))
).build();
RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
@ -263,9 +260,9 @@ public class AllocationCommandsTests extends ESAllocationTestCase {
logger.info("--> adding 3 nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.put(newNode("node3"))
.add(newNode("node1"))
.add(newNode("node2"))
.add(newNode("node3"))
).build();
RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();

View File

@ -67,7 +67,7 @@ public class AllocationPriorityTests extends ESAllocationTestCase {
.build();
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();

View File

@ -72,8 +72,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1", singletonMap("rack_id", "1")))
.put(newNode("node2", singletonMap("rack_id", "1")))
.add(newNode("node1", singletonMap("rack_id", "1")))
.add(newNode("node2", singletonMap("rack_id", "1")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -91,7 +91,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add a new node with a new rack and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3", singletonMap("rack_id", "2")))
.add(newNode("node3", singletonMap("rack_id", "2")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -111,7 +111,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add another node with a new rack, make sure nothing moves");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node4", singletonMap("rack_id", "3")))
.add(newNode("node4", singletonMap("rack_id", "3")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
assertThat(routingTable, sameInstance(clusterState.routingTable()));
@ -140,9 +140,9 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1", singletonMap("rack_id", "1")))
.put(newNode("node2", singletonMap("rack_id", "1")))
.put(newNode("node3", singletonMap("rack_id", "1")))
.add(newNode("node1", singletonMap("rack_id", "1")))
.add(newNode("node2", singletonMap("rack_id", "1")))
.add(newNode("node3", singletonMap("rack_id", "1")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -160,7 +160,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add a new node with a new rack and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node4", singletonMap("rack_id", "2")))
.add(newNode("node4", singletonMap("rack_id", "2")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -180,7 +180,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add another node with a new rack, make sure nothing moves");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node5", singletonMap("rack_id", "3")))
.add(newNode("node5", singletonMap("rack_id", "3")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
assertThat(routingTable, sameInstance(clusterState.routingTable()));
@ -214,8 +214,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1", singletonMap("rack_id", "1")))
.put(newNode("node2", singletonMap("rack_id", "1")))
.add(newNode("node1", singletonMap("rack_id", "1")))
.add(newNode("node2", singletonMap("rack_id", "1")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -239,7 +239,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add a new node with a new rack and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3", singletonMap("rack_id", "2")))
.add(newNode("node3", singletonMap("rack_id", "2")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -264,7 +264,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add another node with a new rack, some more relocation should happen");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node4", singletonMap("rack_id", "3")))
.add(newNode("node4", singletonMap("rack_id", "3")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -305,8 +305,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1", singletonMap("rack_id", "1")))
.put(newNode("node2", singletonMap("rack_id", "1")))
.add(newNode("node1", singletonMap("rack_id", "1")))
.add(newNode("node2", singletonMap("rack_id", "1")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -324,7 +324,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add a new node with a new rack and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3", singletonMap("rack_id", "2")))
.add(newNode("node3", singletonMap("rack_id", "2")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -353,7 +353,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add another node with a new rack, some more relocation should happen");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node4", singletonMap("rack_id", "3")))
.add(newNode("node4", singletonMap("rack_id", "3")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -396,8 +396,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1", singletonMap("rack_id", "1")))
.put(newNode("node2", singletonMap("rack_id", "1")))
.add(newNode("node1", singletonMap("rack_id", "1")))
.add(newNode("node2", singletonMap("rack_id", "1")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -415,7 +415,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add a new node with a new rack and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3", singletonMap("rack_id", "2")))
.add(newNode("node3", singletonMap("rack_id", "2")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -435,7 +435,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add another node with a new rack, we will have another relocation");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node4", singletonMap("rack_id", "3")))
.add(newNode("node4", singletonMap("rack_id", "3")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -474,10 +474,10 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1", singletonMap("rack_id", "1")))
.put(newNode("node2", singletonMap("rack_id", "1")))
.put(newNode("node3", singletonMap("rack_id", "1")))
.put(newNode("node4", singletonMap("rack_id", "1")))
.add(newNode("node1", singletonMap("rack_id", "1")))
.add(newNode("node2", singletonMap("rack_id", "1")))
.add(newNode("node3", singletonMap("rack_id", "1")))
.add(newNode("node4", singletonMap("rack_id", "1")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -495,7 +495,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add a new node with a new rack and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node5", singletonMap("rack_id", "2")))
.add(newNode("node5", singletonMap("rack_id", "2")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -515,7 +515,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add another node with a new rack, we will have another relocation");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node6", singletonMap("rack_id", "3")))
.add(newNode("node6", singletonMap("rack_id", "3")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -555,8 +555,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1", singletonMap("rack_id", "1")))
.put(newNode("node2", singletonMap("rack_id", "1")))
.add(newNode("node1", singletonMap("rack_id", "1")))
.add(newNode("node2", singletonMap("rack_id", "1")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -572,7 +572,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add a new node with a new rack and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3", singletonMap("rack_id", "2")))
.add(newNode("node3", singletonMap("rack_id", "2")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -592,7 +592,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add another node with a new rack, make sure nothing moves");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node4", singletonMap("rack_id", "3")))
.add(newNode("node4", singletonMap("rack_id", "3")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
assertThat(routingTable, sameInstance(clusterState.routingTable()));
@ -622,9 +622,9 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1", singletonMap("rack_id", "1")))
.put(newNode("node2", singletonMap("rack_id", "1")))
.put(newNode("node3", singletonMap("rack_id", "1")))
.add(newNode("node1", singletonMap("rack_id", "1")))
.add(newNode("node2", singletonMap("rack_id", "1")))
.add(newNode("node3", singletonMap("rack_id", "1")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -640,7 +640,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add a new node with a new rack and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node4", singletonMap("rack_id", "2")))
.add(newNode("node4", singletonMap("rack_id", "2")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -660,7 +660,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add another node with a new rack, make sure nothing moves");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node5", singletonMap("rack_id", "3")))
.add(newNode("node5", singletonMap("rack_id", "3")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
assertThat(routingTable, sameInstance(clusterState.routingTable()));
@ -697,8 +697,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> adding two nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1", singletonMap("rack_id", "1")))
.put(newNode("node2", singletonMap("rack_id", "1")))
.add(newNode("node1", singletonMap("rack_id", "1")))
.add(newNode("node2", singletonMap("rack_id", "1")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -712,7 +712,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add a new node with a new rack and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3", singletonMap("rack_id", "2")))
.add(newNode("node3", singletonMap("rack_id", "2")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -736,7 +736,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add another node with a new rack, some more relocation should happen");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node4", singletonMap("rack_id", "3")))
.add(newNode("node4", singletonMap("rack_id", "3")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -776,8 +776,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> adding two nodes in different zones and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("A-0", singletonMap("zone", "a")))
.put(newNode("B-0", singletonMap("zone", "b")))
.add(newNode("A-0", singletonMap("zone", "a")))
.add(newNode("B-0", singletonMap("zone", "b")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -798,7 +798,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> add a new node in zone 'a' and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("A-1", singletonMap("zone", "a")))
.add(newNode("A-1", singletonMap("zone", "a")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -837,12 +837,12 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
logger.info("--> adding 5 nodes in different zones and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("A-0", singletonMap("zone", "a")))
.put(newNode("A-1", singletonMap("zone", "a")))
.put(newNode("A-2", singletonMap("zone", "a")))
.put(newNode("A-3", singletonMap("zone", "a")))
.put(newNode("A-4", singletonMap("zone", "a")))
.put(newNode("B-0", singletonMap("zone", "b")))
.add(newNode("A-0", singletonMap("zone", "a")))
.add(newNode("A-1", singletonMap("zone", "a")))
.add(newNode("A-2", singletonMap("zone", "a")))
.add(newNode("A-3", singletonMap("zone", "a")))
.add(newNode("A-4", singletonMap("zone", "a")))
.add(newNode("B-0", singletonMap("zone", "b")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
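
All of the AwarenessAllocationTests hunks above apply the same mechanical rename: DiscoveryNodes.Builder.put(DiscoveryNode) becomes DiscoveryNodes.Builder.add(DiscoveryNode). A minimal sketch of the new call shape, condensed from the test code in these hunks (newNode and singletonMap come from the existing test scaffolding):

    // Condensed from the hunks above: the builder method is now add(...), not put(...).
    DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
    nodes.add(newNode("A-0", singletonMap("zone", "a"))); // was: nodes.put(...)
    nodes.add(newNode("B-0", singletonMap("zone", "b")));
    clusterState = ClusterState.builder(clusterState).nodes(nodes).build();
    routingTable = strategy.reroute(clusterState, "reroute").routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();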

View File

@ -46,6 +46,7 @@ import org.hamcrest.Matchers;
import java.util.HashMap;
import java.util.Map;
import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
@ -129,7 +130,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
logger.info("start " + numberOfNodes + " nodes");
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
for (int i = 0; i < numberOfNodes; i++) {
nodes.put(newNode("node" + i));
nodes.add(newNode("node" + i));
}
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -165,7 +166,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
private ClusterState addNode(ClusterState clusterState, AllocationService strategy) {
logger.info("now, start 1 more node, check that rebalancing will happen because we set it to always");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node" + numberOfNodes)))
.add(newNode("node" + numberOfNodes)))
.build();
RoutingTable routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -191,11 +192,18 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
logger.info("Removing half the nodes (" + (numberOfNodes + 1) / 2 + ")");
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
boolean removed = false;
for (int i = (numberOfNodes + 1) / 2; i <= numberOfNodes; i++) {
nodes.remove("node" + i);
removed = true;
}
clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
if (removed) {
clusterState = ClusterState.builder(clusterState).routingResult(
strategy.deassociateDeadNodes(clusterState, randomBoolean(), "removed nodes")
).build();
}
RoutingNodes routingNodes = clusterState.getRoutingNodes();
logger.info("start all the primary shards, replicas will start initializing");
@ -378,7 +386,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder();
for (int i = 0; i < 4; i++) {
DiscoveryNode node = newNode("node" + i);
nodes.put(node);
nodes.add(node);
}
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
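
Note the behavioral change in the removeNodes helper above: it now records whether any node was actually dropped and, only in that case, reassigns the orphaned shards through the new AllocationService.deassociateDeadNodes entry point instead of a plain reroute. A sketch of that flow, assuming the surrounding test fixtures (strategy, numberOfNodes, randomBoolean):

    // Sketch of the new removal path shown in the hunk above.
    DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
    boolean removed = false;
    for (int i = (numberOfNodes + 1) / 2; i <= numberOfNodes; i++) {
        nodes.remove("node" + i);
        removed = true;
    }
    clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build();
    if (removed) {
        // Shards are deassociated only when a node really left the cluster state.
        clusterState = ClusterState.builder(clusterState).routingResult(
            strategy.deassociateDeadNodes(clusterState, randomBoolean(), "removed nodes")
        ).build();
    }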

View File

@ -118,7 +118,7 @@ public abstract class CatAllocationTestCase extends ESAllocationTestCase {
RoutingTable routingTable = routingTableBuilder.build();
DiscoveryNodes.Builder builderDiscoNodes = DiscoveryNodes.builder();
for (String node : nodes) {
builderDiscoNodes.put(newNode(node));
builderDiscoNodes.add(newNode(node));
}
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).nodes(builderDiscoNodes.build()).build();
if (balanceFirst()) {

View File

@ -64,7 +64,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -121,7 +121,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
logger.info("now, start 1 more node, check that rebalancing will happen (for test1) because we set it to always");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3")))
.add(newNode("node3")))
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -150,7 +150,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -226,7 +226,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to primaries_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3")))
.add(newNode("node3")))
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -254,7 +254,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -311,7 +311,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to primaries_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3")))
.add(newNode("node3")))
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -338,7 +338,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -433,7 +433,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to all_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3")))
.add(newNode("node3")))
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -461,7 +461,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -518,7 +518,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3")))
.add(newNode("node3")))
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -545,7 +545,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -621,7 +621,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3")))
.add(newNode("node3")))
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -665,7 +665,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -686,7 +686,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
logger.debug("now, start 1 more node, check that rebalancing will not happen since we unassigned shards");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node2")))
.add(newNode("node2")))
.build();
logger.debug("reroute and check that nothing has changed");
RoutingAllocation.Result reroute = strategy.reroute(clusterState, "reroute");
@ -764,7 +764,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -785,7 +785,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
logger.debug("now, start 1 more node, check that rebalancing will not happen since we have shard sync going on");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node2")))
.add(newNode("node2")))
.build();
logger.debug("reroute and check that nothing has changed");
RoutingAllocation.Result reroute = strategy.reroute(clusterState, "reroute");

View File

@ -69,7 +69,7 @@ public class ConcurrentRebalanceRoutingTests extends ESAllocationTestCase {
}
logger.info("start two nodes and fully start the shards");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -93,7 +93,7 @@ public class ConcurrentRebalanceRoutingTests extends ESAllocationTestCase {
logger.info("now, start 8 more nodes, and check that no rebalancing/relocation have happened");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3")).put(newNode("node4")).put(newNode("node5")).put(newNode("node6")).put(newNode("node7")).put(newNode("node8")).put(newNode("node9")).put(newNode("node10")))
.add(newNode("node3")).add(newNode("node4")).add(newNode("node5")).add(newNode("node6")).add(newNode("node7")).add(newNode("node8")).add(newNode("node9")).add(newNode("node10")))
.build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

View File

@ -60,8 +60,8 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
logger.info("--> adding 2 nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.add(newNode("node1"))
.add(newNode("node2"))
).build();
RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
@ -84,10 +84,10 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
String nodeIdToFail = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
String nodeIdRemaining = nodeIdToFail.equals("node1") ? "node2" : "node1";
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode(nodeIdRemaining))
.add(newNode(nodeIdRemaining))
).build();
rerouteResult = allocation.reroute(clusterState, "reroute");
rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute");
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
assertThat(clusterState.getRoutingNodes().node(nodeIdRemaining).iterator().next().primary(), equalTo(true));
@ -111,8 +111,8 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
logger.info("--> adding 2 nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.add(newNode("node1"))
.add(newNode("node2"))
).build();
RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
@ -133,7 +133,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
logger.info("--> adding additional node");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3"))
.add(newNode("node3"))
).build();
rerouteResult = allocation.reroute(clusterState, "reroute");
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
@ -158,10 +158,10 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
logger.info("--> fail primary shard recovering instance on node3 being initialized by killing node3");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode(origPrimaryNodeId))
.put(newNode(origReplicaNodeId))
.add(newNode(origPrimaryNodeId))
.add(newNode(origReplicaNodeId))
).build();
rerouteResult = allocation.reroute(clusterState, "reroute");
rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute");
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(STARTED));
@ -185,8 +185,8 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
logger.info("--> adding 2 nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.add(newNode("node1"))
.add(newNode("node2"))
).build();
RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
@ -207,7 +207,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
logger.info("--> adding additional node");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3"))
.add(newNode("node3"))
).build();
rerouteResult = allocation.reroute(clusterState, "reroute");
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
@ -232,10 +232,10 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
logger.info("--> fail primary shard recovering instance on 'origPrimaryNodeId' being relocated");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node3"))
.put(newNode(origReplicaNodeId))
.add(newNode("node3"))
.add(newNode(origReplicaNodeId))
).build();
rerouteResult = allocation.reroute(clusterState, "reroute");
rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute");
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
assertThat(clusterState.getRoutingNodes().node(origReplicaNodeId).iterator().next().state(), equalTo(STARTED));
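
The same substitution recurs wherever these tests kill a node: the rebuilt DiscoveryNodes simply omits the dead node, and its shards are recovered via deassociateDeadNodes(clusterState, true, reason) rather than reroute. Condensed from the DeadNodesAllocationTests hunks above, using their names:

    // The dead node ("node3" in the hunk above) is simply omitted from the rebuilt set.
    clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
        .add(newNode(origPrimaryNodeId))
        .add(newNode(origReplicaNodeId))
    ).build();
    rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute"); // was: allocation.reroute(...)
    clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();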

View File

@ -143,7 +143,7 @@ public class DecisionsImpactOnClusterHealthTests extends ESAllocationTestCase {
// any allocations on it
final DiscoveryNodes.Builder discoveryNodes = DiscoveryNodes.builder();
for (int i = 0; i < numShards; i++) {
discoveryNodes.put(newNode("node" + i));
discoveryNodes.add(newNode("node" + i));
}
clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).build();

View File

@ -58,7 +58,7 @@ public class ElectReplicaAsPrimaryDuringRelocationTests extends ESAllocationTest
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -82,7 +82,7 @@ public class ElectReplicaAsPrimaryDuringRelocationTests extends ESAllocationTest
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2));
logger.info("Start another node and perform rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -100,7 +100,7 @@ public class ElectReplicaAsPrimaryDuringRelocationTests extends ESAllocationTest
logger.info("kill the node [{}] of the primary shard for the relocating replica", indexShardRoutingTable.primaryShard().currentNodeId());
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(indexShardRoutingTable.primaryShard().currentNodeId())).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("make sure all the primary shards are active");

View File

@ -80,7 +80,7 @@ public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding one node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -95,7 +95,7 @@ public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase {
assertEquals(1, clusterState.getRoutingNodes().unassigned().size());
logger.info("Add another one node and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -133,7 +133,7 @@ public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();

View File

@ -57,7 +57,7 @@ public class FailedNodeRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("start 4 nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -90,7 +90,7 @@ public class FailedNodeRoutingTests extends ESAllocationTestCase {
)
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.getRoutingNodes();

View File

@ -73,8 +73,8 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
logger.info("--> adding 2 nodes on same rack and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.add(newNode("node1"))
.add(newNode("node2"))
).build();
RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
@ -95,7 +95,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
logger.info("--> adding additional node");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3"))
.add(newNode("node3"))
).build();
rerouteResult = allocation.reroute(clusterState, "reroute");
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build();
@ -163,7 +163,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -241,7 +241,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding single node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -296,7 +296,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
logger.info("Adding {} nodes and performing rerouting", numberOfReplicas + 1);
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder();
for (int i = 0; i < numberOfReplicas + 1; i++) {
nodeBuilder.put(newNode("node" + Integer.toString(i)));
nodeBuilder.add(newNode("node" + Integer.toString(i)));
}
clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build();
while (!clusterState.routingTable().shardsWithState(UNASSIGNED).isEmpty()) {
@ -362,7 +362,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -419,7 +419,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -461,7 +461,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
}
logger.info("Adding third node and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -510,7 +510,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
// add 4 nodes
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build();
clusterState = ClusterState.builder(clusterState).routingTable(allocation.reroute(clusterState, "reroute").routingTable()).build();
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2));
@ -552,7 +552,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
// add 4 nodes
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build();
clusterState = ClusterState.builder(clusterState).routingTable(allocation.reroute(clusterState, "reroute").routingTable()).build();
assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2));

View File

@ -116,7 +116,7 @@ public class FilterAllocationDeciderTests extends ESAllocationTestCase {
RoutingTable routingTable = routingTableBuilder.build();
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING
.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")))
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
.build();
routingTable = service.reroute(clusterState, "reroute", false).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();

View File

@ -66,10 +66,10 @@ public class FilterRoutingTests extends ESAllocationTestCase {
logger.info("--> adding four nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1", singletonMap("tag1", "value1")))
.put(newNode("node2", singletonMap("tag1", "value2")))
.put(newNode("node3", singletonMap("tag1", "value3")))
.put(newNode("node4", singletonMap("tag1", "value4")))
.add(newNode("node1", singletonMap("tag1", "value1")))
.add(newNode("node2", singletonMap("tag1", "value2")))
.add(newNode("node3", singletonMap("tag1", "value3")))
.add(newNode("node4", singletonMap("tag1", "value4")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -114,10 +114,10 @@ public class FilterRoutingTests extends ESAllocationTestCase {
logger.info("--> adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1", singletonMap("tag1", "value1")))
.put(newNode("node2", singletonMap("tag1", "value2")))
.put(newNode("node3", singletonMap("tag1", "value3")))
.put(newNode("node4", singletonMap("tag1", "value4")))
.add(newNode("node1", singletonMap("tag1", "value1")))
.add(newNode("node2", singletonMap("tag1", "value2")))
.add(newNode("node3", singletonMap("tag1", "value3")))
.add(newNode("node4", singletonMap("tag1", "value4")))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -185,7 +185,7 @@ public class FilterRoutingTests extends ESAllocationTestCase {
logger.info("--> adding two nodes and performing rerouting");
DiscoveryNode node1 = newNode("node1", singletonMap("tag1", "value1"));
DiscoveryNode node2 = newNode("node2", singletonMap("tag1", "value2"));
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(node1).put(node2)).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(node1).add(node2)).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(clusterState.getRoutingNodes().node(node1.getId()).numberOfShardsWithState(INITIALIZING), equalTo(2));

View File

@ -82,7 +82,7 @@ public class IndexBalanceTests extends ESAllocationTestCase {
logger.info("Adding three node and performing rerouting");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -211,7 +211,7 @@ public class IndexBalanceTests extends ESAllocationTestCase {
}
logger.info("Adding one node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -230,7 +230,7 @@ public class IndexBalanceTests extends ESAllocationTestCase {
logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
.nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -294,7 +294,7 @@ public class IndexBalanceTests extends ESAllocationTestCase {
logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
.nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -363,7 +363,7 @@ public class IndexBalanceTests extends ESAllocationTestCase {
logger.info("Adding three node and performing rerouting");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();

View File

@ -28,8 +28,6 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
@ -66,7 +64,7 @@ public class MaxRetryAllocationDeciderTests extends ESAllocationTestCase {
RoutingTable routingTable = routingTableBuilder.build();
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData).routingTable(routingTable).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")))
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
.build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute", false).routingTable();

View File

@ -105,7 +105,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
}
logger.info("start two nodes and fully start the shards");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -145,7 +145,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
}
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3", VersionUtils.getPreviousVersion())))
.add(newNode("node3", VersionUtils.getPreviousVersion())))
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -161,7 +161,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node4")))
.add(newNode("node4")))
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -230,7 +230,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
}
}
for (DiscoveryNode node : nodes) {
nodesBuilder.put(node);
nodesBuilder.add(node);
}
clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
clusterState = stabilize(clusterState, service);
@ -267,29 +267,29 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), nullValue());
}
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("old0", VersionUtils.getPreviousVersion()))
.put(newNode("old1", VersionUtils.getPreviousVersion()))
.put(newNode("old2", VersionUtils.getPreviousVersion()))).build();
.add(newNode("old0", VersionUtils.getPreviousVersion()))
.add(newNode("old1", VersionUtils.getPreviousVersion()))
.add(newNode("old2", VersionUtils.getPreviousVersion()))).build();
clusterState = stabilize(clusterState, service);
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("old0", VersionUtils.getPreviousVersion()))
.put(newNode("old1", VersionUtils.getPreviousVersion()))
.put(newNode("new0"))).build();
.add(newNode("old0", VersionUtils.getPreviousVersion()))
.add(newNode("old1", VersionUtils.getPreviousVersion()))
.add(newNode("new0"))).build();
clusterState = stabilize(clusterState, service);
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node0", VersionUtils.getPreviousVersion()))
.put(newNode("new1"))
.put(newNode("new0"))).build();
.add(newNode("node0", VersionUtils.getPreviousVersion()))
.add(newNode("new1"))
.add(newNode("new0"))).build();
clusterState = stabilize(clusterState, service);
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("new2"))
.put(newNode("new1"))
.put(newNode("new0"))).build();
.add(newNode("new2"))
.add(newNode("new1"))
.add(newNode("new0"))).build();
clusterState = stabilize(clusterState, service);
routingTable = clusterState.routingTable();
@ -334,7 +334,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData)
.routingTable(routingTable)
.nodes(DiscoveryNodes.builder().put(newNode).put(oldNode1).put(oldNode2)).build();
.nodes(DiscoveryNodes.builder().add(newNode).add(oldNode1).add(oldNode2)).build();
AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, new AllocationDecider[] {new NodeVersionAllocationDecider(Settings.EMPTY)});
AllocationService strategy = new MockAllocationService(Settings.EMPTY,
allocationDeciders,
@ -365,7 +365,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
.routingTable(RoutingTable.builder().addAsRestore(metaData.index("test"),
new RestoreSource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())),
Version.CURRENT, "test")).build())
.nodes(DiscoveryNodes.builder().put(newNode).put(oldNode1).put(oldNode2)).build();
.nodes(DiscoveryNodes.builder().add(newNode).add(oldNode1).add(oldNode2)).build();
AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{
new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY),
new NodeVersionAllocationDecider(Settings.EMPTY)});
@ -383,7 +383,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
private ClusterState stabilize(ClusterState clusterState, AllocationService service) {
logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes().prettyPrint());
RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
RoutingTable routingTable = service.deassociateDeadNodes(clusterState, true, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
RoutingNodes routingNodes = clusterState.getRoutingNodes();
assertRecoveryNodeVersions(routingNodes);
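The other recurring change in this merge splits dead-node handling out of reroute: once a node has been removed from the cluster state, tests call AllocationService.deassociateDeadNodes instead of a plain reroute. A hedged sketch of the pattern, with names reused from the surrounding tests; reading the call sites, the boolean flag appears to request a follow-up reroute, but that is an inference, not a contract stated in this diff:

    // Drop node1 from the topology, then explicitly deassociate its shards.
    clusterState = ClusterState.builder(clusterState)
            .nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1"))
            .build();
    routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable();
    clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();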


@ -66,8 +66,8 @@ public class PreferLocalPrimariesToRelocatingPrimariesTests extends ESAllocation
logger.info("adding two nodes and performing rerouting till all are allocated");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1", singletonMap("tag1", "value1")))
.put(newNode("node2", singletonMap("tag1", "value2")))).build();
.add(newNode("node1", singletonMap("tag1", "value1")))
.add(newNode("node2", singletonMap("tag1", "value2")))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -92,7 +92,7 @@ public class PreferLocalPrimariesToRelocatingPrimariesTests extends ESAllocation
.build()))
.build();
clusterState = ClusterState.builder(clusterState).metaData(metaData).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("[{}] primaries should be still started but [{}] other primaries should be unassigned", numberOfShards, numberOfShards);
@ -102,7 +102,7 @@ public class PreferLocalPrimariesToRelocatingPrimariesTests extends ESAllocation
logger.info("start node back up");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node1", singletonMap("tag1", "value1")))).build();
.add(newNode("node1", singletonMap("tag1", "value1")))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();


@ -62,7 +62,7 @@ public class PreferPrimaryAllocationTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("adding two nodes and performing rerouting till all are allocated");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();


@ -58,11 +58,11 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
RoutingAllocation.Result result = strategy.reroute(clusterState, "reroute");
clusterState = ClusterState.builder(clusterState).routingResult(result).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
result = strategy.reroute(clusterState, "reroute");
clusterState = ClusterState.builder(clusterState).routingResult(result).build();
@ -77,9 +77,9 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).routingResult(result).build();
logger.info("Adding third node and reroute and kill first node");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3")).remove("node1")).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3")).remove("node1")).build();
RoutingTable prevRoutingTable = clusterState.routingTable();
result = strategy.reroute(clusterState, "reroute");
result = strategy.deassociateDeadNodes(clusterState, true, "reroute");
clusterState = ClusterState.builder(clusterState).routingResult(result).build();
routingNodes = clusterState.getRoutingNodes();
routingTable = clusterState.routingTable();
@ -111,7 +111,7 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute");
clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
@ -131,9 +131,9 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase {
String nodeIdToFail = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId();
String nodeIdRemaining = nodeIdToFail.equals("node1") ? "node2" : "node1";
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode(nodeIdRemaining))
.add(newNode(nodeIdRemaining))
).build();
rerouteResult = allocation.reroute(clusterState, "reroute");
rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute");
clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build();
routingNodes = clusterState.getRoutingNodes();


@ -61,7 +61,7 @@ public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ESAllocationTes
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -73,7 +73,7 @@ public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ESAllocationTes
assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5));
logger.info("start another node, replica will start recovering form primary");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -81,7 +81,7 @@ public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ESAllocationTes
assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(5));
logger.info("start another node, make sure the primary is not relocated");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();


@ -93,19 +93,25 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase {
int numNodes = scaledRandomIntBetween(1, 3);
for (int j = 0; j < numNodes; j++) {
logger.info("adding node [{}]", nodeIdCounter);
newNodesBuilder.put(newNode("NODE_" + (nodeIdCounter++)));
newNodesBuilder.add(newNode("NODE_" + (nodeIdCounter++)));
}
}
boolean nodesRemoved = false;
if (nodeIdCounter > 1 && rarely()) {
int nodeId = scaledRandomIntBetween(0, nodeIdCounter - 2);
logger.info("removing node [{}]", nodeId);
newNodesBuilder.remove("NODE_" + nodeId);
nodesRemoved = true;
}
stateBuilder.nodes(newNodesBuilder.build());
clusterState = stateBuilder.build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
if (nodesRemoved) {
routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable();
} else {
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
}
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
if (clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size() > 0) {
routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))
@ -119,7 +125,7 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase {
DiscoveryNodes.Builder newNodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
for (int j = 0; j < (maxNumReplicas - clusterState.nodes().getSize()); j++) {
logger.info("adding node [{}]", nodeIdCounter);
newNodesBuilder.put(newNode("NODE_" + (nodeIdCounter++)));
newNodesBuilder.add(newNode("NODE_" + (nodeIdCounter++)));
}
stateBuilder.nodes(newNodesBuilder.build());
clusterState = stateBuilder.build();


@ -99,7 +99,7 @@ public class RebalanceAfterActiveTests extends ESAllocationTestCase {
}
logger.info("start two nodes and fully start the shards");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -126,7 +126,7 @@ public class RebalanceAfterActiveTests extends ESAllocationTestCase {
logger.info("now, start 8 more nodes, and check that no rebalancing/relocation have happened");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3")).put(newNode("node4")).put(newNode("node5")).put(newNode("node6")).put(newNode("node7")).put(newNode("node8")).put(newNode("node9")).put(newNode("node10")))
.add(newNode("node3")).add(newNode("node4")).add(newNode("node5")).add(newNode("node6")).add(newNode("node7")).add(newNode("node8")).add(newNode("node9")).add(newNode("node10")))
.build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();


@ -68,7 +68,7 @@ public class ReplicaAllocatedAfterPrimaryTests extends ESAllocationTestCase {
assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
logger.info("Adding one node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();


@ -63,7 +63,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
logger.info("Adding three node and performing rerouting");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build();
routingNodes = clusterState.getRoutingNodes();
assertThat(assertShardStats(routingNodes), equalTo(true));
@ -133,7 +133,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding one node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -141,7 +141,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
.nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -169,7 +169,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
.nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -226,7 +226,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
logger.info("Adding three node and performing rerouting");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build();
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build();
RoutingNodes routingNodes = clusterState.getRoutingNodes();
assertThat(assertShardStats(routingNodes), equalTo(true));
@ -371,7 +371,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
logger.info("kill one node");
IndexShardRoutingTable indexShardRoutingTable = routingTable.index("test").shard(0);
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(indexShardRoutingTable.primaryShard().currentNodeId())).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
routingNodes = clusterState.getRoutingNodes();


@ -63,9 +63,9 @@ public class SameShardRoutingTests extends ESAllocationTestCase {
logger.info("--> adding two nodes with the same host");
clusterState = ClusterState.builder(clusterState).nodes(
DiscoveryNodes.builder()
.put(new DiscoveryNode("node1", "node1", "node1", "test1", "test1", LocalTransportAddress.buildUnique(), emptyMap(),
.add(new DiscoveryNode("node1", "node1", "node1", "test1", "test1", LocalTransportAddress.buildUnique(), emptyMap(),
MASTER_DATA_ROLES, Version.CURRENT))
.put(new DiscoveryNode("node2", "node2", "node2", "test1", "test1", LocalTransportAddress.buildUnique(), emptyMap(),
.add(new DiscoveryNode("node2", "node2", "node2", "test1", "test1", LocalTransportAddress.buildUnique(), emptyMap(),
MASTER_DATA_ROLES, Version.CURRENT))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -82,7 +82,7 @@ public class SameShardRoutingTests extends ESAllocationTestCase {
logger.info("--> add another node, with a different host, replicas will be allocating");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(new DiscoveryNode("node3", "node3", "node3", "test2", "test2", LocalTransportAddress.buildUnique(), emptyMap(),
.add(new DiscoveryNode("node3", "node3", "node3", "test2", "test2", LocalTransportAddress.buildUnique(), emptyMap(),
MASTER_DATA_ROLES, Version.CURRENT))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();


@ -57,7 +57,7 @@ public class ShardVersioningTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("start two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();


@ -63,7 +63,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -107,7 +107,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -170,7 +170,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("Adding one node and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -195,7 +195,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build();
logger.info("Add another one node and reroute");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();


@ -80,7 +80,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
logger.info("Adding one node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -112,7 +112,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1"));
logger.info("Starting another node and making sure nothing changed");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -128,7 +128,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(routingTable != prevRoutingTable, equalTo(true));
@ -139,7 +139,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node2"));
logger.info("Start another node, make sure that things remain the same (shard is in node2 and initializing)");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -181,7 +181,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue());
logger.info("Adding one node and rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -244,7 +244,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
List<DiscoveryNode> nodes = new ArrayList<>();
for (int i = 0; i < (numberOfIndices / 2); i++) {
nodesBuilder.put(newNode("node" + i));
nodesBuilder.add(newNode("node" + i));
}
RoutingTable prevRoutingTable = routingTable;
clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
@ -282,7 +282,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
logger.info("Adding additional " + (numberOfIndices / 2) + " nodes, nothing should change");
nodesBuilder = DiscoveryNodes.builder(clusterState.nodes());
for (int i = (numberOfIndices / 2); i < numberOfIndices; i++) {
nodesBuilder.put(newNode("node" + i));
nodesBuilder.add(newNode("node" + i));
}
prevRoutingTable = routingTable;
clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build();
@ -348,7 +348,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
logger.info("Starting 3 nodes and rerouting");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")))
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")))
.build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -369,7 +369,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
logger.info("Start two more nodes, things should remain the same");
clusterState = ClusterState.builder(clusterState)
.nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node4")).put(newNode("node5")))
.nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node4")).add(newNode("node5")))
.build();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();


@ -67,7 +67,7 @@ public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase {
assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue());
logger.info("Adding one node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -84,7 +84,7 @@ public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase {
assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue());
logger.info("Add another node and perform rerouting, nothing will happen since primary shards not started");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -134,7 +134,7 @@ public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(prevRoutingTable != routingTable, equalTo(true));
@ -150,7 +150,7 @@ public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase {
logger.info("Start another node, backup shard should start initializing");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();


@ -51,7 +51,7 @@ public class StartedShardsRoutingTests extends ESAllocationTestCase {
.build();
final Index index = indexMetaData.getIndex();
ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")))
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
.metaData(MetaData.builder().put(indexMetaData, false));
final ShardRouting initShard = TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.INITIALIZING);


@ -80,7 +80,7 @@ public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase {
}
logger.info("Adding one node and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -99,7 +99,7 @@ public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase {
}
logger.info("Add another node and perform rerouting, nothing will happen since primary not started");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -152,7 +152,7 @@ public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase {
assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(10));
logger.info("Add another node and perform rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();


@ -66,7 +66,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("start one node, do reroute, only 3 should initialize");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -125,7 +125,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("start one node, do reroute, only 3 should initialize");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -150,7 +150,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase {
assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(5));
logger.info("start another node, replicas should start being allocated");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -193,7 +193,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("start one node, do reroute, only 5 should initialize");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0));
@ -213,7 +213,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase {
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
logger.info("start another 2 nodes, 5 shards should be relocating - at most 5 are allowed per node");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2")).put(newNode("node3"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2")).add(newNode("node3"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -256,7 +256,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
logger.info("start one node, do reroute, only 1 should initialize");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -273,7 +273,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase {
assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(2));
logger.info("start one more node, first non-primary should start being allocated");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -291,7 +291,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase {
assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 0);
logger.info("start one more node, initializing second non-primary");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -301,7 +301,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase {
assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 1);
logger.info("start one more node");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node4"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node4"))).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();


@ -70,7 +70,7 @@ public class UpdateNumberOfReplicasTests extends ESAllocationTestCase {
logger.info("Adding two nodes and performing rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
RoutingTable prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -121,7 +121,7 @@ public class UpdateNumberOfReplicasTests extends ESAllocationTestCase {
assertThat(routingTable.index("test").shard(0).replicaShards().get(1).state(), equalTo(UNASSIGNED));
logger.info("Add another node and start the added replica");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build();
prevRoutingTable = routingTable;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();


@ -123,8 +123,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
logger.info("--> adding two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.add(newNode("node1"))
.add(newNode("node2"))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -156,7 +156,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
logger.info("--> adding node3");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3"))
.add(newNode("node3"))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -244,7 +244,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
logger.info("--> adding node4");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node4"))
.add(newNode("node4"))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -324,8 +324,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
logger.info("--> adding node1 and node2 node");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.add(newNode("node1"))
.add(newNode("node2"))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -395,7 +395,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
logger.info("--> adding node3");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3"))
.add(newNode("node3"))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -483,7 +483,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
logger.info("--> adding node4");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node4"))
.add(newNode("node4"))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -511,7 +511,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
logger.info("--> adding node5");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node5"))
.add(newNode("node5"))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -592,8 +592,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
.routingTable(routingTable).build();
logger.info("--> adding node1");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2")) // node2 is added because DiskThresholdDecider automatically ignore single-node clusters
.add(newNode("node1"))
.add(newNode("node2")) // node2 is added because DiskThresholdDecider automatically ignore single-node clusters
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -662,8 +662,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
.routingTable(routingTable).build();
logger.info("--> adding node1");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node3")) // node3 is added because DiskThresholdDecider automatically ignore single-node clusters
.add(newNode("node1"))
.add(newNode("node3")) // node3 is added because DiskThresholdDecider automatically ignore single-node clusters
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -770,8 +770,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
logger.info("--> adding two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.add(newNode("node1"))
.add(newNode("node2"))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -791,7 +791,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
logger.info("--> adding node3");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(newNode("node3"))
.add(newNode("node3"))
).build();
AllocationCommand relocate1 = new MoveAllocationCommand("test", 0, "node2", "node3");
@ -852,7 +852,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
MASTER_DATA_ROLES, Version.CURRENT);
DiscoveryNode discoveryNode2 = new DiscoveryNode("node2", new LocalTransportAddress("2"), emptyMap(),
MASTER_DATA_ROLES, Version.CURRENT);
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(discoveryNode1).put(discoveryNode2).build();
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build();
ClusterState baseClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData)
@ -969,7 +969,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
DiscoveryNode discoveryNode2 = new DiscoveryNode("", "node2", new LocalTransportAddress("2"), emptyMap(),
singleton(DiscoveryNode.Role.DATA), Version.CURRENT);
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(discoveryNode1).put(discoveryNode2).build();
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build();
ClusterState baseClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(metaData)
.routingTable(routingTable)
@ -1035,7 +1035,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
DiscoveryNode discoveryNode3 = new DiscoveryNode("", "node3", new LocalTransportAddress("3"), emptyMap(),
singleton(DiscoveryNode.Role.DATA), Version.CURRENT);
ClusterState updateClusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(discoveryNode3)).build();
.add(discoveryNode3)).build();
firstRouting = TestShardRouting.newShardRouting("test", 0, "node2", null, null, true, ShardRoutingState.STARTED);
secondRouting = TestShardRouting.newShardRouting("test", 1, "node2", "node3", null, true, ShardRoutingState.RELOCATING);


@ -121,8 +121,8 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(node_0)
.put(node_1)
.add(node_0)
.add(node_1)
).build();
// actual test -- after all that bloat :)
@ -186,8 +186,8 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
logger.info("--> adding two nodes");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(node_0)
.put(node_1)
.add(node_0)
.add(node_1)
).build();
// actual test -- after all that bloat :)
@ -317,7 +317,7 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase {
.metaData(metaData).routingTable(routingTableBuilder.build()).build();
AllocationService allocationService = createAllocationService();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")))
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")))
.build();
RoutingAllocation.Result result = allocationService.reroute(clusterState, "foo");
clusterState = ClusterState.builder(clusterState).routingTable(result.routingTable()).build();


@ -72,8 +72,8 @@ public class EnableAllocationTests extends ESAllocationTestCase {
logger.info("--> adding two nodes and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.add(newNode("node1"))
.add(newNode("node2"))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -100,8 +100,8 @@ public class EnableAllocationTests extends ESAllocationTestCase {
logger.info("--> adding two nodes do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.add(newNode("node1"))
.add(newNode("node2"))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -134,8 +134,8 @@ public class EnableAllocationTests extends ESAllocationTestCase {
logger.info("--> adding two nodes and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.add(newNode("node1"))
.add(newNode("node2"))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -179,8 +179,8 @@ public class EnableAllocationTests extends ESAllocationTestCase {
logger.info("--> adding one nodes and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.add(newNode("node1"))
.add(newNode("node2"))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -198,9 +198,9 @@ public class EnableAllocationTests extends ESAllocationTestCase {
logger.info("--> adding one nodes and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.put(newNode("node3"))
.add(newNode("node1"))
.add(newNode("node2"))
.add(newNode("node3"))
).build();
ClusterState prevState = clusterState;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -279,8 +279,8 @@ public class EnableAllocationTests extends ESAllocationTestCase {
logger.info("--> adding one nodes and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.add(newNode("node1"))
.add(newNode("node2"))
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
@ -293,9 +293,9 @@ public class EnableAllocationTests extends ESAllocationTestCase {
logger.info("--> adding one nodes and do rerouting");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.put(newNode("node3"))
.add(newNode("node1"))
.add(newNode("node2"))
.add(newNode("node3"))
).build();
ClusterState prevState = clusterState;
routingTable = strategy.reroute(clusterState, "reroute").routingTable();


@ -47,7 +47,7 @@ public class ClusterSerializationTests extends ESAllocationTestCase {
.addAsNew(metaData.index("test"))
.build();
DiscoveryNodes nodes = DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).localNodeId("node1").masterNodeId("node2").build();
DiscoveryNodes nodes = DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).localNodeId("node1").masterNodeId("node2").build();
ClusterState clusterState = ClusterState.builder(new ClusterName("clusterName1")).nodes(nodes).metaData(metaData).routingTable(routingTable).build();
@ -70,7 +70,7 @@ public class ClusterSerializationTests extends ESAllocationTestCase {
.addAsNew(metaData.index("test"))
.build();
DiscoveryNodes nodes = DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).build();
DiscoveryNodes nodes = DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).build();
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes)
.metaData(metaData).routingTable(routingTable).build();


@ -50,7 +50,7 @@ public class ClusterStateToStringTests extends ESAllocationTestCase {
.addAsNew(metaData.index("test_idx"))
.build();
DiscoveryNodes nodes = DiscoveryNodes.builder().put(new DiscoveryNode("node_foo", LocalTransportAddress.buildUnique(),
DiscoveryNodes nodes = DiscoveryNodes.builder().add(new DiscoveryNode("node_foo", LocalTransportAddress.buildUnique(),
emptyMap(), emptySet(), Version.CURRENT)).localNodeId("node_foo").masterNodeId("node_foo").build();
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes)


@ -35,7 +35,6 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardShuffler;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
@ -247,8 +246,8 @@ public class RoutingIteratorTests extends ESAllocationTestCase {
node2Attributes.put("rack_id", "rack_2");
node2Attributes.put("zone", "zone2");
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1", unmodifiableMap(node1Attributes)))
.put(newNode("node2", unmodifiableMap(node2Attributes)))
.add(newNode("node1", unmodifiableMap(node1Attributes)))
.add(newNode("node2", unmodifiableMap(node2Attributes)))
.localNodeId("node1")
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -295,8 +294,8 @@ public class RoutingIteratorTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("fred", "node1", singletonMap("disk", "ebs")))
.put(newNode("barney", "node2", singletonMap("disk", "ephemeral")))
.add(newNode("fred", "node1", singletonMap("disk", "ebs")))
.add(newNode("barney", "node2", singletonMap("disk", "ephemeral")))
.localNodeId("node1")
).build();
@ -369,8 +368,8 @@ public class RoutingIteratorTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.add(newNode("node1"))
.add(newNode("node2"))
.localNodeId("node1")
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();
@ -442,9 +441,9 @@ public class RoutingIteratorTests extends ESAllocationTestCase {
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder()
.put(newNode("node1"))
.put(newNode("node2"))
.put(newNode("node3"))
.add(newNode("node1"))
.add(newNode("node2"))
.add(newNode("node3"))
.localNodeId("node1")
).build();
routingTable = strategy.reroute(clusterState, "reroute").routingTable();


@ -1,92 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.rounding;
import org.elasticsearch.test.ESTestCase;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
public class RoundingTests extends ESTestCase {
/**
* simple test case to illustrate how Rounding.Interval works on readable input
*/
public void testInterval() {
int interval = 10;
Rounding.Interval rounding = new Rounding.Interval(interval);
int value = 24;
final long r = rounding.round(24);
String message = "round(" + value + ", interval=" + interval + ") = " + r;
assertEquals(value/interval * interval, r);
assertEquals(message, 0, r % interval);
}
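// Editor's sketch (hypothetical helper, not part of this file): the contract
// asserted above is plain floor division; round(v) is the greatest multiple
// of `interval` that is <= v, and Math.floorDiv keeps that true for negative
// input as well, e.g. roundToInterval(-3, 10) == -10.
static long roundToInterval(long value, long interval) {
return Math.floorDiv(value, interval) * interval;
}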
public void testIntervalRandom() {
final long interval = randomIntBetween(1, 100);
Rounding.Interval rounding = new Rounding.Interval(interval);
for (int i = 0; i < 1000; ++i) {
long l = Math.max(randomLong(), Long.MIN_VALUE + interval);
final long r = rounding.round(l);
String message = "round(" + l + ", interval=" + interval + ") = " + r;
assertEquals(message, 0, r % interval);
assertThat(message, r, lessThanOrEqualTo(l));
assertThat(message, r + interval, greaterThan(l));
}
}
/**
* Simple test case to illustrate how Rounding.Offset works on readable input.
* offset shifts the input value back before rounding (so here 6 - 7 -> -1)
* then shifts the rounded value back (here -10 -> -3)
*/
public void testOffsetRounding() {
final long interval = 10;
final long offset = 7;
Rounding.OffsetRounding rounding = new Rounding.OffsetRounding(new Rounding.Interval(interval), offset);
assertEquals(-3, rounding.round(6));
assertEquals(7, rounding.nextRoundingValue(-3));
assertEquals(7, rounding.round(7));
assertEquals(17, rounding.nextRoundingValue(7));
assertEquals(7, rounding.round(16));
assertEquals(17, rounding.round(17));
assertEquals(27, rounding.nextRoundingValue(17));
}
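// Editor's sketch of the offset behaviour described in the comment above
// (hypothetical helper): shift the input back by the offset, round to the
// interval, then shift the result forward again. With interval=10, offset=7:
// roundWithOffset(6, 10, 7) == -3, matching the first assertion.
static long roundWithOffset(long value, long interval, long offset) {
return Math.floorDiv(value - offset, interval) * interval + offset;
}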
/**
* test OffsetRounding with an internal interval rounding on random inputs
*/
public void testOffsetRoundingRandom() {
for (int i = 0; i < 1000; ++i) {
final long interval = randomIntBetween(1, 100);
Rounding.Interval internalRounding = new Rounding.Interval(interval);
final long offset = randomIntBetween(-100, 100);
Rounding.OffsetRounding rounding = new Rounding.OffsetRounding(internalRounding, offset);
long safetyMargin = Math.abs(interval) + Math.abs(offset); // to prevent range overflow
long value = Math.max(randomLong() - safetyMargin, Long.MIN_VALUE + safetyMargin);
final long r_value = rounding.round(value);
final long nextRoundingValue = rounding.nextRoundingValue(r_value);
assertThat("Rounding should be idempotent", r_value, equalTo(rounding.round(r_value)));
assertThat("Rounded value smaller than unrounded, regardless of offset", r_value - offset, lessThanOrEqualTo(value - offset));
assertThat("Rounded value <= value < next interval start", r_value + interval, greaterThan(value));
assertThat("NextRounding value should be interval from rounded value", r_value + interval, equalTo(nextRoundingValue));
}
}
}

View File

@ -20,8 +20,8 @@
package org.elasticsearch.common.rounding;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.rounding.TimeZoneRounding.TimeIntervalRounding;
import org.elasticsearch.common.rounding.TimeZoneRounding.TimeUnitRounding;
import org.elasticsearch.common.rounding.Rounding.TimeIntervalRounding;
import org.elasticsearch.common.rounding.Rounding.TimeUnitRounding;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Description;
@ -47,29 +47,25 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo;
public class TimeZoneRoundingTests extends ESTestCase {
public void testUTCTimeUnitRounding() {
Rounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.MONTH_OF_YEAR).build();
Rounding tzRounding = Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).build();
DateTimeZone tz = DateTimeZone.UTC;
assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-01T00:00:00.000Z"), tz));
assertThat(tzRounding.nextRoundingValue(time("2009-02-01T00:00:00.000Z")), isDate(time("2009-03-01T00:00:00.000Z"), tz));
tzRounding = TimeZoneRounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).build();
tzRounding = Rounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).build();
assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-09T00:00:00.000Z"), tz));
assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-16T00:00:00.000Z"), tz));
tzRounding = TimeZoneRounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).offset(-TimeValue.timeValueHours(24).millis()).build();
assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-08T00:00:00.000Z"), tz));
assertThat(tzRounding.nextRoundingValue(time("2012-01-08T00:00:00.000Z")), isDate(time("2012-01-15T00:00:00.000Z"), tz));
}
public void testUTCIntervalRounding() {
Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(12)).build();
Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(12)).build();
DateTimeZone tz = DateTimeZone.UTC;
assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-03T00:00:00.000Z"), tz));
assertThat(tzRounding.nextRoundingValue(time("2009-02-03T00:00:00.000Z")), isDate(time("2009-02-03T12:00:00.000Z"), tz));
assertThat(tzRounding.round(time("2009-02-03T13:01:01")), isDate(time("2009-02-03T12:00:00.000Z"), tz));
assertThat(tzRounding.nextRoundingValue(time("2009-02-03T12:00:00.000Z")), isDate(time("2009-02-04T00:00:00.000Z"), tz));
tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(48)).build();
tzRounding = Rounding.builder(TimeValue.timeValueHours(48)).build();
assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-03T00:00:00.000Z"), tz));
assertThat(tzRounding.nextRoundingValue(time("2009-02-03T00:00:00.000Z")), isDate(time("2009-02-05T00:00:00.000Z"), tz));
assertThat(tzRounding.round(time("2009-02-05T13:01:01")), isDate(time("2009-02-05T00:00:00.000Z"), tz));
@ -77,11 +73,11 @@ public class TimeZoneRoundingTests extends ESTestCase {
}
/**
* test TimeIntervalTimeZoneRounding, (interval < 12h) with time zone shift
* test TimeIntervalRounding, (interval < 12h) with time zone shift
*/
public void testTimeIntervalTimeZoneRounding() {
public void testTimeIntervalRounding() {
DateTimeZone tz = DateTimeZone.forOffsetHours(-1);
Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(6)).timeZone(tz).build();
Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(6)).timeZone(tz).build();
assertThat(tzRounding.round(time("2009-02-03T00:01:01")), isDate(time("2009-02-02T19:00:00.000Z"), tz));
assertThat(tzRounding.nextRoundingValue(time("2009-02-02T19:00:00.000Z")), isDate(time("2009-02-03T01:00:00.000Z"), tz));
@ -90,11 +86,11 @@ public class TimeZoneRoundingTests extends ESTestCase {
}
/**
* test DayIntervalTimeZoneRounding, (interval >= 12h) with time zone shift
* test DayIntervalRounding, (interval >= 12h) with time zone shift
*/
public void testDayIntervalTimeZoneRounding() {
public void testDayIntervalRounding() {
DateTimeZone tz = DateTimeZone.forOffsetHours(-8);
Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(12)).timeZone(tz).build();
Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(12)).timeZone(tz).build();
assertThat(tzRounding.round(time("2009-02-03T00:01:01")), isDate(time("2009-02-02T20:00:00.000Z"), tz));
assertThat(tzRounding.nextRoundingValue(time("2009-02-02T20:00:00.000Z")), isDate(time("2009-02-03T08:00:00.000Z"), tz));
@ -102,37 +98,37 @@ public class TimeZoneRoundingTests extends ESTestCase {
assertThat(tzRounding.nextRoundingValue(time("2009-02-03T08:00:00.000Z")), isDate(time("2009-02-03T20:00:00.000Z"), tz));
}
public void testDayTimeZoneRounding() {
public void testDayRounding() {
int timezoneOffset = -2;
Rounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(DateTimeZone.forOffsetHours(timezoneOffset))
Rounding tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(DateTimeZone.forOffsetHours(timezoneOffset))
.build();
assertThat(tzRounding.round(0), equalTo(0L - TimeValue.timeValueHours(24 + timezoneOffset).millis()));
assertThat(tzRounding.nextRoundingValue(0L - TimeValue.timeValueHours(24 + timezoneOffset).millis()), equalTo(0L - TimeValue
.timeValueHours(timezoneOffset).millis()));
DateTimeZone tz = DateTimeZone.forID("-08:00");
tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build();
tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build();
assertThat(tzRounding.round(time("2012-04-01T04:15:30Z")), isDate(time("2012-03-31T08:00:00Z"), tz));
tzRounding = TimeZoneRounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build();
tzRounding = Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build();
assertThat(tzRounding.round(time("2012-04-01T04:15:30Z")), equalTo(time("2012-03-01T08:00:00Z")));
// date in Feb-3rd, but still in Feb-2nd in -02:00 timezone
tz = DateTimeZone.forID("-02:00");
tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build();
tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build();
assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-02T02:00:00"), tz));
assertThat(tzRounding.nextRoundingValue(time("2009-02-02T02:00:00")), isDate(time("2009-02-03T02:00:00"), tz));
// date in Feb-3rd, also in -02:00 timezone
tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build();
tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build();
assertThat(tzRounding.round(time("2009-02-03T02:01:01")), isDate(time("2009-02-03T02:00:00"), tz));
assertThat(tzRounding.nextRoundingValue(time("2009-02-03T02:00:00")), isDate(time("2009-02-04T02:00:00"), tz));
}
public void testTimeTimeZoneRounding() {
public void testTimeRounding() {
// hour unit
DateTimeZone tz = DateTimeZone.forOffsetHours(-2);
Rounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build();
Rounding tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build();
assertThat(tzRounding.round(0), equalTo(0L));
assertThat(tzRounding.nextRoundingValue(0L), equalTo(TimeValue.timeValueHours(1L).getMillis()));
@ -144,23 +140,23 @@ public class TimeZoneRoundingTests extends ESTestCase {
Rounding tzRounding;
// testing savings to non savings switch
DateTimeZone cet = DateTimeZone.forID("CET");
tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build();
tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build();
assertThat(tzRounding.round(time("2014-10-26T01:01:01", cet)), isDate(time("2014-10-26T01:00:00+02:00"), cet));
assertThat(tzRounding.nextRoundingValue(time("2014-10-26T01:00:00", cet)),isDate(time("2014-10-26T02:00:00+02:00"), cet));
assertThat(tzRounding.nextRoundingValue(time("2014-10-26T02:00:00", cet)), isDate(time("2014-10-26T02:00:00+01:00"), cet));
// testing non savings to savings switch
tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build();
tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build();
assertThat(tzRounding.round(time("2014-03-30T01:01:01", cet)), isDate(time("2014-03-30T01:00:00+01:00"), cet));
assertThat(tzRounding.nextRoundingValue(time("2014-03-30T01:00:00", cet)), isDate(time("2014-03-30T03:00:00", cet), cet));
assertThat(tzRounding.nextRoundingValue(time("2014-03-30T03:00:00", cet)), isDate(time("2014-03-30T04:00:00", cet), cet));
// testing non savings to savings switch (America/Chicago)
DateTimeZone chg = DateTimeZone.forID("America/Chicago");
Rounding tzRounding_utc = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.UTC).build();
Rounding tzRounding_utc = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.UTC).build();
assertThat(tzRounding.round(time("2014-03-09T03:01:01", chg)), isDate(time("2014-03-09T03:00:00", chg), chg));
Rounding tzRounding_chg = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(chg).build();
Rounding tzRounding_chg = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(chg).build();
assertThat(tzRounding_chg.round(time("2014-03-09T03:01:01", chg)), isDate(time("2014-03-09T03:00:00", chg), chg));
// testing savings to non savings switch 2013 (America/Chicago)
@ -173,18 +169,21 @@ public class TimeZoneRoundingTests extends ESTestCase {
}
/**
* Randomized test on TimeUnitRounding.
* Test uses random {@link DateTimeUnit} and {@link DateTimeZone} and often (50% of the time) chooses
* test dates that are exactly on or close to offset changes (e.g. DST) in the chosen time zone.
* Randomized test on TimeUnitRounding. Test uses random
* {@link DateTimeUnit} and {@link DateTimeZone} and often (50% of the time)
* chooses test dates that are exactly on or close to offset changes (e.g.
* DST) in the chosen time zone.
*
* It rounds the test date down and up and performs various checks on the rounding unit interval that is
* defined by this. Assumptions tested are described in {@link #assertInterval(long, long, long, TimeZoneRounding, DateTimeZone)}
* It rounds the test date down and up and performs various checks on the
* rounding unit interval that is defined by this. Assumptions tested are
* described in
* {@link #assertInterval(long, long, long, Rounding, DateTimeZone)}
*/
public void testTimeZoneRoundingRandom() {
public void testRoundingRandom() {
for (int i = 0; i < 1000; ++i) {
DateTimeUnit timeUnit = randomTimeUnit();
DateTimeZone tz = randomDateTimeZone();
TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(timeUnit, tz);
Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz);
long date = Math.abs(randomLong() % (2 * (long) 10e11)); // 1970-01-01T00:00:00Z - 2033-05-18T05:33:20.000+02:00
long unitMillis = timeUnit.field(tz).getDurationField().getUnitMillis();
if (randomBoolean()) {
@ -226,7 +225,7 @@ public class TimeZoneRoundingTests extends ESTestCase {
public void testTimeIntervalCET_DST_End() {
long interval = TimeUnit.MINUTES.toMillis(20);
DateTimeZone tz = DateTimeZone.forID("CET");
TimeZoneRounding rounding = new TimeIntervalRounding(interval, tz);
Rounding rounding = new TimeIntervalRounding(interval, tz);
assertThat(rounding.round(time("2015-10-25T01:55:00+02:00")), isDate(time("2015-10-25T01:40:00+02:00"), tz));
assertThat(rounding.round(time("2015-10-25T02:15:00+02:00")), isDate(time("2015-10-25T02:00:00+02:00"), tz));
@ -246,7 +245,7 @@ public class TimeZoneRoundingTests extends ESTestCase {
public void testTimeIntervalCET_DST_Start() {
long interval = TimeUnit.MINUTES.toMillis(20);
DateTimeZone tz = DateTimeZone.forID("CET");
TimeZoneRounding rounding = new TimeIntervalRounding(interval, tz);
Rounding rounding = new TimeIntervalRounding(interval, tz);
// test DST start
assertThat(rounding.round(time("2016-03-27T01:55:00+01:00")), isDate(time("2016-03-27T01:40:00+01:00"), tz));
assertThat(rounding.round(time("2016-03-27T02:00:00+01:00")), isDate(time("2016-03-27T03:00:00+02:00"), tz));
@ -263,7 +262,7 @@ public class TimeZoneRoundingTests extends ESTestCase {
public void testTimeInterval_Kathmandu_DST_Start() {
long interval = TimeUnit.MINUTES.toMillis(20);
DateTimeZone tz = DateTimeZone.forID("Asia/Kathmandu");
TimeZoneRounding rounding = new TimeIntervalRounding(interval, tz);
Rounding rounding = new TimeIntervalRounding(interval, tz);
assertThat(rounding.round(time("1985-12-31T23:55:00+05:30")), isDate(time("1985-12-31T23:40:00+05:30"), tz));
assertThat(rounding.round(time("1986-01-01T00:16:00+05:45")), isDate(time("1986-01-01T00:15:00+05:45"), tz));
assertThat(time("1986-01-01T00:15:00+05:45") - time("1985-12-31T23:40:00+05:30"), equalTo(TimeUnit.MINUTES.toMillis(20)));
@ -281,7 +280,7 @@ public class TimeZoneRoundingTests extends ESTestCase {
public void testIntervalRounding_NotDivisibleInteval() {
DateTimeZone tz = DateTimeZone.forID("CET");
long interval = TimeUnit.MINUTES.toMillis(14);
TimeZoneRounding rounding = new TimeZoneRounding.TimeIntervalRounding(interval, tz);
Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz);
assertThat(rounding.round(time("2016-03-27T01:41:00+01:00")), isDate(time("2016-03-27T01:30:00+01:00"), tz));
assertThat(rounding.round(time("2016-03-27T01:51:00+01:00")), isDate(time("2016-03-27T01:44:00+01:00"), tz));
@ -298,7 +297,7 @@ public class TimeZoneRoundingTests extends ESTestCase {
public void testIntervalRounding_HalfDay_DST() {
DateTimeZone tz = DateTimeZone.forID("CET");
long interval = TimeUnit.HOURS.toMillis(12);
TimeZoneRounding rounding = new TimeZoneRounding.TimeIntervalRounding(interval, tz);
Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz);
assertThat(rounding.round(time("2016-03-26T01:00:00+01:00")), isDate(time("2016-03-26T00:00:00+01:00"), tz));
assertThat(rounding.round(time("2016-03-26T13:00:00+01:00")), isDate(time("2016-03-26T12:00:00+01:00"), tz));
@ -316,7 +315,7 @@ public class TimeZoneRoundingTests extends ESTestCase {
TimeUnit unit = randomFrom(new TimeUnit[] {TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS});
long interval = unit.toMillis(randomIntBetween(1, 365));
DateTimeZone tz = randomDateTimeZone();
TimeZoneRounding rounding = new TimeZoneRounding.TimeIntervalRounding(interval, tz);
Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz);
long mainDate = Math.abs(randomLong() % (2 * (long) 10e11)); // 1970-01-01T00:00:00Z - 2033-05-18T05:33:20.000+02:00
if (randomBoolean()) {
mainDate = nastyDate(mainDate, tz, interval);
@ -356,8 +355,8 @@ public class TimeZoneRoundingTests extends ESTestCase {
public void testIntervalRoundingMonotonic_CET() {
long interval = TimeUnit.MINUTES.toMillis(45);
DateTimeZone tz = DateTimeZone.forID("CET");
TimeZoneRounding rounding = new TimeZoneRounding.TimeIntervalRounding(interval, tz);
List<Tuple<String, String>> expectedDates = new ArrayList<Tuple<String, String>>();
Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz);
List<Tuple<String, String>> expectedDates = new ArrayList<>();
// first date is the date to be rounded, second the expected result
expectedDates.add(new Tuple<>("2011-10-30T01:40:00.000+02:00", "2011-10-30T01:30:00.000+02:00"));
expectedDates.add(new Tuple<>("2011-10-30T02:02:30.000+02:00", "2011-10-30T01:30:00.000+02:00"));
@ -387,7 +386,7 @@ public class TimeZoneRoundingTests extends ESTestCase {
public void testAmbiguousHoursAfterDSTSwitch() {
Rounding tzRounding;
final DateTimeZone tz = DateTimeZone.forID("Asia/Jerusalem");
tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build();
tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build();
assertThat(tzRounding.round(time("2014-10-26T00:30:00+03:00")), isDate(time("2014-10-26T00:00:00+03:00"), tz));
assertThat(tzRounding.round(time("2014-10-26T01:30:00+03:00")), isDate(time("2014-10-26T01:00:00+03:00"), tz));
// the utc date for "2014-10-25T03:00:00+03:00" and "2014-10-25T03:00:00+02:00" is the same, local time turns back 1h here
@ -396,7 +395,7 @@ public class TimeZoneRoundingTests extends ESTestCase {
assertThat(tzRounding.round(time("2014-10-26T02:30:00+02:00")), isDate(time("2014-10-26T02:00:00+02:00"), tz));
// Day interval
tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build();
tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build();
assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-11-11T00:00:00", tz), tz));
// DST on
assertThat(tzRounding.round(time("2014-08-11T17:00:00", tz)), isDate(time("2014-08-11T00:00:00", tz), tz));
@ -406,17 +405,17 @@ public class TimeZoneRoundingTests extends ESTestCase {
assertThat(tzRounding.round(time("2015-03-27T17:00:00", tz)), isDate(time("2015-03-27T00:00:00", tz), tz));
// Month interval
tzRounding = TimeZoneRounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build();
tzRounding = Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build();
assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-11-01T00:00:00", tz), tz));
// DST on
assertThat(tzRounding.round(time("2014-10-10T17:00:00", tz)), isDate(time("2014-10-01T00:00:00", tz), tz));
// Year interval
tzRounding = TimeZoneRounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build();
tzRounding = Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build();
assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-01-01T00:00:00", tz), tz));
// Two timestamps in same year and different timezone offset ("Double buckets" issue - #9491)
tzRounding = TimeZoneRounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build();
tzRounding = Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build();
assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)),
isDate(tzRounding.round(time("2014-08-11T17:00:00", tz)), tz));
}
@ -429,8 +428,8 @@ public class TimeZoneRoundingTests extends ESTestCase {
DateTimeZone tz = DateTimeZone.forID("America/Sao_Paulo");
long start = time("2014-10-18T20:50:00.000", tz);
long end = time("2014-10-19T01:00:00.000", tz);
Rounding tzRounding = new TimeZoneRounding.TimeUnitRounding(DateTimeUnit.MINUTES_OF_HOUR, tz);
Rounding dayTzRounding = new TimeZoneRounding.TimeIntervalRounding(60000, tz);
Rounding tzRounding = new Rounding.TimeUnitRounding(DateTimeUnit.MINUTES_OF_HOUR, tz);
Rounding dayTzRounding = new Rounding.TimeIntervalRounding(60000, tz);
for (long time = start; time < end; time = time + 60000) {
assertThat(tzRounding.nextRoundingValue(time), greaterThan(time));
assertThat(dayTzRounding.nextRoundingValue(time), greaterThan(time));
@ -442,7 +441,7 @@ public class TimeZoneRoundingTests extends ESTestCase {
// standard +/-1 hour DST transition, CET
DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY;
DateTimeZone tz = DateTimeZone.forID("CET");
TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(timeUnit, tz);
Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz);
// 29 Mar 2015 - Daylight Saving Time Started
// at 02:00:00 clocks were turned forward 1 hour to 03:00:00
@ -466,7 +465,7 @@ public class TimeZoneRoundingTests extends ESTestCase {
// which is not a round value for hourly rounding
DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY;
DateTimeZone tz = DateTimeZone.forID("Asia/Kathmandu");
TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(timeUnit, tz);
Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz);
assertInterval(time("1985-12-31T22:00:00.000+05:30"), time("1985-12-31T23:00:00.000+05:30"), rounding, 60, tz);
assertInterval(time("1985-12-31T23:00:00.000+05:30"), time("1986-01-01T01:00:00.000+05:45"), rounding, 105, tz);
@ -479,7 +478,7 @@ public class TimeZoneRoundingTests extends ESTestCase {
// at 02:00:00 clocks were turned backward 0:30 hours to Sunday, 3 March 1991, 01:30:00
DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY;
DateTimeZone tz = DateTimeZone.forID("Australia/Lord_Howe");
TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(timeUnit, tz);
Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz);
assertInterval(time("1991-03-03T00:00:00.000+11:00"), time("1991-03-03T01:00:00.000+11:00"), rounding, 60, tz);
assertInterval(time("1991-03-03T01:00:00.000+11:00"), time("1991-03-03T02:00:00.000+10:30"), rounding, 90, tz);
@ -499,7 +498,7 @@ public class TimeZoneRoundingTests extends ESTestCase {
// at 03:45:00 clocks were turned backward 1 hour to 02:45:00
DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY;
DateTimeZone tz = DateTimeZone.forID("Pacific/Chatham");
TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(timeUnit, tz);
Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz);
assertInterval(time("2015-04-05T02:00:00.000+13:45"), time("2015-04-05T03:00:00.000+13:45"), rounding, 60, tz);
assertInterval(time("2015-04-05T03:00:00.000+13:45"), time("2015-04-05T03:00:00.000+12:45"), rounding, 60, tz);
@ -514,7 +513,7 @@ public class TimeZoneRoundingTests extends ESTestCase {
}
}
private static void assertInterval(long rounded, long nextRoundingValue, TimeZoneRounding rounding, int minutes,
private static void assertInterval(long rounded, long nextRoundingValue, Rounding rounding, int minutes,
DateTimeZone tz) {
assertInterval(rounded, dateBetween(rounded, nextRoundingValue), nextRoundingValue, rounding, tz);
assertEquals(DateTimeConstants.MILLIS_PER_MINUTE * minutes, nextRoundingValue - rounded);
@ -527,7 +526,7 @@ public class TimeZoneRoundingTests extends ESTestCase {
* @param nextRoundingValue the expected upper end of the rounding interval
* @param rounding the rounding instance
*/
private static void assertInterval(long rounded, long unrounded, long nextRoundingValue, TimeZoneRounding rounding,
private static void assertInterval(long rounded, long unrounded, long nextRoundingValue, Rounding rounding,
DateTimeZone tz) {
assert rounded <= unrounded && unrounded <= nextRoundingValue;
assertThat("rounding should be idempotent ", rounding.round(rounded), isDate(rounded, tz));

View File

@ -155,8 +155,8 @@ public class ZenFaultDetectionTests extends ESTestCase {
private DiscoveryNodes buildNodesForA(boolean master) {
DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
builder.put(nodeA);
builder.put(nodeB);
builder.add(nodeA);
builder.add(nodeB);
builder.localNodeId(nodeA.getId());
builder.masterNodeId(master ? nodeA.getId() : nodeB.getId());
return builder.build();
@ -164,8 +164,8 @@ public class ZenFaultDetectionTests extends ESTestCase {
private DiscoveryNodes buildNodesForB(boolean master) {
DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
builder.put(nodeA);
builder.put(nodeB);
builder.add(nodeA);
builder.add(nodeB);
builder.localNodeId(nodeB.getId());
builder.masterNodeId(master ? nodeB.getId() : nodeA.getId());
return builder.build();

View File

@ -18,16 +18,22 @@
*/
package org.elasticsearch.discovery.zen;
import com.carrotsearch.randomizedtesting.annotations.Repeat;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
@ -38,6 +44,7 @@ import org.elasticsearch.common.util.concurrent.BaseFuture;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.membership.MembershipAction;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.test.junit.annotations.TestLogging;
@ -51,6 +58,7 @@ import org.junit.BeforeClass;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@ -63,13 +71,21 @@ import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static java.util.Collections.shuffle;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED;
import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
import static org.elasticsearch.test.ClusterServiceUtils.setState;
import static org.elasticsearch.test.ESAllocationTestCase.createAllocationService;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
@ -101,7 +117,7 @@ public class NodeJoinControllerTests extends ESTestCase {
// make sure we have a master
setState(clusterService, ClusterState.builder(clusterService.state()).nodes(
DiscoveryNodes.builder(initialNodes).masterNodeId(localNode.getId())));
nodeJoinController = new NodeJoinController(clusterService, new NoopAllocationService(Settings.EMPTY),
nodeJoinController = new NodeJoinController(clusterService, createAllocationService(Settings.EMPTY),
new ElectMasterService(Settings.EMPTY), new DiscoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY,
ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), Settings.EMPTY);
}
@ -412,7 +428,7 @@ public class NodeJoinControllerTests extends ESTestCase {
final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(state.nodes());
final DiscoveryNode other_node = new DiscoveryNode("other_node", LocalTransportAddress.buildUnique(),
emptyMap(), emptySet(), Version.CURRENT);
nodesBuilder.put(other_node);
nodesBuilder.add(other_node);
setState(clusterService, ClusterState.builder(state).nodes(nodesBuilder));
state = clusterService.state();
@ -524,60 +540,137 @@ public class NodeJoinControllerTests extends ESTestCase {
}
public void testRejectingJoinWithSameAddressButDifferentId() throws InterruptedException, ExecutionException {
addNodes(randomInt(5));
ClusterState state = clusterService.state();
final DiscoveryNode other_node = new DiscoveryNode("other_node", state.nodes().getLocalNode().getAddress(),
emptyMap(), emptySet(), Version.CURRENT);
final DiscoveryNode existing = randomFrom(StreamSupport.stream(state.nodes().spliterator(), false).collect(Collectors.toList()));
final DiscoveryNode other_node = new DiscoveryNode("other_node", existing.getAddress(), emptyMap(), emptySet(), Version.CURRENT);
ExecutionException e = expectThrows(ExecutionException.class, () -> joinNode(other_node));
assertThat(e.getMessage(), containsString("found existing node"));
}
public void testRejectingJoinWithSameIdButDifferentAddress() throws InterruptedException, ExecutionException {
public void testRejectingJoinWithSameIdButDifferentNode() throws InterruptedException, ExecutionException {
addNodes(randomInt(5));
ClusterState state = clusterService.state();
final DiscoveryNode other_node = new DiscoveryNode(state.nodes().getLocalNode().getId(),
new LocalTransportAddress(randomAsciiOfLength(20)), emptyMap(), emptySet(), Version.CURRENT);
ExecutionException e = expectThrows(ExecutionException.class, () -> joinNode(other_node));
assertThat(e.getMessage(), containsString("found existing node"));
}
public void testJoinWithSameIdSameAddressButDifferentMeta() throws InterruptedException, ExecutionException {
ClusterState state = clusterService.state();
final DiscoveryNode localNode = state.nodes().getLocalNode();
final DiscoveryNode existing = randomFrom(StreamSupport.stream(state.nodes().spliterator(), false).collect(Collectors.toList()));
final DiscoveryNode other_node = new DiscoveryNode(
randomBoolean() ? localNode.getName() : "other_name",
localNode.getId(), localNode.getAddress(),
randomBoolean() ? localNode.getAttributes() : Collections.singletonMap("attr", "other"),
randomBoolean() ? localNode.getRoles() : new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))),
randomBoolean() ? localNode.getVersion() : VersionUtils.randomVersion(random()));
randomBoolean() ? existing.getName() : "other_name",
existing.getId(),
randomBoolean() ? existing.getAddress() : LocalTransportAddress.buildUnique(),
randomBoolean() ? existing.getAttributes() : Collections.singletonMap("attr", "other"),
randomBoolean() ? existing.getRoles() : new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))),
randomBoolean() ? existing.getVersion() : VersionUtils.randomVersion(random()));
joinNode(other_node);
assertThat(clusterService.localNode(), equalTo(other_node));
ExecutionException e = expectThrows(ExecutionException.class, () -> joinNode(other_node));
assertThat(e.getMessage(), containsString("found existing node"));
}
static class NoopAllocationService extends AllocationService {
public void testRejectingRestartedNodeJoinsBeforeProcessingNodeLeft() throws InterruptedException, ExecutionException {
addNodes(randomInt(5));
ClusterState state = clusterService.state();
final DiscoveryNode existing = randomFrom(StreamSupport.stream(state.nodes().spliterator(), false).collect(Collectors.toList()));
joinNode(existing); // OK
public NoopAllocationService(Settings settings) {
super(settings, null, null, null, null);
final DiscoveryNode other_node = new DiscoveryNode(existing.getId(), existing.getAddress(), existing.getAttributes(),
existing.getRoles(), Version.CURRENT);
ExecutionException e = expectThrows(ExecutionException.class, () -> joinNode(other_node));
assertThat(e.getMessage(), containsString("found existing node"));
}
/**
* Tests that a node can become master even though the last cluster state it knows about
* contains nodes that conflict with the joins it received while becoming master
*/
public void testElectionBasedOnConflictingNodes() throws InterruptedException, ExecutionException {
final DiscoveryNode masterNode = clusterService.localNode();
final DiscoveryNode otherNode = new DiscoveryNode("other_node", LocalTransportAddress.buildUnique(), emptyMap(),
EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT);
// simulate master going down with stale nodes in its cluster state (for example when min master nodes is set to 2)
// also add some shards to that node
DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(clusterService.state().nodes());
discoBuilder.masterNodeId(null);
discoBuilder.add(otherNode);
ClusterState.Builder stateBuilder = ClusterState.builder(clusterService.state()).nodes(discoBuilder);
if (randomBoolean()) {
IndexMetaData indexMetaData = IndexMetaData.builder("test").settings(Settings.builder()
.put(SETTING_VERSION_CREATED, Version.CURRENT)
.put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 1)
.put(SETTING_CREATION_DATE, System.currentTimeMillis())).build();
stateBuilder.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded());
IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(indexMetaData.getIndex());
RoutingTable.Builder routing = new RoutingTable.Builder();
routing.addAsNew(indexMetaData);
final ShardId shardId = new ShardId("test", "_na_", 0);
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
final DiscoveryNode primaryNode = randomBoolean() ? masterNode : otherNode;
final DiscoveryNode replicaNode = primaryNode.equals(masterNode) ? otherNode : masterNode;
final boolean primaryStarted = randomBoolean();
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, primaryNode.getId(), null, null, true,
primaryStarted ? ShardRoutingState.STARTED : ShardRoutingState.INITIALIZING,
primaryStarted ? null : new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "getting there")));
if (primaryStarted) {
boolean replicaStarted = randomBoolean();
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, replicaNode.getId(), null, null, false,
replicaStarted ? ShardRoutingState.STARTED : ShardRoutingState.INITIALIZING,
replicaStarted ? null : new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "getting there")));
} else {
indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, null, null, null, false,
ShardRoutingState.UNASSIGNED, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "life sucks")));
}
indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build());
stateBuilder.routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build());
}
@Override
public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List<ShardRouting> startedShards,
boolean withReroute) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
}
setState(clusterService, stateBuilder.build());
@Override
public RoutingAllocation.Result applyFailedShards(ClusterState clusterState,
List<FailedRerouteAllocation.FailedShard> failedShards) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
}
final DiscoveryNode restartedNode = new DiscoveryNode(otherNode.getId(),
randomBoolean() ? otherNode.getAddress() : LocalTransportAddress.buildUnique(), otherNode.getAttributes(),
otherNode.getRoles(), Version.CURRENT);
@Override
protected RoutingAllocation.Result reroute(ClusterState clusterState, String reason, boolean debug) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
nodeJoinController.startElectionContext();
final SimpleFuture joinFuture = joinNodeAsync(restartedNode);
final CountDownLatch elected = new CountDownLatch(1);
nodeJoinController.waitToBeElectedAsMaster(1, TimeValue.timeValueHours(5), new NodeJoinController.ElectionCallback() {
@Override
public void onElectedAsMaster(ClusterState state) {
elected.countDown();
}
@Override
public void onFailure(Throwable t) {
logger.error("failed to be elected as master", t);
throw new AssertionError("failed to be elected as master", t);
}
});
elected.await();
joinFuture.get(); // throw any exception
final ClusterState finalState = clusterService.state();
final DiscoveryNodes finalNodes = finalState.nodes();
assertTrue(finalNodes.isLocalNodeElectedMaster());
assertThat(finalNodes.getLocalNode(), equalTo(masterNode));
assertThat(finalNodes.getSize(), equalTo(2));
assertThat(finalNodes.get(restartedNode.getId()), equalTo(restartedNode));
List<ShardRouting> activeShardsOnRestartedNode =
StreamSupport.stream(finalState.getRoutingNodes().node(restartedNode.getId()).spliterator(), false)
.filter(ShardRouting::active).collect(Collectors.toList());
assertThat(activeShardsOnRestartedNode, empty());
}
private void addNodes(int count) {
ClusterState state = clusterService.state();
final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(state.nodes());
for (int i = 0; i < count; i++) {
final DiscoveryNode node = new DiscoveryNode("node_" + state.nodes().getSize() + i, LocalTransportAddress.buildUnique(),
emptyMap(), new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))), Version.CURRENT);
nodesBuilder.add(node);
}
setState(clusterService, ClusterState.builder(state).nodes(nodesBuilder));
}
protected void assertNodesInCurrentState(List<DiscoveryNode> expectedNodes) {

View File

@ -27,7 +27,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.test.ESTestCase;
@ -56,13 +55,13 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase {
final DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
final int nodes = randomIntBetween(2, 16);
for (int i = 0; i < nodes; i++) {
builder.put(node(i));
builder.add(node(i));
}
final ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(builder).build();
final DiscoveryNodes.Builder removeBuilder = DiscoveryNodes.builder();
for (int i = nodes; i < nodes + randomIntBetween(1, 16); i++) {
removeBuilder.put(node(i));
removeBuilder.add(node(i));
}
final List<ZenDiscovery.NodeRemovalClusterStateTaskExecutor.Task> tasks =
StreamSupport
@ -106,7 +105,7 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase {
boolean first = true;
for (int i = 0; i < nodes; i++) {
final DiscoveryNode node = node(i);
builder.put(node);
builder.add(node);
if (first || randomBoolean()) {
tasks.add(new ZenDiscovery.NodeRemovalClusterStateTaskExecutor.Task(node, randomBoolean() ? "left" : "failed"));
}
@ -134,7 +133,8 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase {
when(electMasterService.hasEnoughMasterNodes(any(Iterable.class))).thenReturn(true);
final AllocationService allocationService = mock(AllocationService.class);
when(allocationService.reroute(any(ClusterState.class), any(String.class))).thenReturn(mock(RoutingAllocation.Result.class));
when(allocationService.deassociateDeadNodes(any(ClusterState.class), eq(true), any(String.class)))
.thenReturn(mock(RoutingAllocation.Result.class));
final BiFunction<ClusterState, String, ClusterState> rejoin = (cs, r) -> {
fail("rejoin should not be invoked");
@ -158,7 +158,7 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase {
boolean first = true;
for (int i = 0; i < nodes; i++) {
final DiscoveryNode node = node(i);
builder.put(node);
builder.add(node);
if (first || randomBoolean()) {
tasks.add(new ZenDiscovery.NodeRemovalClusterStateTaskExecutor.Task(node, randomBoolean() ? "left" : "failed"));
}
@ -171,7 +171,7 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase {
verify(electMasterService).hasEnoughMasterNodes(eq(remainingNodesClusterState.get().nodes()));
verifyNoMoreInteractions(electMasterService);
verify(allocationService).reroute(eq(remainingNodesClusterState.get()), any(String.class));
verify(allocationService).deassociateDeadNodes(eq(remainingNodesClusterState.get()), eq(true), any(String.class));
for (final ZenDiscovery.NodeRemovalClusterStateTaskExecutor.Task task : tasks) {
assertNull(result.resultingState.nodes().get(task.node().getId()));

View File

@ -210,7 +210,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
assert node != null;
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(state.nodes())
.put(new DiscoveryNode("abc", new LocalTransportAddress("abc"), emptyMap(),
.add(new DiscoveryNode("abc", new LocalTransportAddress("abc"), emptyMap(),
emptySet(), Version.CURRENT)).masterNodeId("abc");
ClusterState.Builder builder = ClusterState.builder(state);
builder.nodes(nodes);

View File

@ -50,9 +50,9 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
ClusterName clusterName = new ClusterName("abc");
DiscoveryNodes.Builder currentNodes = DiscoveryNodes.builder();
currentNodes.masterNodeId("a").put(new DiscoveryNode("a", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT));
currentNodes.masterNodeId("a").add(new DiscoveryNode("a", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT));
DiscoveryNodes.Builder newNodes = DiscoveryNodes.builder();
newNodes.masterNodeId("a").put(new DiscoveryNode("a", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT));
newNodes.masterNodeId("a").add(new DiscoveryNode("a", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT));
ClusterState.Builder currentState = ClusterState.builder(clusterName);
currentState.nodes(currentNodes);
@ -70,7 +70,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
assertFalse("should not ignore, because new state's version is higher to current state's version", shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build()));
currentNodes = DiscoveryNodes.builder();
currentNodes.masterNodeId("b").put(new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT));
currentNodes.masterNodeId("b").add(new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT));
// version isn't taken into account, so randomize it to ensure this.
if (randomBoolean()) {

View File

@ -92,7 +92,7 @@ public class UnicastZenPingIT extends ESTestCase {
zenPingA.setPingContextProvider(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
return DiscoveryNodes.builder().put(handleA.node).localNodeId("UZP_A").build();
return DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A").build();
}
@Override
@ -106,7 +106,7 @@ public class UnicastZenPingIT extends ESTestCase {
zenPingB.setPingContextProvider(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
return DiscoveryNodes.builder().put(handleB.node).localNodeId("UZP_B").build();
return DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B").build();
}
@Override
@ -126,7 +126,7 @@ public class UnicastZenPingIT extends ESTestCase {
zenPingC.setPingContextProvider(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
return DiscoveryNodes.builder().put(handleC.node).localNodeId("UZP_C").build();
return DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C").build();
}
@Override
@ -140,7 +140,7 @@ public class UnicastZenPingIT extends ESTestCase {
zenPingD.setPingContextProvider(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
return DiscoveryNodes.builder().put(handleD.node).localNodeId("UZP_D").build();
return DiscoveryNodes.builder().add(handleD.node).localNodeId("UZP_D").build();
}
@Override

Some files were not shown because too many files have changed in this diff.