From f8e0557be5e8177382c9d1e36bf8dffa979e5446 Mon Sep 17 00:00:00 2001 From: David Pilato Date: Sat, 23 Jul 2016 01:17:23 +0200 Subject: [PATCH 01/20] Extract AWS Key from KeyChain instead of using potential null value While I was working on #18703, I discovered a bad behavior when people don't provide AWS key/secret as part as their `elasticsearch.yml` but rely on SysProps or env. variables... In [`InternalAwsS3Service#getClient(...)`](https://github.com/elastic/elasticsearch/blob/d4366f8493ac8d2f7091404ffd346e4f3c0f9af9/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java#L76-L141), we have: ```java Tuple clientDescriptor = new Tuple<>(endpoint, account); AmazonS3Client client = clients.get(clientDescriptor); ``` But if people don't provide credentials, `account` is `null`. Even if it actually could work, I think that we should use the `AWSCredentialsProvider` we create later on and extract from it the `account` (AWS KEY actually) and then use it as the second value of the tuple. Closes #19557. 
--- .../elasticsearch/cloud/aws/AwsS3Service.java | 3 ++- .../cloud/aws/InternalAwsS3Service.java | 19 ++++++++++++---- .../repositories/s3/S3Repository.java | 5 +---- .../cloud/aws/AwsS3ServiceImplTests.java | 9 ++------ .../cloud/aws/TestAwsS3Service.java | 6 ++--- .../s3/AbstractS3SnapshotRestoreTest.java | 22 +++++++++---------- .../repositories/s3/S3RepositoryTests.java | 2 +- 7 files changed, 34 insertions(+), 32 deletions(-) diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java index 43de8a3ba27..59c3d3445a2 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java @@ -24,6 +24,7 @@ import com.amazonaws.services.s3.AmazonS3; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; import java.util.Locale; import java.util.function.Function; @@ -154,6 +155,6 @@ public interface AwsS3Service extends LifecycleComponent { Setting ENDPOINT_SETTING = Setting.simpleString("cloud.aws.s3.endpoint", Property.NodeScope); } - AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries, + AmazonS3 client(Settings repositorySettings, String endpoint, Protocol protocol, String region, Integer maxRetries, boolean useThrottleRetries, Boolean pathStyleAccess); } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java index 27053379db1..c4d8a63adc6 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java +++ 
b/plugins/repository-s3/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java @@ -35,10 +35,13 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.repositories.s3.S3Repository; import java.util.HashMap; import java.util.Map; +import static org.elasticsearch.repositories.s3.S3Repository.getValue; + /** * */ @@ -54,17 +57,20 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent implements } @Override - public synchronized AmazonS3 client(String endpoint, Protocol protocol, String region, String key, String secret, Integer maxRetries, + public synchronized AmazonS3 client(Settings repositorySettings, String endpoint, Protocol protocol, String region, Integer maxRetries, boolean useThrottleRetries, Boolean pathStyleAccess) { String foundEndpoint = findEndpoint(logger, settings, endpoint, region); - Tuple clientDescriptor = new Tuple<>(foundEndpoint, key); + + AWSCredentialsProvider credentials = buildCredentials(logger, settings, repositorySettings); + + Tuple clientDescriptor = new Tuple<>(foundEndpoint, credentials.getCredentials().getAWSAccessKeyId()); AmazonS3Client client = clients.get(clientDescriptor); if (client != null) { return client; } client = new AmazonS3Client( - buildCredentials(logger, key, secret), + credentials, buildConfiguration(logger, settings, protocol, maxRetries, foundEndpoint, useThrottleRetries)); if (pathStyleAccess != null) { @@ -116,8 +122,13 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent implements return clientConfiguration; } - public static AWSCredentialsProvider buildCredentials(ESLogger logger, String key, String secret) { + public static AWSCredentialsProvider buildCredentials(ESLogger logger, Settings settings, Settings repositorySettings) { AWSCredentialsProvider credentials; + 
String key = getValue(repositorySettings, settings, + S3Repository.Repository.KEY_SETTING, S3Repository.Repositories.KEY_SETTING); + String secret = getValue(repositorySettings, settings, + S3Repository.Repository.SECRET_SETTING, S3Repository.Repositories.SECRET_SETTING); + if (key.isEmpty() && secret.isEmpty()) { logger.debug("Using either environment variables, system properties or instance profile credentials"); credentials = new DefaultAWSCredentialsProviderChain(); diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index 56d05b65711..b5abb361be8 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -301,11 +301,8 @@ public class S3Repository extends BlobStoreRepository { bucket, region, endpoint, protocol, chunkSize, serverSideEncryption, bufferSize, maxRetries, useThrottleRetries, cannedACL, storageClass, pathStyleAccess); - String key = getValue(metadata.settings(), settings, Repository.KEY_SETTING, Repositories.KEY_SETTING); - String secret = getValue(metadata.settings(), settings, Repository.SECRET_SETTING, Repositories.SECRET_SETTING); - blobStore = new S3BlobStore(settings, - s3Service.client(endpoint, protocol, region, key, secret, maxRetries, useThrottleRetries, pathStyleAccess), + s3Service.client(metadata.settings(), endpoint, protocol, region, maxRetries, useThrottleRetries, pathStyleAccess), bucket, region, serverSideEncryption, bufferSize, maxRetries, cannedACL, storageClass); String basePath = getValue(metadata.settings(), settings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AwsS3ServiceImplTests.java 
b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AwsS3ServiceImplTests.java index 777bb5ff358..3f92a511190 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AwsS3ServiceImplTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/AwsS3ServiceImplTests.java @@ -35,7 +35,7 @@ import static org.hamcrest.Matchers.is; public class AwsS3ServiceImplTests extends ESTestCase { public void testAWSCredentialsWithSystemProviders() { - AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, "", ""); + AWSCredentialsProvider credentialsProvider = InternalAwsS3Service.buildCredentials(logger, Settings.EMPTY, Settings.EMPTY); assertThat(credentialsProvider, instanceOf(DefaultAWSCredentialsProviderChain.class)); } @@ -136,12 +136,7 @@ public class AwsS3ServiceImplTests extends ESTestCase { protected void launchAWSCredentialsWithElasticsearchSettingsTest(Settings singleRepositorySettings, Settings settings, String expectedKey, String expectedSecret) { - String key = S3Repository.getValue(singleRepositorySettings, settings, - S3Repository.Repository.KEY_SETTING, S3Repository.Repositories.KEY_SETTING); - String secret = S3Repository.getValue(singleRepositorySettings, settings, - S3Repository.Repository.SECRET_SETTING, S3Repository.Repositories.SECRET_SETTING); - - AWSCredentials credentials = InternalAwsS3Service.buildCredentials(logger, key, secret).getCredentials(); + AWSCredentials credentials = InternalAwsS3Service.buildCredentials(logger, settings, singleRepositorySettings).getCredentials(); assertThat(credentials.getAWSAccessKeyId(), is(expectedKey)); assertThat(credentials.getAWSSecretKey(), is(expectedSecret)); } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAwsS3Service.java b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAwsS3Service.java index d7c706822a8..e7a57958a89 100644 --- 
a/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAwsS3Service.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/cloud/aws/TestAwsS3Service.java @@ -21,10 +21,8 @@ package org.elasticsearch.cloud.aws; import com.amazonaws.Protocol; import com.amazonaws.services.s3.AmazonS3; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugin.repository.s3.S3RepositoryPlugin; -import org.elasticsearch.plugins.Plugin; import java.util.IdentityHashMap; @@ -44,9 +42,9 @@ public class TestAwsS3Service extends InternalAwsS3Service { @Override - public synchronized AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries, + public synchronized AmazonS3 client(Settings repositorySettings, String endpoint, Protocol protocol, String region, Integer maxRetries, boolean useThrottleRetries, Boolean pathStyleAccess) { - return cachedWrapper(super.client(endpoint, protocol, region, account, key, maxRetries, useThrottleRetries, pathStyleAccess)); + return cachedWrapper(super.client(repositorySettings, endpoint, protocol, region, maxRetries, useThrottleRetries, pathStyleAccess)); } private AmazonS3 cachedWrapper(AmazonS3 client) { diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java index a3671b42ee4..d9d15ce0b3b 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java @@ -161,12 +161,15 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase public void testEncryption() { Client client = client(); logger.info("--> 
creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath); + + Settings repositorySettings = Settings.builder() + .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath) + .put(S3Repository.Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000)) + .put(S3Repository.Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), true) + .build(); + PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") - .setType("s3").setSettings(Settings.builder() - .put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath) - .put(S3Repository.Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000)) - .put(S3Repository.Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), true) - ).get(); + .setType("s3").setSettings(repositorySettings).get(); assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); createIndex("test-idx-1", "test-idx-2", "test-idx-3"); @@ -193,11 +196,10 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase Settings settings = internalCluster().getInstance(Settings.class); Settings bucket = settings.getByPrefix("repositories.s3."); AmazonS3 s3Client = internalCluster().getInstance(AwsS3Service.class).client( + repositorySettings, null, S3Repository.Repositories.PROTOCOL_SETTING.get(settings), S3Repository.Repositories.REGION_SETTING.get(settings), - S3Repository.Repositories.KEY_SETTING.get(settings), - S3Repository.Repositories.SECRET_SETTING.get(settings), null, randomBoolean(), null); String bucketName = bucket.get("bucket"); @@ -466,15 +468,13 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase String endpoint = bucket.get("endpoint", S3Repository.Repositories.ENDPOINT_SETTING.get(settings)); Protocol protocol = S3Repository.Repositories.PROTOCOL_SETTING.get(settings); String region = bucket.get("region", 
S3Repository.Repositories.REGION_SETTING.get(settings)); - String accessKey = bucket.get("access_key", S3Repository.Repositories.KEY_SETTING.get(settings)); - String secretKey = bucket.get("secret_key", S3Repository.Repositories.SECRET_SETTING.get(settings)); String bucketName = bucket.get("bucket"); // We check that settings has been set in elasticsearch.yml integration test file // as described in README assertThat("Your settings in elasticsearch.yml are incorrects. Check README file.", bucketName, notNullValue()); - AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(endpoint, protocol, region, accessKey, secretKey, - null, randomBoolean(), null); + AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(Settings.EMPTY, endpoint, protocol, region, null, + randomBoolean(), null); try { ObjectListing prevListing = null; //From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index d6cca5d70d6..f8940c6158c 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -58,7 +58,7 @@ public class S3RepositoryTests extends ESTestCase { @Override protected void doClose() {} @Override - public AmazonS3 client(String endpoint, Protocol protocol, String region, String account, String key, Integer maxRetries, + public AmazonS3 client(Settings repositorySettings, String endpoint, Protocol protocol, String region, Integer maxRetries, boolean useThrottleRetries, Boolean pathStyleAccess) { return new DummyS3Client(); } From 8bbc312fdd2442ce5c9ad431ab0c61930069df2f Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Thu, 4 Aug 2016 08:47:46 -0400 Subject: 
[PATCH 02/20] Fixes issue with dangling index being deleted instead of re-imported (#19666) Fixes an issue where a node that receives a cluster state update with a brand new cluster UUID but without an initial persistence block could cause indices to be wiped out, preventing them from being reimported as dangling indices. This commit only removes the in-memory data structures; the indices are thus subsequently reimported as dangling indices. --- .../cluster/ClusterChangedEvent.java | 12 +- .../elasticsearch/cluster/ClusterState.java | 1 + .../cluster/IndicesClusterStateService.java | 27 ++--- ...ClusterStateServiceRandomUpdatesTests.java | 106 ++++++++++++++++-- 4 files changed, 115 insertions(+), 31 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java index efd525d313b..e3164eacdbb 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java @@ -199,10 +199,14 @@ public class ClusterChangedEvent { return nodesRemoved() || nodesAdded(); } - // Determines whether or not the current cluster state represents an entirely - // different cluster from the previous cluster state, which will happen when a - // master node is elected that has never been part of the cluster before. - private boolean isNewCluster() { + /** + * Determines whether or not the current cluster state represents an entirely + * new cluster, either when a node joins a cluster for the first time or when + * the node receives a cluster state update from a brand new cluster (different + * UUID from the previous cluster), which will happen when a master node is + * elected that has never been part of the cluster before. 
+ */ + public boolean isNewCluster() { final String prevClusterUUID = previousState.metaData().clusterUUID(); final String currClusterUUID = state.metaData().clusterUUID(); return prevClusterUUID.equals(currClusterUUID) == false; diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java index abad2e9a8e4..6745900057d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -280,6 +280,7 @@ public class ClusterState implements ToXContent, Diffable { public String prettyPrint() { StringBuilder sb = new StringBuilder(); + sb.append("cluster uuid: ").append(metaData.clusterUUID()).append("\n"); sb.append("version: ").append(version).append("\n"); sb.append("state uuid: ").append(stateUUID).append("\n"); sb.append("from_diff: ").append(wasReadFromDiff).append("\n"); diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index fd06175e28c..f01a09d4608 100644 --- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -177,7 +177,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple deleteIndices(event); // also deletes shards of deleted indices - removeUnallocatedIndices(state); // also removes shards of removed indices + removeUnallocatedIndices(event); // also removes shards of removed indices failMissingShards(state); @@ -286,28 +286,16 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple }); } } - - // delete local indices that do neither exist in previous cluster state nor part of tombstones - for (AllocatedIndex indexService : indicesService) { - Index index = indexService.index(); 
- IndexMetaData indexMetaData = event.state().metaData().index(index); - if (indexMetaData == null) { - assert false : "index" + index + " exists locally, doesn't have a metadata but is not part" - + " of the delete index list. \nprevious state: " + event.previousState().prettyPrint() - + "\n current state:\n" + event.state().prettyPrint(); - logger.warn("[{}] isn't part of metadata but is part of in memory structures. removing", index); - indicesService.deleteIndex(index, "isn't part of metadata (explicit check)"); - } - } } /** * Removes indices that have no shards allocated to this node. This does not delete the shard data as we wait for enough * shard copies to exist in the cluster before deleting shard data (triggered by {@link org.elasticsearch.indices.store.IndicesStore}). * - * @param state new cluster state + * @param event the cluster changed event */ - private void removeUnallocatedIndices(final ClusterState state) { + private void removeUnallocatedIndices(final ClusterChangedEvent event) { + final ClusterState state = event.state(); final String localNodeId = state.nodes().getLocalNodeId(); assert localNodeId != null; @@ -322,6 +310,13 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple for (AllocatedIndex indexService : indicesService) { Index index = indexService.index(); if (indicesWithShards.contains(index) == false) { + // if the cluster change indicates a brand new cluster, we only want + // to remove the in-memory structures for the index and not delete the + // contents on disk because the index will later be re-imported as a + // dangling index + assert state.metaData().index(index) != null || event.isNewCluster() : + "index " + index + " does not exist in the cluster state, it should either " + + "have been deleted or the cluster must be new"; logger.debug("{} removing index, no shards allocated", index); indicesService.removeIndex(index, "removing index (no shards allocated)"); } diff --git 
a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index c2ccb9cd4ab..4477974d118 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -32,20 +32,26 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation.FailedShard; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.index.Index; import org.elasticsearch.indices.recovery.RecoveryTargetService; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -56,9 +62,11 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.Executor; +import java.util.function.Supplier; import static 
org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -69,7 +77,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice public void testRandomClusterStateUpdates() { // we have an IndicesClusterStateService per node in the cluster final Map clusterStateServiceMap = new HashMap<>(); - ClusterState state = randomInitialClusterState(clusterStateServiceMap); + ClusterState state = randomInitialClusterState(clusterStateServiceMap, MockIndicesService::new); // each of the following iterations represents a new cluster state update processed on all nodes for (int i = 0; i < 30; i++) { @@ -78,7 +86,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice // calculate new cluster state for (int j = 0; j < randomInt(3); j++) { // multiple iterations to simulate batching of cluster states - state = randomlyUpdateClusterState(state, clusterStateServiceMap); + state = randomlyUpdateClusterState(state, clusterStateServiceMap, MockIndicesService::new); } // apply cluster state to nodes (incl. master) @@ -97,7 +105,65 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice logger.info("Final cluster state: {}", state.prettyPrint()); } - public ClusterState randomInitialClusterState(Map clusterStateServiceMap) { + /** + * This test ensures that when a node joins a brand new cluster (different cluster UUID), + * different from the cluster it was previously a part of, the in-memory index data structures + * are all removed but the on disk contents of those indices remain so that they can later be + * imported as dangling indices. 
Normally, the first cluster state update that the node + * receives from the new cluster would contain a cluster block that would cause all in-memory + * structures to be removed (see {@link IndicesClusterStateService#clusterChanged(ClusterChangedEvent)}), + * but in the case where the node joined and was a few cluster state updates behind, it would + * not have received the cluster block, in which case we still need to remove the in-memory + * structures while ensuring the data remains on disk. This test executes this particular + * scenario. + */ + public void testJoiningNewClusterOnlyRemovesInMemoryIndexStructures() { + // a cluster state derived from the initial state that includes a created index + String name = "index_" + randomAsciiOfLength(8).toLowerCase(Locale.ROOT); + ShardRoutingState[] replicaStates = new ShardRoutingState[randomIntBetween(0, 3)]; + Arrays.fill(replicaStates, ShardRoutingState.INITIALIZING); + ClusterState stateWithIndex = ClusterStateCreationUtils.state(name, randomBoolean(), ShardRoutingState.INITIALIZING, replicaStates); + + // the initial state which is derived from the newly created cluster state but doesn't contain the index + ClusterState initialState = ClusterState.builder(stateWithIndex) + .metaData(MetaData.builder(stateWithIndex.metaData()).remove(name)) + .routingTable(RoutingTable.builder().build()) + .build(); + + // pick a data node to simulate the adding an index cluster state change event on, that has shards assigned to it + DiscoveryNode node = stateWithIndex.nodes().get( + randomFrom(stateWithIndex.routingTable().index(name).shardsWithState(INITIALIZING)).currentNodeId()); + + // simulate the cluster state change on the node + ClusterState localState = adaptClusterStateToLocalNode(stateWithIndex, node); + ClusterState previousLocalState = adaptClusterStateToLocalNode(initialState, node); + IndicesClusterStateService indicesCSSvc = createIndicesClusterStateService(RecordingIndicesService::new); + indicesCSSvc.start(); 
+ indicesCSSvc.clusterChanged(new ClusterChangedEvent("cluster state change that adds the index", localState, previousLocalState)); + + // create a new empty cluster state with a brand new cluster UUID + ClusterState newClusterState = ClusterState.builder(initialState) + .metaData(MetaData.builder(initialState.metaData()).clusterUUID(UUIDs.randomBase64UUID())) + .build(); + + // simulate the cluster state change on the node + localState = adaptClusterStateToLocalNode(newClusterState, node); + previousLocalState = adaptClusterStateToLocalNode(stateWithIndex, node); + indicesCSSvc.clusterChanged(new ClusterChangedEvent("cluster state change with a new cluster UUID (and doesn't contain the index)", + localState, previousLocalState)); + + // check that in memory data structures have been removed once the new cluster state is applied, + // but the persistent data is still there + RecordingIndicesService indicesService = (RecordingIndicesService) indicesCSSvc.indicesService; + for (IndexMetaData indexMetaData : stateWithIndex.metaData()) { + Index index = indexMetaData.getIndex(); + assertNull(indicesService.indexService(index)); + assertFalse(indicesService.isDeleted(index)); + } + } + + public ClusterState randomInitialClusterState(Map clusterStateServiceMap, + Supplier indicesServiceSupplier) { List allNodes = new ArrayList<>(); DiscoveryNode localNode = createNode(DiscoveryNode.Role.MASTER); // local node is the master allNodes.add(localNode); @@ -109,14 +175,15 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice } ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[allNodes.size()])); // add nodes to clusterStateServiceMap - updateNodes(state, clusterStateServiceMap); + updateNodes(state, clusterStateServiceMap, indicesServiceSupplier); return state; } - private void updateNodes(ClusterState state, Map clusterStateServiceMap) { + private void updateNodes(ClusterState state, Map 
clusterStateServiceMap, + Supplier indicesServiceSupplier) { for (DiscoveryNode node : state.nodes()) { clusterStateServiceMap.computeIfAbsent(node, discoveryNode -> { - IndicesClusterStateService ics = createIndicesClusterStateService(); + IndicesClusterStateService ics = createIndicesClusterStateService(indicesServiceSupplier); ics.start(); return ics; }); @@ -131,7 +198,8 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice } public ClusterState randomlyUpdateClusterState(ClusterState state, - Map clusterStateServiceMap) { + Map clusterStateServiceMap, + Supplier indicesServiceSupplier) { // randomly create new indices (until we have 200 max) for (int i = 0; i < randomInt(5); i++) { if (state.metaData().indices().size() > 200) { @@ -229,7 +297,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).put(createNode()).build(); state = ClusterState.builder(state).nodes(newNodes).build(); state = cluster.reroute(state, new ClusterRerouteRequest()); // always reroute after node leave - updateNodes(state, clusterStateServiceMap); + updateNodes(state, clusterStateServiceMap, indicesServiceSupplier); } } else { // remove node @@ -239,7 +307,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).remove(discoveryNode.getId()).build(); state = ClusterState.builder(state).nodes(newNodes).build(); state = cluster.reroute(state, new ClusterRerouteRequest()); // always reroute after node join - updateNodes(state, clusterStateServiceMap); + updateNodes(state, clusterStateServiceMap, indicesServiceSupplier); } } } @@ -263,11 +331,11 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice return ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).localNodeId(node.getId())).build(); } - private 
IndicesClusterStateService createIndicesClusterStateService() { + private IndicesClusterStateService createIndicesClusterStateService(final Supplier indicesServiceSupplier) { final ThreadPool threadPool = mock(ThreadPool.class); final Executor executor = mock(Executor.class); when(threadPool.generic()).thenReturn(executor); - final MockIndicesService indicesService = new MockIndicesService(); + final MockIndicesService indicesService = indicesServiceSupplier.get(); final TransportService transportService = new TransportService(Settings.EMPTY, null, threadPool); final ClusterService clusterService = mock(ClusterService.class); final RepositoriesService repositoriesService = new RepositoriesService(Settings.EMPTY, clusterService, @@ -279,4 +347,20 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice threadPool, recoveryTargetService, shardStateAction, null, repositoriesService, null, null, null, null, null); } + private class RecordingIndicesService extends MockIndicesService { + private Set deletedIndices = Collections.emptySet(); + + @Override + public synchronized void deleteIndex(Index index, String reason) { + super.deleteIndex(index, reason); + Set newSet = Sets.newHashSet(deletedIndices); + newSet.add(index); + deletedIndices = Collections.unmodifiableSet(newSet); + } + + public synchronized boolean isDeleted(Index index) { + return deletedIndices.contains(index); + } + } + } From b0730bb2141173366ae7a73525de93c9d122e55c Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 4 Aug 2016 14:15:28 +0100 Subject: [PATCH 03/20] Fix PreBuiltTransportClientTests to run and pass This change does three things: 1. Makes PreBuiltTransportClientTests run since it was silently failing on a missing dependency 2. Makes PreBuiltTransportClientTests pass 3. Removes the http.type and transport.type from being set in the transport clients additional settings since these are set to `netty4` by default anyway. 
--- client/transport/build.gradle | 1 + .../transport/client/PreBuiltTransportClient.java | 6 +----- .../transport/client/PreBuiltTransportClientTests.java | 8 +++++--- .../org/elasticsearch/common/network/NetworkModule.java | 8 +++++--- 4 files changed, 12 insertions(+), 11 deletions(-) diff --git a/client/transport/build.gradle b/client/transport/build.gradle index 140ef1b4303..dcb932e0ed6 100644 --- a/client/transport/build.gradle +++ b/client/transport/build.gradle @@ -34,6 +34,7 @@ dependencies { compile "org.elasticsearch.plugin:percolator-client:${version}" testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" + testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"` } dependencyLicenses { diff --git a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java index 91e3e0830a1..287dd014c20 100644 --- a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java +++ b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java @@ -20,7 +20,6 @@ package org.elasticsearch.transport.client; import org.elasticsearch.client.transport.TransportClient; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.reindex.ReindexPlugin; @@ -79,10 +78,7 @@ public class PreBuiltTransportClient extends TransportClient { @Override public Settings additionalSettings() { - return Settings.builder() - .put(NetworkModule.TRANSPORT_TYPE_KEY, Netty4Plugin.NETTY_TRANSPORT_NAME) - .put(NetworkModule.HTTP_TYPE_KEY, Netty4Plugin.NETTY_HTTP_TRANSPORT_NAME) - .put("netty.assert.buglevel", true) + return Settings.builder().put("netty.assert.buglevel", true) 
.build(); } diff --git a/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java b/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java index 5b72006f5f0..c519b29f9bc 100644 --- a/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java +++ b/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java @@ -27,11 +27,12 @@ import org.elasticsearch.index.reindex.ReindexPlugin; import org.elasticsearch.percolator.PercolatorPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.mustache.MustachePlugin; -import org.elasticsearch.transport.Netty3Plugin; +import org.elasticsearch.transport.Netty4Plugin; import org.junit.Test; import java.util.Arrays; +import static org.junit.Assert.*; import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; @@ -41,7 +42,8 @@ public class PreBuiltTransportClientTests extends RandomizedTest { public void testPluginInstalled() { try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)) { Settings settings = client.settings(); - assertEquals(Netty3Plugin.NETTY_TRANSPORT_NAME, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings)); + assertEquals(Netty4Plugin.NETTY_TRANSPORT_NAME, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings)); + assertEquals(Netty4Plugin.NETTY_TRANSPORT_NAME, NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.get(settings)); } } @@ -54,7 +56,7 @@ public class PreBuiltTransportClientTests extends RandomizedTest { new PreBuiltTransportClient(Settings.EMPTY, plugin); fail("exception expected"); } catch (IllegalArgumentException ex) { - assertEquals("plugin is already installed", ex.getMessage()); + assertTrue(ex.getMessage().startsWith("plugin already exists: ")); } } } diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java 
b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java index 824456fe514..f6c2ef326f2 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -33,7 +33,6 @@ import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationComman import org.elasticsearch.common.ParseField; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.util.Providers; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; @@ -57,9 +56,12 @@ public class NetworkModule extends AbstractModule { public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type"; public static final String HTTP_TYPE_KEY = "http.type"; public static final String LOCAL_TRANSPORT = "local"; + public static final String HTTP_TYPE_DEFAULT_KEY = "http.type.default"; + public static final String TRANSPORT_TYPE_DEFAULT_KEY = "transport.type.default"; - public static final Setting TRANSPORT_DEFAULT_TYPE_SETTING = Setting.simpleString("transport.type.default", Property.NodeScope); - public static final Setting HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString("http.type.default", Property.NodeScope); + public static final Setting TRANSPORT_DEFAULT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_DEFAULT_KEY, + Property.NodeScope); + public static final Setting HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_DEFAULT_KEY, Property.NodeScope); public static final Setting HTTP_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_KEY, Property.NodeScope); public static final Setting HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope); public static final Setting TRANSPORT_SERVICE_TYPE_SETTING = From 
c08557d033b9ce67613c99abaa9e5082119a6160 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 4 Aug 2016 09:48:38 -0400 Subject: [PATCH 04/20] Wait for Netty 4 threads to terminate on close Today if the PreBuiltTransportClient is using Netty 4 transport, on shutdown some Netty 4 threads could linger. This commit causes the client to wait for these threads to shutdown upon termination. --- client/transport/build.gradle | 2 +- .../client/PreBuiltTransportClient.java | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/client/transport/build.gradle b/client/transport/build.gradle index dcb932e0ed6..c3dc2d84982 100644 --- a/client/transport/build.gradle +++ b/client/transport/build.gradle @@ -34,7 +34,7 @@ dependencies { compile "org.elasticsearch.plugin:percolator-client:${version}" testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" - testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"` + testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" } dependencyLicenses { diff --git a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java index 287dd014c20..8a28ab2df94 100644 --- a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java +++ b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java @@ -19,7 +19,10 @@ package org.elasticsearch.transport.client; +import io.netty.util.ThreadDeathWatcher; +import io.netty.util.concurrent.GlobalEventExecutor; import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.reindex.ReindexPlugin; @@ 
-33,6 +36,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; /** * A builder to create an instance of {@link TransportClient} @@ -84,4 +88,18 @@ public class PreBuiltTransportClient extends TransportClient { } + @Override + public void close() { + super.close(); + if (!NetworkModule.TRANSPORT_TYPE_SETTING.exists(settings) + || NetworkModule.TRANSPORT_TYPE_SETTING.get(settings).equals(Netty4Plugin.NETTY_TRANSPORT_NAME)) { + try { + GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); + ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + } From 9f1525255ac8629d83364ca3b65c4cf86304dd9d Mon Sep 17 00:00:00 2001 From: Ryan Biesemeyer Date: Thu, 4 Aug 2016 13:56:28 +0000 Subject: [PATCH 05/20] Update link to mapper-murmur3 plugin in card docs (#19788) --- .../aggregations/metrics/cardinality-aggregation.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc b/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc index b8ee0508618..73d7f3c26bb 100644 --- a/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/cardinality-aggregation.asciidoc @@ -141,7 +141,7 @@ On string fields that have a high cardinality, it might be faster to store the hash of your field values in your index and then run the cardinality aggregation on this field. This can either be done by providing hash values from client-side or by letting elasticsearch compute hash values for you by using the -{plugins}/mapper-size.html[`mapper-murmur3`] plugin. +{plugins}/mapper-murmur3.html[`mapper-murmur3`] plugin. NOTE: Pre-computing hashes is usually only useful on very large and/or high-cardinality fields as it saves CPU and memory. 
However, on numeric From 2936810c631252e258d3e0e576d144b532cf9d9d Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 4 Aug 2016 10:03:08 -0400 Subject: [PATCH 06/20] Setting exists equals false instead of not exists This commit rewrites a boolean expression to check for equality with false instead of negating the existence check. --- .../elasticsearch/transport/client/PreBuiltTransportClient.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java index 8a28ab2df94..acc28f2e34a 100644 --- a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java +++ b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java @@ -91,7 +91,7 @@ public class PreBuiltTransportClient extends TransportClient { @Override public void close() { super.close(); - if (!NetworkModule.TRANSPORT_TYPE_SETTING.exists(settings) + if (NetworkModule.TRANSPORT_TYPE_SETTING.exists(settings) == false || NetworkModule.TRANSPORT_TYPE_SETTING.get(settings).equals(Netty4Plugin.NETTY_TRANSPORT_NAME)) { try { GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); From 34bb1508637368c43b792992646a612bb8022e99 Mon Sep 17 00:00:00 2001 From: Ali Beyad Date: Thu, 4 Aug 2016 10:16:58 -0400 Subject: [PATCH 07/20] [TEST] Fixes primary term in TransportReplicationActionTests#testReplicaProxy --- .../support/replication/TransportReplicationActionTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java index 9d8bf87757e..6b1d0dd0885 100644 --- 
a/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java @@ -534,7 +534,7 @@ public class TransportReplicationActionTests extends ESTestCase { AtomicReference failure = new AtomicReference<>(); AtomicReference ignoredFailure = new AtomicReference<>(); AtomicBoolean success = new AtomicBoolean(); - proxy.failShard(replica, randomIntBetween(0, 10), "test", new ElasticsearchException("simulated"), + proxy.failShard(replica, randomIntBetween(1, 10), "test", new ElasticsearchException("simulated"), () -> success.set(true), failure::set, ignoredFailure::set ); CapturingTransport.CapturedRequest[] shardFailedRequests = transport.getCapturedRequestsAndClear(); From 2cceb0a5f46a77b94d10ce68e08cde5f15469148 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Thu, 4 Aug 2016 17:17:53 +0200 Subject: [PATCH 08/20] Updated v5.0.0-alpha5 release notes --- .../release-notes/5.0.0-alpha5.asciidoc | 46 ++++++++++++++++++- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/docs/reference/release-notes/5.0.0-alpha5.asciidoc b/docs/reference/release-notes/5.0.0-alpha5.asciidoc index 3ba956f9f38..0137af17423 100644 --- a/docs/reference/release-notes/5.0.0-alpha5.asciidoc +++ b/docs/reference/release-notes/5.0.0-alpha5.asciidoc @@ -9,6 +9,9 @@ IMPORTANT: This is an alpha release and is intended for _testing purposes only_. 
[float] === Breaking changes +CAT API:: +* Improve cat thread pool API {pull}19721[#19721] (issue: {issue}19590[#19590]) + Cluster:: * Persistent Node Ids {pull}19140[#19140] (issue: {issue}17811[#17811]) @@ -18,6 +21,9 @@ Core:: Exceptions:: * Die with dignity {pull}19272[#19272] (issue: {issue}19231[#19231]) +Index APIs:: +* Removes write consistency level across replication action APIs in favor of wait_for_active_shards {pull}19454[#19454] (issue: {issue}18985[#18985]) + Scripting:: * Remove deprecated 1.x script and template syntax {pull}19387[#19387] (issue: {issue}13729[#13729]) @@ -30,10 +36,13 @@ Settings:: -[[breaking java-5.0.0-alpha5]] +[[breaking-java-5.0.0-alpha5]] [float] === Breaking Java changes +CRUD:: +* Removing isCreated and isFound from the Java API {pull}19645[#19645] (issues: {issue}19566[#19566], {issue}19631[#19631]) + Internal:: * Clean up BytesReference {pull}19196[#19196] @@ -61,6 +70,9 @@ Scripting:: Settings:: * Remove `node.mode` and `node.local` settings {pull}19428[#19428] +Snapshot/Restore:: +* Removes extra writeBlob method in BlobContainer {pull}19727[#19727] (issue: {issue}18528[#18528]) + [[deprecation-5.0.0-alpha5]] @@ -82,6 +94,9 @@ Templates:: [float] === New features +Aggregations:: +* Split regular histograms from date histograms. {pull}19551[#19551] (issues: {issue}4847[#4847], {issue}8082[#8082]) + Circuit Breakers:: * Circuit break on aggregation bucket numbers with request breaker {pull}19394[#19394] (issue: {issue}14046[#14046]) @@ -110,6 +125,7 @@ Translog:: === Enhancements Aggregations:: +* Make the heuristic to compute the default shard size less aggressive. 
{pull}19659[#19659] * Add _bucket_count option to buckets_path {pull}19571[#19571] (issue: {issue}19553[#19553]) * Remove AggregationStreams {pull}19507[#19507] * Migrate serial_diff aggregation to NamedWriteable {pull}19483[#19483] @@ -139,6 +155,7 @@ CAT API:: * Includes the index UUID in the _cat/indices API {pull}19204[#19204] (issue: {issue}19132[#19132]) CRUD:: +* #19664 Renaming operation to result and reworking responses {pull}19704[#19704] (issue: {issue}19664[#19664]) * Adding _operation field to index, update, delete response. {pull}19566[#19566] (issues: {issue}19267[#19267], {issue}9642[#9642], {issue}9736[#9736]) Cache:: @@ -153,6 +170,9 @@ Core:: * Makes index creation more friendly {pull}19450[#19450] (issue: {issue}9126[#9126]) * Clearer error when handling fractional time values {pull}19158[#19158] (issue: {issue}19102[#19102]) +Discovery:: +* Do not log cluster service errors at after joining a master {pull}19705[#19705] + Exceptions:: * Make NotMasterException a first class citizen {pull}19385[#19385] * Do not catch throwable {pull}19231[#19231] @@ -168,6 +188,7 @@ Ingest:: Internal:: * Make Priority an enum {pull}19448[#19448] +* Snapshot UUIDs in blob names {pull}19421[#19421] (issues: {issue}18156[#18156], {issue}18815[#18815], {issue}19002[#19002], {issue}7540[#7540]) * Add RestController method for deprecating in one step {pull}19343[#19343] * Tighten ensure atomic move cleanup {pull}19309[#19309] (issue: {issue}19036[#19036]) * Enable checkstyle ModifierOrder {pull}19214[#19214] @@ -197,6 +218,8 @@ Mapping:: * Elasticsearch should reject dynamic templates with unknown `match_mapping_type`. 
{pull}17285[#17285] (issue: {issue}16945[#16945]) Network:: +* Explicitly tell Netty to not use unsafe {pull}19786[#19786] (issues: {issue}19562[#19562], {issue}5624[#5624]) +* Enable Netty 4 extensions {pull}19767[#19767] (issue: {issue}19526[#19526]) * Modularize netty {pull}19392[#19392] * Simplify TcpTransport interface by reducing send code to a single send method {pull}19223[#19223] @@ -219,6 +242,7 @@ Plugin Mapper Size:: * Add doc values support to the _size field in the mapper-size plugin {pull}19217[#19217] (issue: {issue}18334[#18334]) Plugins:: +* Add ScriptService to dependencies available for plugin components {pull}19770[#19770] * Log one plugin info per line {pull}19441[#19441] * Make rest headers registration pull based {pull}19440[#19440] * Add resource watcher to services available for plugin components {pull}19401[#19401] @@ -238,6 +262,8 @@ Recovery:: * Non-blocking primary relocation hand-off {pull}19013[#19013] (issues: {issue}15900[#15900], {issue}18553[#18553]) Reindex API:: +* Only ask for `_version` we need it {pull}19693[#19693] (issue: {issue}19135[#19135]) +* Use fewer threads when reindexing-from-remote {pull}19636[#19636] * Support authentication with reindex-from-remote {pull}19310[#19310] * Support requests_per_second=-1 to mean no throttling in reindex {pull}19101[#19101] (issue: {issue}19089[#19089]) @@ -253,7 +279,10 @@ Settings:: * Validates new dynamic settings from the current state {pull}19122[#19122] (issue: {issue}19046[#19046]) Snapshot/Restore:: +* BlobContainer#writeBlob no longer can overwrite a blob {pull}19749[#19749] (issue: {issue}15579[#15579]) +* More resilient blob handling in snapshot repositories {pull}19706[#19706] (issues: {issue}18156[#18156], {issue}18815[#18815], {issue}19421[#19421], {issue}7540[#7540]) * Adding repository index generational files {pull}19002[#19002] (issue: {issue}18156[#18156]) +* Raised IOException on deleteBlob {pull}18815[#18815] (issue: {issue}18530[#18530]) Stats:: * Add missing 
field type in the FieldStats response. {pull}19241[#19241] (issue: {issue}17750[#17750]) @@ -266,6 +295,7 @@ Stats:: === Bug fixes Aggregations:: +* Undeprecates `aggs` in the search request {pull}19674[#19674] (issue: {issue}19504[#19504]) * Change how `nested` and `reverse_nested` aggs know about their nested depth level {pull}19550[#19550] (issues: {issue}11749[#11749], {issue}12410[#12410]) * Make ExtendedBounds immutable {pull}19490[#19490] (issue: {issue}19481[#19481]) * Fix potential AssertionError with include/exclude on terms aggregations. {pull}19252[#19252] (issue: {issue}18575[#18575]) @@ -279,23 +309,31 @@ Allocation:: Analysis:: * Fix analyzer alias processing {pull}19506[#19506] (issue: {issue}19163[#19163]) +CAT API:: +* Fixes cat tasks operation in detailed mode {pull}19759[#19759] (issue: {issue}19755[#19755]) +* Add index pattern wildcards support to _cat/shards {pull}19655[#19655] (issue: {issue}19634[#19634]) + Cluster:: +* Allow routing table to be filtered by index pattern {pull}19688[#19688] * Use executor's describeTasks method to log task information in cluster service {pull}19531[#19531] Core:: * Makes `m` case sensitive in TimeValue {pull}19649[#19649] (issue: {issue}19619[#19619]) +* Guard against negative result from FileStore.getUsableSpace when picking data path for a new shard {pull}19554[#19554] * Handle rejected execution exception on reschedule {pull}19505[#19505] Dates:: * Make sure TimeIntervalRounding is monotonic for increasing dates {pull}19020[#19020] Geo:: -* Incomplete results when using geo_distance for large distances [ISSUE] {pull}17578[#17578] +* Incomplete results when using geo_distance for large distances {pull}17578[#17578] Highlighting:: +* Plain highlighter should ignore parent/child queries {pull}19616[#19616] (issue: {issue}14999[#14999]) * Let fast vector highlighter also extract terms from the nested query's inner query. 
{pull}19337[#19337] (issue: {issue}19265[#19265]) Index APIs:: +* Fixes active shard count check in the case of `all` shards {pull}19760[#19760] * Add zero-padding to auto-generated rollover index name increment {pull}19610[#19610] (issue: {issue}19484[#19484]) Ingest:: @@ -320,6 +358,7 @@ Logging:: * Only log running out of slots when out of slots {pull}19637[#19637] Mapping:: +* Mappings: Fix detection of metadata fields in documents {pull}19765[#19765] * Fix not_analyzed string fields to error when position_increment_gap is set {pull}19510[#19510] * Automatically created indices should honor `index.mapper.dynamic`. {pull}19478[#19478] (issue: {issue}17592[#17592]) @@ -349,6 +388,7 @@ Plugin Repository S3:: * Fix repository S3 Settings and add more tests {pull}18703[#18703] (issues: {issue}18662[#18662], {issue}18690[#18690]) Query DSL:: +* Throw ParsingException if a query is wrapped in an array {pull}19750[#19750] (issue: {issue}12887[#12887]) * Restore parameter name auto_generate_phrase_queries {pull}19514[#19514] (issue: {issue}19512[#19512]) REST:: @@ -371,6 +411,7 @@ Stats:: * Allocation explain: Also serialize `includeDiskInfo` field {pull}19492[#19492] Store:: +* Tighten up concurrent store metadata listing and engine writes {pull}19684[#19684] (issue: {issue}19416[#19416]) * Make static Store access shard lock aware {pull}19416[#19416] (issue: {issue}18938[#18938]) * Catch assertion errors on commit and turn it into a real exception {pull}19357[#19357] (issue: {issue}19356[#19356]) @@ -381,6 +422,7 @@ Store:: === Upgrades Network:: +* Dependencies: Upgrade to netty 4.1.4 {pull}19689[#19689] * Introduce Netty 4 {pull}19526[#19526] (issue: {issue}3226[#3226]) * Upgrade to netty 3.10.6.Final {pull}19235[#19235] From 785624e96b03c5fc011bfd89429036d758b404b7 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 4 Aug 2016 11:22:13 -0400 Subject: [PATCH 09/20] Restore interrupted status on when closing client When closing a transport client that depends on 
Netty 4, interrupted exceptions can be thrown while shutting down some Netty threads. This commit refactors the handling of these exceptions to finish shutting down and then just restore the interrupted status. --- .../transport/client/PreBuiltTransportClient.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java index acc28f2e34a..cc7e722d802 100644 --- a/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java +++ b/client/transport/src/main/java/org/elasticsearch/transport/client/PreBuiltTransportClient.java @@ -21,6 +21,7 @@ package org.elasticsearch.transport.client; import io.netty.util.ThreadDeathWatcher; import io.netty.util.concurrent.GlobalEventExecutor; +import org.apache.lucene.util.IOUtils; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Setting; @@ -95,9 +96,13 @@ public class PreBuiltTransportClient extends TransportClient { || NetworkModule.TRANSPORT_TYPE_SETTING.get(settings).equals(Netty4Plugin.NETTY_TRANSPORT_NAME)) { try { GlobalEventExecutor.INSTANCE.awaitInactivity(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + try { ThreadDeathWatcher.awaitInactivity(5, TimeUnit.SECONDS); } catch (InterruptedException e) { - throw new RuntimeException(e); + Thread.currentThread().interrupt(); } } } From 5ab5cc69b8165e5349cc6384842c217223f5e478 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 3 Aug 2016 14:51:50 +0100 Subject: [PATCH 10/20] Remove unused rounding code Factor rounding and Interval rounding (the non-date based rounding) was no longer used so it has been removed. 
Offset rounding has been retained for no since both date based rounding classes rely on it --- .../common/rounding/Rounding.java | 150 ------------------ .../common/rounding/TimeZoneRounding.java | 23 ++- .../bucket/histogram/DateHistogramParser.java | 17 +- ...ingTests.java => OffsetRoundingTests.java} | 33 +--- 4 files changed, 22 insertions(+), 201 deletions(-) rename core/src/test/java/org/elasticsearch/common/rounding/{RoundingTests.java => OffsetRoundingTests.java} (70%) diff --git a/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java b/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java index 5633cf9f213..4ddba09cfab 100644 --- a/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java +++ b/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.rounding; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -54,157 +53,10 @@ public abstract class Rounding implements Streamable { @Override public abstract int hashCode(); - /** - * Rounding strategy which is based on an interval - * - * {@code rounded = value - (value % interval) } - */ - public static class Interval extends Rounding { - - static final byte ID = 0; - - public static final ParseField INTERVAL_FIELD = new ParseField("interval"); - - private long interval; - - public Interval() { // for serialization - } - - /** - * Creates a new interval rounding. 
- * - * @param interval The interval - */ - public Interval(long interval) { - this.interval = interval; - } - - @Override - public byte id() { - return ID; - } - - public static long roundKey(long value, long interval) { - if (value < 0) { - return (value - interval + 1) / interval; - } else { - return value / interval; - } - } - - public static long roundValue(long key, long interval) { - return key * interval; - } - - @Override - public long round(long value) { - return roundKey(value, interval) * interval; - } - - @Override - public long nextRoundingValue(long value) { - assert value == round(value); - return value + interval; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - interval = in.readVLong(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(interval); - } - - @Override - public int hashCode() { - return Objects.hash(interval); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - Interval other = (Interval) obj; - return Objects.equals(interval, other.interval); - } - } - - public static class FactorRounding extends Rounding { - - static final byte ID = 7; - - public static final ParseField FACTOR_FIELD = new ParseField("factor"); - - private Rounding rounding; - - private float factor; - - FactorRounding() { // for serialization - } - - FactorRounding(Rounding rounding, float factor) { - this.rounding = rounding; - this.factor = factor; - } - - @Override - public byte id() { - return ID; - } - - @Override - public long round(long utcMillis) { - return rounding.round((long) (factor * utcMillis)); - } - - @Override - public long nextRoundingValue(long value) { - return rounding.nextRoundingValue(value); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - rounding = Rounding.Streams.read(in); - factor = in.readFloat(); - } - - @Override - public 
void writeTo(StreamOutput out) throws IOException { - Rounding.Streams.write(rounding, out); - out.writeFloat(factor); - } - - @Override - public int hashCode() { - return Objects.hash(rounding, factor); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - FactorRounding other = (FactorRounding) obj; - return Objects.equals(rounding, other.rounding) - && Objects.equals(factor, other.factor); - } - } - public static class OffsetRounding extends Rounding { static final byte ID = 8; - public static final ParseField OFFSET_FIELD = new ParseField("offset"); - private Rounding rounding; private long offset; @@ -274,10 +126,8 @@ public abstract class Rounding implements Streamable { Rounding rounding = null; byte id = in.readByte(); switch (id) { - case Interval.ID: rounding = new Interval(); break; case TimeZoneRounding.TimeUnitRounding.ID: rounding = new TimeZoneRounding.TimeUnitRounding(); break; case TimeZoneRounding.TimeIntervalRounding.ID: rounding = new TimeZoneRounding.TimeIntervalRounding(); break; - case TimeZoneRounding.FactorRounding.ID: rounding = new FactorRounding(); break; case OffsetRounding.ID: rounding = new OffsetRounding(); break; default: throw new ElasticsearchException("unknown rounding id [" + id + "]"); } diff --git a/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java b/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java index 932afa15b56..5287203df69 100644 --- a/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java +++ b/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.rounding; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; @@ -36,8 +35,6 @@ 
import java.util.Objects; * daylight saving times. */ public abstract class TimeZoneRounding extends Rounding { - public static final ParseField INTERVAL_FIELD = new ParseField("interval"); - public static final ParseField TIME_ZONE_FIELD = new ParseField("time_zone"); public static Builder builder(DateTimeUnit unit) { return new Builder(unit); @@ -54,8 +51,6 @@ public abstract class TimeZoneRounding extends Rounding { private DateTimeZone timeZone = DateTimeZone.UTC; - private float factor = 1.0f; - private long offset; public Builder(DateTimeUnit unit) { @@ -83,11 +78,6 @@ public abstract class TimeZoneRounding extends Rounding { return this; } - public Builder factor(float factor) { - this.factor = factor; - return this; - } - public Rounding build() { Rounding timeZoneRounding; if (unit != null) { @@ -98,9 +88,6 @@ public abstract class TimeZoneRounding extends Rounding { if (offset != 0) { timeZoneRounding = new OffsetRounding(timeZoneRounding, offset); } - if (factor != 1.0f) { - timeZoneRounding = new FactorRounding(timeZoneRounding, factor); - } return timeZoneRounding; } } @@ -215,7 +202,7 @@ public abstract class TimeZoneRounding extends Rounding { @Override public long round(long utcMillis) { long timeLocal = timeZone.convertUTCToLocal(utcMillis); - long rounded = Rounding.Interval.roundValue(Rounding.Interval.roundKey(timeLocal, interval), interval); + long rounded = roundKey(timeLocal, interval) * interval; long roundedUTC; if (isInDSTGap(rounded) == false) { roundedUTC = timeZone.convertLocalToUTC(rounded, true, utcMillis); @@ -238,6 +225,14 @@ public abstract class TimeZoneRounding extends Rounding { return roundedUTC; } + private static long roundKey(long value, long interval) { + if (value < 0) { + return (value - interval + 1) / interval; + } else { + return value / interval; + } + } + /** * Determine whether the local instant is a valid instant in the given * time zone. 
The logic for this is taken from diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java index f139ad18bb0..e3a3ea75762 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.search.aggregations.support.AbstractValuesSourceParser.NumericValuesSourceParser; @@ -45,7 +44,7 @@ public class DateHistogramParser extends NumericValuesSourceParser { protected DateHistogramAggregationBuilder createFactory(String aggregationName, ValuesSourceType valuesSourceType, ValueType targetValueType, Map otherOptions) { DateHistogramAggregationBuilder factory = new DateHistogramAggregationBuilder(aggregationName); - Object interval = otherOptions.get(Rounding.Interval.INTERVAL_FIELD); + Object interval = otherOptions.get(Histogram.INTERVAL_FIELD); if (interval == null) { throw new ParsingException(null, "Missing required field [interval] for histogram aggregation [" + aggregationName + "]"); } else if (interval instanceof Long) { @@ -55,7 +54,7 @@ public class DateHistogramParser extends NumericValuesSourceParser { } else { throw new IllegalStateException("Unexpected interval class: " + interval.getClass()); } - Long offset = (Long) otherOptions.get(Rounding.OffsetRounding.OFFSET_FIELD); + Long offset = (Long) otherOptions.get(Histogram.OFFSET_FIELD); if 
(offset != null) { factory.offset(offset); } @@ -83,12 +82,12 @@ public class DateHistogramParser extends NumericValuesSourceParser { protected boolean token(String aggregationName, String currentFieldName, Token token, XContentParser parser, ParseFieldMatcher parseFieldMatcher, Map otherOptions) throws IOException { if (token.isValue()) { - if (parseFieldMatcher.match(currentFieldName, Rounding.Interval.INTERVAL_FIELD)) { + if (parseFieldMatcher.match(currentFieldName, Histogram.INTERVAL_FIELD)) { if (token == XContentParser.Token.VALUE_STRING) { - otherOptions.put(Rounding.Interval.INTERVAL_FIELD, new DateHistogramInterval(parser.text())); + otherOptions.put(Histogram.INTERVAL_FIELD, new DateHistogramInterval(parser.text())); return true; } else { - otherOptions.put(Rounding.Interval.INTERVAL_FIELD, parser.longValue()); + otherOptions.put(Histogram.INTERVAL_FIELD, parser.longValue()); return true; } } else if (parseFieldMatcher.match(currentFieldName, Histogram.MIN_DOC_COUNT_FIELD)) { @@ -97,13 +96,13 @@ public class DateHistogramParser extends NumericValuesSourceParser { } else if (parseFieldMatcher.match(currentFieldName, Histogram.KEYED_FIELD)) { otherOptions.put(Histogram.KEYED_FIELD, parser.booleanValue()); return true; - } else if (parseFieldMatcher.match(currentFieldName, Rounding.OffsetRounding.OFFSET_FIELD)) { + } else if (parseFieldMatcher.match(currentFieldName, Histogram.OFFSET_FIELD)) { if (token == XContentParser.Token.VALUE_STRING) { - otherOptions.put(Rounding.OffsetRounding.OFFSET_FIELD, + otherOptions.put(Histogram.OFFSET_FIELD, DateHistogramAggregationBuilder.parseStringOffset(parser.text())); return true; } else { - otherOptions.put(Rounding.OffsetRounding.OFFSET_FIELD, parser.longValue()); + otherOptions.put(Histogram.OFFSET_FIELD, parser.longValue()); return true; } } else { diff --git a/core/src/test/java/org/elasticsearch/common/rounding/RoundingTests.java b/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java 
similarity index 70% rename from core/src/test/java/org/elasticsearch/common/rounding/RoundingTests.java rename to core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java index a71cc77ffc1..a601bd140e8 100644 --- a/core/src/test/java/org/elasticsearch/common/rounding/RoundingTests.java +++ b/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java @@ -20,37 +20,13 @@ package org.elasticsearch.common.rounding; import org.elasticsearch.test.ESTestCase; +import org.joda.time.DateTimeZone; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; -public class RoundingTests extends ESTestCase { - /** - * simple test case to illustrate how Rounding.Interval works on readable input - */ - public void testInterval() { - int interval = 10; - Rounding.Interval rounding = new Rounding.Interval(interval); - int value = 24; - final long r = rounding.round(24); - String message = "round(" + value + ", interval=" + interval + ") = " + r; - assertEquals(value/interval * interval, r); - assertEquals(message, 0, r % interval); - } - - public void testIntervalRandom() { - final long interval = randomIntBetween(1, 100); - Rounding.Interval rounding = new Rounding.Interval(interval); - for (int i = 0; i < 1000; ++i) { - long l = Math.max(randomLong(), Long.MIN_VALUE + interval); - final long r = rounding.round(l); - String message = "round(" + l + ", interval=" + interval + ") = " + r; - assertEquals(message, 0, r % interval); - assertThat(message, r, lessThanOrEqualTo(l)); - assertThat(message, r + interval, greaterThan(l)); - } - } +public class OffsetRoundingTests extends ESTestCase { /** * Simple test case to illustrate how Rounding.Offset works on readable input. 
@@ -60,7 +36,8 @@ public class RoundingTests extends ESTestCase { public void testOffsetRounding() { final long interval = 10; final long offset = 7; - Rounding.OffsetRounding rounding = new Rounding.OffsetRounding(new Rounding.Interval(interval), offset); + Rounding.OffsetRounding rounding = new Rounding.OffsetRounding( + new TimeZoneRounding.TimeIntervalRounding(interval, DateTimeZone.UTC), offset); assertEquals(-3, rounding.round(6)); assertEquals(7, rounding.nextRoundingValue(-3)); assertEquals(7, rounding.round(7)); @@ -76,7 +53,7 @@ public class RoundingTests extends ESTestCase { public void testOffsetRoundingRandom() { for (int i = 0; i < 1000; ++i) { final long interval = randomIntBetween(1, 100); - Rounding.Interval internalRounding = new Rounding.Interval(interval); + Rounding internalRounding = new TimeZoneRounding.TimeIntervalRounding(interval, DateTimeZone.UTC); final long offset = randomIntBetween(-100, 100); Rounding.OffsetRounding rounding = new Rounding.OffsetRounding(internalRounding, offset); long safetyMargin = Math.abs(interval) + Math.abs(offset); // to prevent range overflow From c14155e4a850d1e3e39d22d68fa33ad37efbd25b Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 3 Aug 2016 16:05:52 +0100 Subject: [PATCH 11/20] Remove TimeZoneRounding abstraction Because the Rounding class now only deals with date based rounding of values we can remove the TimeZoneRounding abstraction to simplify the code. 
--- .../common/rounding/Rounding.java | 285 +++++++++++++++- .../common/rounding/TimeZoneRounding.java | 309 ------------------ .../histogram/DateHistogramAggregator.java | 6 +- .../DateHistogramAggregatorFactory.java | 9 +- .../common/rounding/OffsetRoundingTests.java | 4 +- .../rounding/TimeZoneRoundingTests.java | 109 +++--- 6 files changed, 346 insertions(+), 376 deletions(-) delete mode 100644 core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java diff --git a/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java b/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java index 4ddba09cfab..59acf468b86 100644 --- a/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java +++ b/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java @@ -22,6 +22,10 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.unit.TimeValue; +import org.joda.time.DateTimeField; +import org.joda.time.DateTimeZone; +import org.joda.time.IllegalInstantException; import java.io.IOException; import java.util.Objects; @@ -53,6 +57,279 @@ public abstract class Rounding implements Streamable { @Override public abstract int hashCode(); + public static Builder builder(DateTimeUnit unit) { + return new Builder(unit); + } + + public static Builder builder(TimeValue interval) { + return new Builder(interval); + } + + public static class Builder { + + private final DateTimeUnit unit; + private final long interval; + + private DateTimeZone timeZone = DateTimeZone.UTC; + + private long offset; + + public Builder(DateTimeUnit unit) { + this.unit = unit; + this.interval = -1; + } + + public Builder(TimeValue interval) { + this.unit = null; + if (interval.millis() < 1) + throw new IllegalArgumentException("Zero or negative time interval not 
supported"); + this.interval = interval.millis(); + } + + public Builder timeZone(DateTimeZone timeZone) { + if (timeZone == null) { + throw new IllegalArgumentException("Setting null as timezone is not supported"); + } + this.timeZone = timeZone; + return this; + } + + public Builder offset(long offset) { + this.offset = offset; + return this; + } + + public Rounding build() { + Rounding timeZoneRounding; + if (unit != null) { + timeZoneRounding = new TimeUnitRounding(unit, timeZone); + } else { + timeZoneRounding = new TimeIntervalRounding(interval, timeZone); + } + if (offset != 0) { + timeZoneRounding = new OffsetRounding(timeZoneRounding, offset); + } + return timeZoneRounding; + } + } + + static class TimeUnitRounding extends Rounding { + + static final byte ID = 1; + + private DateTimeUnit unit; + private DateTimeField field; + private DateTimeZone timeZone; + + TimeUnitRounding() { // for serialization + } + + TimeUnitRounding(DateTimeUnit unit, DateTimeZone timeZone) { + this.unit = unit; + this.field = unit.field(timeZone); + this.timeZone = timeZone; + } + + @Override + public byte id() { + return ID; + } + + @Override + public long round(long utcMillis) { + long rounded = field.roundFloor(utcMillis); + if (timeZone.isFixed() == false && timeZone.getOffset(utcMillis) != timeZone.getOffset(rounded)) { + // in this case, we crossed a time zone transition. In some edge + // cases this will + // result in a value that is not a rounded value itself. We need + // to round again + // to make sure. 
This will have no affect in cases where + // 'rounded' was already a proper + // rounded value + rounded = field.roundFloor(rounded); + } + assert rounded == field.roundFloor(rounded); + return rounded; + } + + @Override + public long nextRoundingValue(long utcMillis) { + long floor = round(utcMillis); + // add one unit and round to get to next rounded value + long next = round(field.add(floor, 1)); + if (next == floor) { + // in rare case we need to add more than one unit + next = round(field.add(floor, 2)); + } + return next; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + unit = DateTimeUnit.resolve(in.readByte()); + timeZone = DateTimeZone.forID(in.readString()); + field = unit.field(timeZone); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeByte(unit.id()); + out.writeString(timeZone.getID()); + } + + @Override + public int hashCode() { + return Objects.hash(unit, timeZone); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + TimeUnitRounding other = (TimeUnitRounding) obj; + return Objects.equals(unit, other.unit) && Objects.equals(timeZone, other.timeZone); + } + + @Override + public String toString() { + return "[" + timeZone + "][" + unit + "]"; + } + } + + static class TimeIntervalRounding extends Rounding { + + static final byte ID = 2; + + private long interval; + private DateTimeZone timeZone; + + TimeIntervalRounding() { // for serialization + } + + TimeIntervalRounding(long interval, DateTimeZone timeZone) { + if (interval < 1) + throw new IllegalArgumentException("Zero or negative time interval not supported"); + this.interval = interval; + this.timeZone = timeZone; + } + + @Override + public byte id() { + return ID; + } + + @Override + public long round(long utcMillis) { + long timeLocal = timeZone.convertUTCToLocal(utcMillis); + long rounded = roundKey(timeLocal, 
interval) * interval; + long roundedUTC; + if (isInDSTGap(rounded) == false) { + roundedUTC = timeZone.convertLocalToUTC(rounded, true, utcMillis); + // check if we crossed DST transition, in this case we want the + // last rounded value before the transition + long transition = timeZone.previousTransition(utcMillis); + if (transition != utcMillis && transition > roundedUTC) { + roundedUTC = round(transition - 1); + } + } else { + /* + * Edge case where the rounded local time is illegal and landed + * in a DST gap. In this case, we choose 1ms tick after the + * transition date. We don't want the transition date itself + * because those dates, when rounded themselves, fall into the + * previous interval. This would violate the invariant that the + * rounding operation should be idempotent. + */ + roundedUTC = timeZone.previousTransition(utcMillis) + 1; + } + return roundedUTC; + } + + private static long roundKey(long value, long interval) { + if (value < 0) { + return (value - interval + 1) / interval; + } else { + return value / interval; + } + } + + /** + * Determine whether the local instant is a valid instant in the given + * time zone. The logic for this is taken from + * {@link DateTimeZone#convertLocalToUTC(long, boolean)} for the + * `strict` mode case, but instead of throwing an + * {@link IllegalInstantException}, which is costly, we want to return a + * flag indicating that the value is illegal in that time zone. 
+ */ + private boolean isInDSTGap(long instantLocal) { + if (timeZone.isFixed()) { + return false; + } + // get the offset at instantLocal (first estimate) + int offsetLocal = timeZone.getOffset(instantLocal); + // adjust instantLocal using the estimate and recalc the offset + int offset = timeZone.getOffset(instantLocal - offsetLocal); + // if the offsets differ, we must be near a DST boundary + if (offsetLocal != offset) { + // determine if we are in the DST gap + long nextLocal = timeZone.nextTransition(instantLocal - offsetLocal); + if (nextLocal == (instantLocal - offsetLocal)) { + nextLocal = Long.MAX_VALUE; + } + long nextAdjusted = timeZone.nextTransition(instantLocal - offset); + if (nextAdjusted == (instantLocal - offset)) { + nextAdjusted = Long.MAX_VALUE; + } + if (nextLocal != nextAdjusted) { + // we are in the DST gap + return true; + } + } + return false; + } + + @Override + public long nextRoundingValue(long time) { + long timeLocal = time; + timeLocal = timeZone.convertUTCToLocal(time); + long next = timeLocal + interval; + return timeZone.convertLocalToUTC(next, false); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + interval = in.readVLong(); + timeZone = DateTimeZone.forID(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(interval); + out.writeString(timeZone.getID()); + } + + @Override + public int hashCode() { + return Objects.hash(interval, timeZone); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + TimeIntervalRounding other = (TimeIntervalRounding) obj; + return Objects.equals(interval, other.interval) && Objects.equals(timeZone, other.timeZone); + } + } + public static class OffsetRounding extends Rounding { static final byte ID = 8; @@ -95,12 +372,12 @@ public abstract class Rounding implements Streamable { 
Rounding.Streams.write(rounding, out); out.writeLong(offset); } - + @Override public int hashCode() { return Objects.hash(rounding, offset); } - + @Override public boolean equals(Object obj) { if (obj == null) { @@ -126,8 +403,8 @@ public abstract class Rounding implements Streamable { Rounding rounding = null; byte id = in.readByte(); switch (id) { - case TimeZoneRounding.TimeUnitRounding.ID: rounding = new TimeZoneRounding.TimeUnitRounding(); break; - case TimeZoneRounding.TimeIntervalRounding.ID: rounding = new TimeZoneRounding.TimeIntervalRounding(); break; + case TimeUnitRounding.ID: rounding = new TimeUnitRounding(); break; + case TimeIntervalRounding.ID: rounding = new TimeIntervalRounding(); break; case OffsetRounding.ID: rounding = new OffsetRounding(); break; default: throw new ElasticsearchException("unknown rounding id [" + id + "]"); } diff --git a/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java b/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java deleted file mode 100644 index 5287203df69..00000000000 --- a/core/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java +++ /dev/null @@ -1,309 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.common.rounding; - -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.unit.TimeValue; -import org.joda.time.DateTimeField; -import org.joda.time.DateTimeZone; -import org.joda.time.IllegalInstantException; - -import java.io.IOException; -import java.util.Objects; - -/** - * A rounding strategy for dates. It is typically used to group together dates - * that are part of the same hour/day/month, taking into account time zones and - * daylight saving times. - */ -public abstract class TimeZoneRounding extends Rounding { - - public static Builder builder(DateTimeUnit unit) { - return new Builder(unit); - } - - public static Builder builder(TimeValue interval) { - return new Builder(interval); - } - - public static class Builder { - - private final DateTimeUnit unit; - private final long interval; - - private DateTimeZone timeZone = DateTimeZone.UTC; - - private long offset; - - public Builder(DateTimeUnit unit) { - this.unit = unit; - this.interval = -1; - } - - public Builder(TimeValue interval) { - this.unit = null; - if (interval.millis() < 1) - throw new IllegalArgumentException("Zero or negative time interval not supported"); - this.interval = interval.millis(); - } - - public Builder timeZone(DateTimeZone timeZone) { - if (timeZone == null) { - throw new IllegalArgumentException("Setting null as timezone is not supported"); - } - this.timeZone = timeZone; - return this; - } - - public Builder offset(long offset) { - this.offset = offset; - return this; - } - - public Rounding build() { - Rounding timeZoneRounding; - if (unit != null) { - timeZoneRounding = new TimeUnitRounding(unit, timeZone); - } else { - timeZoneRounding = new TimeIntervalRounding(interval, timeZone); - } - if (offset != 0) { - timeZoneRounding = new OffsetRounding(timeZoneRounding, offset); - } - return timeZoneRounding; - } - } - - static class TimeUnitRounding 
extends TimeZoneRounding { - - static final byte ID = 1; - - private DateTimeUnit unit; - private DateTimeField field; - private DateTimeZone timeZone; - - TimeUnitRounding() { // for serialization - } - - TimeUnitRounding(DateTimeUnit unit, DateTimeZone timeZone) { - this.unit = unit; - this.field = unit.field(timeZone); - this.timeZone = timeZone; - } - - @Override - public byte id() { - return ID; - } - - @Override - public long round(long utcMillis) { - long rounded = field.roundFloor(utcMillis); - if (timeZone.isFixed() == false && timeZone.getOffset(utcMillis) != timeZone.getOffset(rounded)) { - // in this case, we crossed a time zone transition. In some edge cases this will - // result in a value that is not a rounded value itself. We need to round again - // to make sure. This will have no affect in cases where 'rounded' was already a proper - // rounded value - rounded = field.roundFloor(rounded); - } - assert rounded == field.roundFloor(rounded); - return rounded; - } - - @Override - public long nextRoundingValue(long utcMillis) { - long floor = round(utcMillis); - // add one unit and round to get to next rounded value - long next = round(field.add(floor, 1)); - if (next == floor) { - // in rare case we need to add more than one unit - next = round(field.add(floor, 2)); - } - return next; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - unit = DateTimeUnit.resolve(in.readByte()); - timeZone = DateTimeZone.forID(in.readString()); - field = unit.field(timeZone); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeByte(unit.id()); - out.writeString(timeZone.getID()); - } - - @Override - public int hashCode() { - return Objects.hash(unit, timeZone); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - TimeUnitRounding other = (TimeUnitRounding) obj; - return Objects.equals(unit, 
other.unit) - && Objects.equals(timeZone, other.timeZone); - } - - @Override - public String toString() { - return "[" + timeZone + "][" + unit +"]"; - } - } - - static class TimeIntervalRounding extends TimeZoneRounding { - - static final byte ID = 2; - - private long interval; - private DateTimeZone timeZone; - - TimeIntervalRounding() { // for serialization - } - - TimeIntervalRounding(long interval, DateTimeZone timeZone) { - if (interval < 1) - throw new IllegalArgumentException("Zero or negative time interval not supported"); - this.interval = interval; - this.timeZone = timeZone; - } - - @Override - public byte id() { - return ID; - } - - @Override - public long round(long utcMillis) { - long timeLocal = timeZone.convertUTCToLocal(utcMillis); - long rounded = roundKey(timeLocal, interval) * interval; - long roundedUTC; - if (isInDSTGap(rounded) == false) { - roundedUTC = timeZone.convertLocalToUTC(rounded, true, utcMillis); - // check if we crossed DST transition, in this case we want the last rounded value before the transition - long transition = timeZone.previousTransition(utcMillis); - if (transition != utcMillis && transition > roundedUTC) { - roundedUTC = round(transition - 1); - } - } else { - /* - * Edge case where the rounded local time is illegal and landed - * in a DST gap. In this case, we choose 1ms tick after the - * transition date. We don't want the transition date itself - * because those dates, when rounded themselves, fall into the - * previous interval. This would violate the invariant that the - * rounding operation should be idempotent. - */ - roundedUTC = timeZone.previousTransition(utcMillis) + 1; - } - return roundedUTC; - } - - private static long roundKey(long value, long interval) { - if (value < 0) { - return (value - interval + 1) / interval; - } else { - return value / interval; - } - } - - /** - * Determine whether the local instant is a valid instant in the given - * time zone. 
The logic for this is taken from - * {@link DateTimeZone#convertLocalToUTC(long, boolean)} for the - * `strict` mode case, but instead of throwing an - * {@link IllegalInstantException}, which is costly, we want to return a - * flag indicating that the value is illegal in that time zone. - */ - private boolean isInDSTGap(long instantLocal) { - if (timeZone.isFixed()) { - return false; - } - // get the offset at instantLocal (first estimate) - int offsetLocal = timeZone.getOffset(instantLocal); - // adjust instantLocal using the estimate and recalc the offset - int offset = timeZone.getOffset(instantLocal - offsetLocal); - // if the offsets differ, we must be near a DST boundary - if (offsetLocal != offset) { - // determine if we are in the DST gap - long nextLocal = timeZone.nextTransition(instantLocal - offsetLocal); - if (nextLocal == (instantLocal - offsetLocal)) { - nextLocal = Long.MAX_VALUE; - } - long nextAdjusted = timeZone.nextTransition(instantLocal - offset); - if (nextAdjusted == (instantLocal - offset)) { - nextAdjusted = Long.MAX_VALUE; - } - if (nextLocal != nextAdjusted) { - // we are in the DST gap - return true; - } - } - return false; - } - - @Override - public long nextRoundingValue(long time) { - long timeLocal = time; - timeLocal = timeZone.convertUTCToLocal(time); - long next = timeLocal + interval; - return timeZone.convertLocalToUTC(next, false); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - interval = in.readVLong(); - timeZone = DateTimeZone.forID(in.readString()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVLong(interval); - out.writeString(timeZone.getID()); - } - - @Override - public int hashCode() { - return Objects.hash(interval, timeZone); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - TimeIntervalRounding other = (TimeIntervalRounding) obj; - 
return Objects.equals(interval, other.interval) - && Objects.equals(timeZone, other.timeZone); - } - } -} diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index cf8325683e2..8785d53e01e 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -24,7 +24,6 @@ import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.rounding.Rounding; -import org.elasticsearch.common.rounding.TimeZoneRounding; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; @@ -45,8 +44,9 @@ import java.util.Map; /** * An aggregator for date values. Every date is rounded down using a configured - * {@link TimeZoneRounding}. - * @see TimeZoneRounding + * {@link Rounding}. 
+ * + * @see Rounding */ class DateHistogramAggregator extends BucketsAggregator { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index 17c6d82a9c3..2743989a4b5 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.rounding.DateTimeUnit; import org.elasticsearch.common.rounding.Rounding; -import org.elasticsearch.common.rounding.TimeZoneRounding; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -95,19 +94,19 @@ public final class DateHistogramAggregatorFactory } private Rounding createRounding() { - TimeZoneRounding.Builder tzRoundingBuilder; + Rounding.Builder tzRoundingBuilder; if (dateHistogramInterval != null) { DateTimeUnit dateTimeUnit = DATE_FIELD_UNITS.get(dateHistogramInterval.toString()); if (dateTimeUnit != null) { - tzRoundingBuilder = TimeZoneRounding.builder(dateTimeUnit); + tzRoundingBuilder = Rounding.builder(dateTimeUnit); } else { // the interval is a time value? - tzRoundingBuilder = TimeZoneRounding.builder( + tzRoundingBuilder = Rounding.builder( TimeValue.parseTimeValue(dateHistogramInterval.toString(), null, getClass().getSimpleName() + ".interval")); } } else { // the interval is an integer time value in millis? 
- tzRoundingBuilder = TimeZoneRounding.builder(TimeValue.timeValueMillis(interval)); + tzRoundingBuilder = Rounding.builder(TimeValue.timeValueMillis(interval)); } if (timeZone() != null) { tzRoundingBuilder.timeZone(timeZone()); diff --git a/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java b/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java index a601bd140e8..86e4e3b6cd6 100644 --- a/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java +++ b/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java @@ -37,7 +37,7 @@ public class OffsetRoundingTests extends ESTestCase { final long interval = 10; final long offset = 7; Rounding.OffsetRounding rounding = new Rounding.OffsetRounding( - new TimeZoneRounding.TimeIntervalRounding(interval, DateTimeZone.UTC), offset); + new Rounding.TimeIntervalRounding(interval, DateTimeZone.UTC), offset); assertEquals(-3, rounding.round(6)); assertEquals(7, rounding.nextRoundingValue(-3)); assertEquals(7, rounding.round(7)); @@ -53,7 +53,7 @@ public class OffsetRoundingTests extends ESTestCase { public void testOffsetRoundingRandom() { for (int i = 0; i < 1000; ++i) { final long interval = randomIntBetween(1, 100); - Rounding internalRounding = new TimeZoneRounding.TimeIntervalRounding(interval, DateTimeZone.UTC); + Rounding internalRounding = new Rounding.TimeIntervalRounding(interval, DateTimeZone.UTC); final long offset = randomIntBetween(-100, 100); Rounding.OffsetRounding rounding = new Rounding.OffsetRounding(internalRounding, offset); long safetyMargin = Math.abs(interval) + Math.abs(offset); // to prevent range overflow diff --git a/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java b/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java index d4920e9afe8..41f60556d9d 100644 --- a/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java +++ 
b/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java @@ -20,8 +20,8 @@ package org.elasticsearch.common.rounding; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.rounding.TimeZoneRounding.TimeIntervalRounding; -import org.elasticsearch.common.rounding.TimeZoneRounding.TimeUnitRounding; +import org.elasticsearch.common.rounding.Rounding.TimeIntervalRounding; +import org.elasticsearch.common.rounding.Rounding.TimeUnitRounding; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Description; @@ -47,29 +47,29 @@ import static org.hamcrest.Matchers.lessThanOrEqualTo; public class TimeZoneRoundingTests extends ESTestCase { public void testUTCTimeUnitRounding() { - Rounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.MONTH_OF_YEAR).build(); + Rounding tzRounding = Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).build(); DateTimeZone tz = DateTimeZone.UTC; assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-01T00:00:00.000Z"), tz)); assertThat(tzRounding.nextRoundingValue(time("2009-02-01T00:00:00.000Z")), isDate(time("2009-03-01T00:00:00.000Z"), tz)); - tzRounding = TimeZoneRounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).build(); + tzRounding = Rounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).build(); assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-09T00:00:00.000Z"), tz)); assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-16T00:00:00.000Z"), tz)); - tzRounding = TimeZoneRounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).offset(-TimeValue.timeValueHours(24).millis()).build(); + tzRounding = Rounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).offset(-TimeValue.timeValueHours(24).millis()).build(); assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-08T00:00:00.000Z"), tz)); 
assertThat(tzRounding.nextRoundingValue(time("2012-01-08T00:00:00.000Z")), isDate(time("2012-01-15T00:00:00.000Z"), tz)); } public void testUTCIntervalRounding() { - Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(12)).build(); + Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(12)).build(); DateTimeZone tz = DateTimeZone.UTC; assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-03T00:00:00.000Z"), tz)); assertThat(tzRounding.nextRoundingValue(time("2009-02-03T00:00:00.000Z")), isDate(time("2009-02-03T12:00:00.000Z"), tz)); assertThat(tzRounding.round(time("2009-02-03T13:01:01")), isDate(time("2009-02-03T12:00:00.000Z"), tz)); assertThat(tzRounding.nextRoundingValue(time("2009-02-03T12:00:00.000Z")), isDate(time("2009-02-04T00:00:00.000Z"), tz)); - tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(48)).build(); + tzRounding = Rounding.builder(TimeValue.timeValueHours(48)).build(); assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-03T00:00:00.000Z"), tz)); assertThat(tzRounding.nextRoundingValue(time("2009-02-03T00:00:00.000Z")), isDate(time("2009-02-05T00:00:00.000Z"), tz)); assertThat(tzRounding.round(time("2009-02-05T13:01:01")), isDate(time("2009-02-05T00:00:00.000Z"), tz)); @@ -77,11 +77,11 @@ public class TimeZoneRoundingTests extends ESTestCase { } /** - * test TimeIntervalTimeZoneRounding, (interval < 12h) with time zone shift + * test TimeIntervalRounding, (interval < 12h) with time zone shift */ - public void testTimeIntervalTimeZoneRounding() { + public void testTimeIntervalRounding() { DateTimeZone tz = DateTimeZone.forOffsetHours(-1); - Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(6)).timeZone(tz).build(); + Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(6)).timeZone(tz).build(); assertThat(tzRounding.round(time("2009-02-03T00:01:01")), isDate(time("2009-02-02T19:00:00.000Z"), tz)); 
assertThat(tzRounding.nextRoundingValue(time("2009-02-02T19:00:00.000Z")), isDate(time("2009-02-03T01:00:00.000Z"), tz)); @@ -90,11 +90,11 @@ public class TimeZoneRoundingTests extends ESTestCase { } /** - * test DayIntervalTimeZoneRounding, (interval >= 12h) with time zone shift + * test DayIntervalRounding, (interval >= 12h) with time zone shift */ - public void testDayIntervalTimeZoneRounding() { + public void testDayIntervalRounding() { DateTimeZone tz = DateTimeZone.forOffsetHours(-8); - Rounding tzRounding = TimeZoneRounding.builder(TimeValue.timeValueHours(12)).timeZone(tz).build(); + Rounding tzRounding = Rounding.builder(TimeValue.timeValueHours(12)).timeZone(tz).build(); assertThat(tzRounding.round(time("2009-02-03T00:01:01")), isDate(time("2009-02-02T20:00:00.000Z"), tz)); assertThat(tzRounding.nextRoundingValue(time("2009-02-02T20:00:00.000Z")), isDate(time("2009-02-03T08:00:00.000Z"), tz)); @@ -102,37 +102,37 @@ public class TimeZoneRoundingTests extends ESTestCase { assertThat(tzRounding.nextRoundingValue(time("2009-02-03T08:00:00.000Z")), isDate(time("2009-02-03T20:00:00.000Z"), tz)); } - public void testDayTimeZoneRounding() { + public void testDayRounding() { int timezoneOffset = -2; - Rounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(DateTimeZone.forOffsetHours(timezoneOffset)) + Rounding tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(DateTimeZone.forOffsetHours(timezoneOffset)) .build(); assertThat(tzRounding.round(0), equalTo(0L - TimeValue.timeValueHours(24 + timezoneOffset).millis())); assertThat(tzRounding.nextRoundingValue(0L - TimeValue.timeValueHours(24 + timezoneOffset).millis()), equalTo(0L - TimeValue .timeValueHours(timezoneOffset).millis())); DateTimeZone tz = DateTimeZone.forID("-08:00"); - tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); + tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); 
assertThat(tzRounding.round(time("2012-04-01T04:15:30Z")), isDate(time("2012-03-31T08:00:00Z"), tz)); - tzRounding = TimeZoneRounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build(); + tzRounding = Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build(); assertThat(tzRounding.round(time("2012-04-01T04:15:30Z")), equalTo(time("2012-03-01T08:00:00Z"))); // date in Feb-3rd, but still in Feb-2nd in -02:00 timezone tz = DateTimeZone.forID("-02:00"); - tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); + tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); assertThat(tzRounding.round(time("2009-02-03T01:01:01")), isDate(time("2009-02-02T02:00:00"), tz)); assertThat(tzRounding.nextRoundingValue(time("2009-02-02T02:00:00")), isDate(time("2009-02-03T02:00:00"), tz)); // date in Feb-3rd, also in -02:00 timezone - tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); + tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); assertThat(tzRounding.round(time("2009-02-03T02:01:01")), isDate(time("2009-02-03T02:00:00"), tz)); assertThat(tzRounding.nextRoundingValue(time("2009-02-03T02:00:00")), isDate(time("2009-02-04T02:00:00"), tz)); } - public void testTimeTimeZoneRounding() { + public void testTimeRounding() { // hour unit DateTimeZone tz = DateTimeZone.forOffsetHours(-2); - Rounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build(); + Rounding tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build(); assertThat(tzRounding.round(0), equalTo(0L)); assertThat(tzRounding.nextRoundingValue(0L), equalTo(TimeValue.timeValueHours(1L).getMillis())); @@ -144,23 +144,23 @@ public class TimeZoneRoundingTests extends ESTestCase { Rounding tzRounding; // testing savings to non savings switch DateTimeZone cet = DateTimeZone.forID("CET"); - tzRounding = 
TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build(); + tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build(); assertThat(tzRounding.round(time("2014-10-26T01:01:01", cet)), isDate(time("2014-10-26T01:00:00+02:00"), cet)); assertThat(tzRounding.nextRoundingValue(time("2014-10-26T01:00:00", cet)),isDate(time("2014-10-26T02:00:00+02:00"), cet)); assertThat(tzRounding.nextRoundingValue(time("2014-10-26T02:00:00", cet)), isDate(time("2014-10-26T02:00:00+01:00"), cet)); // testing non savings to savings switch - tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build(); + tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(cet).build(); assertThat(tzRounding.round(time("2014-03-30T01:01:01", cet)), isDate(time("2014-03-30T01:00:00+01:00"), cet)); assertThat(tzRounding.nextRoundingValue(time("2014-03-30T01:00:00", cet)), isDate(time("2014-03-30T03:00:00", cet), cet)); assertThat(tzRounding.nextRoundingValue(time("2014-03-30T03:00:00", cet)), isDate(time("2014-03-30T04:00:00", cet), cet)); // testing non savings to savings switch (America/Chicago) DateTimeZone chg = DateTimeZone.forID("America/Chicago"); - Rounding tzRounding_utc = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.UTC).build(); + Rounding tzRounding_utc = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.UTC).build(); assertThat(tzRounding.round(time("2014-03-09T03:01:01", chg)), isDate(time("2014-03-09T03:00:00", chg), chg)); - Rounding tzRounding_chg = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(chg).build(); + Rounding tzRounding_chg = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(chg).build(); assertThat(tzRounding_chg.round(time("2014-03-09T03:01:01", chg)), isDate(time("2014-03-09T03:00:00", chg), chg)); // testing savings to non savings switch 2013 (America/Chicago) @@ -173,18 +173,21 @@ public class TimeZoneRoundingTests extends ESTestCase { } /** - * 
Randomized test on TimeUnitRounding. - * Test uses random {@link DateTimeUnit} and {@link DateTimeZone} and often (50% of the time) chooses - * test dates that are exactly on or close to offset changes (e.g. DST) in the chosen time zone. + * Randomized test on TimeUnitRounding. Test uses random + * {@link DateTimeUnit} and {@link DateTimeZone} and often (50% of the time) + * chooses test dates that are exactly on or close to offset changes (e.g. + * DST) in the chosen time zone. * - * It rounds the test date down and up and performs various checks on the rounding unit interval that is - * defined by this. Assumptions tested are described in {@link #assertInterval(long, long, long, TimeZoneRounding, DateTimeZone)} + * It rounds the test date down and up and performs various checks on the + * rounding unit interval that is defined by this. Assumptions tested are + * described in + * {@link #assertInterval(long, long, long, Rounding, DateTimeZone)} */ - public void testTimeZoneRoundingRandom() { + public void testRoundingRandom() { for (int i = 0; i < 1000; ++i) { DateTimeUnit timeUnit = randomTimeUnit(); DateTimeZone tz = randomDateTimeZone(); - TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(timeUnit, tz); + Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); long date = Math.abs(randomLong() % (2 * (long) 10e11)); // 1970-01-01T00:00:00Z - 2033-05-18T05:33:20.000+02:00 long unitMillis = timeUnit.field(tz).getDurationField().getUnitMillis(); if (randomBoolean()) { @@ -226,7 +229,7 @@ public class TimeZoneRoundingTests extends ESTestCase { public void testTimeIntervalCET_DST_End() { long interval = TimeUnit.MINUTES.toMillis(20); DateTimeZone tz = DateTimeZone.forID("CET"); - TimeZoneRounding rounding = new TimeIntervalRounding(interval, tz); + Rounding rounding = new TimeIntervalRounding(interval, tz); assertThat(rounding.round(time("2015-10-25T01:55:00+02:00")), isDate(time("2015-10-25T01:40:00+02:00"), tz)); 
assertThat(rounding.round(time("2015-10-25T02:15:00+02:00")), isDate(time("2015-10-25T02:00:00+02:00"), tz)); @@ -246,7 +249,7 @@ public class TimeZoneRoundingTests extends ESTestCase { public void testTimeIntervalCET_DST_Start() { long interval = TimeUnit.MINUTES.toMillis(20); DateTimeZone tz = DateTimeZone.forID("CET"); - TimeZoneRounding rounding = new TimeIntervalRounding(interval, tz); + Rounding rounding = new TimeIntervalRounding(interval, tz); // test DST start assertThat(rounding.round(time("2016-03-27T01:55:00+01:00")), isDate(time("2016-03-27T01:40:00+01:00"), tz)); assertThat(rounding.round(time("2016-03-27T02:00:00+01:00")), isDate(time("2016-03-27T03:00:00+02:00"), tz)); @@ -263,7 +266,7 @@ public class TimeZoneRoundingTests extends ESTestCase { public void testTimeInterval_Kathmandu_DST_Start() { long interval = TimeUnit.MINUTES.toMillis(20); DateTimeZone tz = DateTimeZone.forID("Asia/Kathmandu"); - TimeZoneRounding rounding = new TimeIntervalRounding(interval, tz); + Rounding rounding = new TimeIntervalRounding(interval, tz); assertThat(rounding.round(time("1985-12-31T23:55:00+05:30")), isDate(time("1985-12-31T23:40:00+05:30"), tz)); assertThat(rounding.round(time("1986-01-01T00:16:00+05:45")), isDate(time("1986-01-01T00:15:00+05:45"), tz)); assertThat(time("1986-01-01T00:15:00+05:45") - time("1985-12-31T23:40:00+05:30"), equalTo(TimeUnit.MINUTES.toMillis(20))); @@ -281,7 +284,7 @@ public class TimeZoneRoundingTests extends ESTestCase { public void testIntervalRounding_NotDivisibleInteval() { DateTimeZone tz = DateTimeZone.forID("CET"); long interval = TimeUnit.MINUTES.toMillis(14); - TimeZoneRounding rounding = new TimeZoneRounding.TimeIntervalRounding(interval, tz); + Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); assertThat(rounding.round(time("2016-03-27T01:41:00+01:00")), isDate(time("2016-03-27T01:30:00+01:00"), tz)); assertThat(rounding.round(time("2016-03-27T01:51:00+01:00")), isDate(time("2016-03-27T01:44:00+01:00"), 
tz)); @@ -298,7 +301,7 @@ public class TimeZoneRoundingTests extends ESTestCase { public void testIntervalRounding_HalfDay_DST() { DateTimeZone tz = DateTimeZone.forID("CET"); long interval = TimeUnit.HOURS.toMillis(12); - TimeZoneRounding rounding = new TimeZoneRounding.TimeIntervalRounding(interval, tz); + Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); assertThat(rounding.round(time("2016-03-26T01:00:00+01:00")), isDate(time("2016-03-26T00:00:00+01:00"), tz)); assertThat(rounding.round(time("2016-03-26T13:00:00+01:00")), isDate(time("2016-03-26T12:00:00+01:00"), tz)); @@ -316,7 +319,7 @@ public class TimeZoneRoundingTests extends ESTestCase { TimeUnit unit = randomFrom(new TimeUnit[] {TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS}); long interval = unit.toMillis(randomIntBetween(1, 365)); DateTimeZone tz = randomDateTimeZone(); - TimeZoneRounding rounding = new TimeZoneRounding.TimeIntervalRounding(interval, tz); + Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); long mainDate = Math.abs(randomLong() % (2 * (long) 10e11)); // 1970-01-01T00:00:00Z - 2033-05-18T05:33:20.000+02:00 if (randomBoolean()) { mainDate = nastyDate(mainDate, tz, interval); @@ -356,8 +359,8 @@ public class TimeZoneRoundingTests extends ESTestCase { public void testIntervalRoundingMonotonic_CET() { long interval = TimeUnit.MINUTES.toMillis(45); DateTimeZone tz = DateTimeZone.forID("CET"); - TimeZoneRounding rounding = new TimeZoneRounding.TimeIntervalRounding(interval, tz); - List> expectedDates = new ArrayList>(); + Rounding rounding = new Rounding.TimeIntervalRounding(interval, tz); + List> expectedDates = new ArrayList<>(); // first date is the date to be rounded, second the expected result expectedDates.add(new Tuple<>("2011-10-30T01:40:00.000+02:00", "2011-10-30T01:30:00.000+02:00")); expectedDates.add(new Tuple<>("2011-10-30T02:02:30.000+02:00", "2011-10-30T01:30:00.000+02:00")); @@ -387,7 +390,7 @@ public class TimeZoneRoundingTests extends 
ESTestCase { public void testAmbiguousHoursAfterDSTSwitch() { Rounding tzRounding; final DateTimeZone tz = DateTimeZone.forID("Asia/Jerusalem"); - tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build(); + tzRounding = Rounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(tz).build(); assertThat(tzRounding.round(time("2014-10-26T00:30:00+03:00")), isDate(time("2014-10-26T00:00:00+03:00"), tz)); assertThat(tzRounding.round(time("2014-10-26T01:30:00+03:00")), isDate(time("2014-10-26T01:00:00+03:00"), tz)); // the utc date for "2014-10-25T03:00:00+03:00" and "2014-10-25T03:00:00+02:00" is the same, local time turns back 1h here @@ -396,7 +399,7 @@ public class TimeZoneRoundingTests extends ESTestCase { assertThat(tzRounding.round(time("2014-10-26T02:30:00+02:00")), isDate(time("2014-10-26T02:00:00+02:00"), tz)); // Day interval - tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); + tzRounding = Rounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(tz).build(); assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-11-11T00:00:00", tz), tz)); // DST on assertThat(tzRounding.round(time("2014-08-11T17:00:00", tz)), isDate(time("2014-08-11T00:00:00", tz), tz)); @@ -406,17 +409,17 @@ public class TimeZoneRoundingTests extends ESTestCase { assertThat(tzRounding.round(time("2015-03-27T17:00:00", tz)), isDate(time("2015-03-27T00:00:00", tz), tz)); // Month interval - tzRounding = TimeZoneRounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build(); + tzRounding = Rounding.builder(DateTimeUnit.MONTH_OF_YEAR).timeZone(tz).build(); assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-11-01T00:00:00", tz), tz)); // DST on assertThat(tzRounding.round(time("2014-10-10T17:00:00", tz)), isDate(time("2014-10-01T00:00:00", tz), tz)); // Year interval - tzRounding = TimeZoneRounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build(); + tzRounding = 
Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build(); assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(time("2014-01-01T00:00:00", tz), tz)); // Two timestamps in same year and different timezone offset ("Double buckets" issue - #9491) - tzRounding = TimeZoneRounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build(); + tzRounding = Rounding.builder(DateTimeUnit.YEAR_OF_CENTURY).timeZone(tz).build(); assertThat(tzRounding.round(time("2014-11-11T17:00:00", tz)), isDate(tzRounding.round(time("2014-08-11T17:00:00", tz)), tz)); } @@ -429,8 +432,8 @@ public class TimeZoneRoundingTests extends ESTestCase { DateTimeZone tz = DateTimeZone.forID("America/Sao_Paulo"); long start = time("2014-10-18T20:50:00.000", tz); long end = time("2014-10-19T01:00:00.000", tz); - Rounding tzRounding = new TimeZoneRounding.TimeUnitRounding(DateTimeUnit.MINUTES_OF_HOUR, tz); - Rounding dayTzRounding = new TimeZoneRounding.TimeIntervalRounding(60000, tz); + Rounding tzRounding = new Rounding.TimeUnitRounding(DateTimeUnit.MINUTES_OF_HOUR, tz); + Rounding dayTzRounding = new Rounding.TimeIntervalRounding(60000, tz); for (long time = start; time < end; time = time + 60000) { assertThat(tzRounding.nextRoundingValue(time), greaterThan(time)); assertThat(dayTzRounding.nextRoundingValue(time), greaterThan(time)); @@ -442,7 +445,7 @@ public class TimeZoneRoundingTests extends ESTestCase { // standard +/-1 hour DST transition, CET DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; DateTimeZone tz = DateTimeZone.forID("CET"); - TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(timeUnit, tz); + Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); // 29 Mar 2015 - Daylight Saving Time Started // at 02:00:00 clocks were turned forward 1 hour to 03:00:00 @@ -466,7 +469,7 @@ public class TimeZoneRoundingTests extends ESTestCase { // which is not a round value for hourly rounding DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; 
DateTimeZone tz = DateTimeZone.forID("Asia/Kathmandu"); - TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(timeUnit, tz); + Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); assertInterval(time("1985-12-31T22:00:00.000+05:30"), time("1985-12-31T23:00:00.000+05:30"), rounding, 60, tz); assertInterval(time("1985-12-31T23:00:00.000+05:30"), time("1986-01-01T01:00:00.000+05:45"), rounding, 105, tz); @@ -479,7 +482,7 @@ public class TimeZoneRoundingTests extends ESTestCase { // at 02:00:00 clocks were turned backward 0:30 hours to Sunday, 3 March 1991, 01:30:00 DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; DateTimeZone tz = DateTimeZone.forID("Australia/Lord_Howe"); - TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(timeUnit, tz); + Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); assertInterval(time("1991-03-03T00:00:00.000+11:00"), time("1991-03-03T01:00:00.000+11:00"), rounding, 60, tz); assertInterval(time("1991-03-03T01:00:00.000+11:00"), time("1991-03-03T02:00:00.000+10:30"), rounding, 90, tz); @@ -499,7 +502,7 @@ public class TimeZoneRoundingTests extends ESTestCase { // at 03:45:00 clocks were turned backward 1 hour to 02:45:00 DateTimeUnit timeUnit = DateTimeUnit.HOUR_OF_DAY; DateTimeZone tz = DateTimeZone.forID("Pacific/Chatham"); - TimeZoneRounding rounding = new TimeZoneRounding.TimeUnitRounding(timeUnit, tz); + Rounding rounding = new Rounding.TimeUnitRounding(timeUnit, tz); assertInterval(time("2015-04-05T02:00:00.000+13:45"), time("2015-04-05T03:00:00.000+13:45"), rounding, 60, tz); assertInterval(time("2015-04-05T03:00:00.000+13:45"), time("2015-04-05T03:00:00.000+12:45"), rounding, 60, tz); @@ -514,7 +517,7 @@ public class TimeZoneRoundingTests extends ESTestCase { } } - private static void assertInterval(long rounded, long nextRoundingValue, TimeZoneRounding rounding, int minutes, + private static void assertInterval(long rounded, long nextRoundingValue, Rounding rounding, int 
minutes, DateTimeZone tz) { assertInterval(rounded, dateBetween(rounded, nextRoundingValue), nextRoundingValue, rounding, tz); assertEquals(DateTimeConstants.MILLIS_PER_MINUTE * minutes, nextRoundingValue - rounded); @@ -527,7 +530,7 @@ public class TimeZoneRoundingTests extends ESTestCase { * @param nextRoundingValue the expected upper end of the rounding interval * @param rounding the rounding instance */ - private static void assertInterval(long rounded, long unrounded, long nextRoundingValue, TimeZoneRounding rounding, + private static void assertInterval(long rounded, long unrounded, long nextRoundingValue, Rounding rounding, DateTimeZone tz) { assert rounded <= unrounded && unrounded <= nextRoundingValue; assertThat("rounding should be idempotent ", rounding.round(rounded), isDate(rounded, tz)); From b6ef99195d4ac1d2b503e380ed147f8fef5940e3 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 4 Aug 2016 11:36:26 +0100 Subject: [PATCH 12/20] Remove offset rounding This is in favour of doing the offset calculations in the date histogram --- .../common/rounding/Rounding.java | 73 ------------------- .../histogram/DateHistogramAggregator.java | 13 ++-- .../DateHistogramAggregatorFactory.java | 4 +- .../histogram/InternalDateHistogram.java | 23 +++--- .../common/rounding/OffsetRoundingTests.java | 69 ------------------ .../rounding/TimeZoneRoundingTests.java | 4 - 6 files changed, 24 insertions(+), 162 deletions(-) delete mode 100644 core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java diff --git a/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java b/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java index 59acf468b86..ad9f926e881 100644 --- a/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java +++ b/core/src/main/java/org/elasticsearch/common/rounding/Rounding.java @@ -72,8 +72,6 @@ public abstract class Rounding implements Streamable { private DateTimeZone timeZone = 
DateTimeZone.UTC; - private long offset; - public Builder(DateTimeUnit unit) { this.unit = unit; this.interval = -1; @@ -94,11 +92,6 @@ public abstract class Rounding implements Streamable { return this; } - public Builder offset(long offset) { - this.offset = offset; - return this; - } - public Rounding build() { Rounding timeZoneRounding; if (unit != null) { @@ -106,9 +99,6 @@ public abstract class Rounding implements Streamable { } else { timeZoneRounding = new TimeIntervalRounding(interval, timeZone); } - if (offset != 0) { - timeZoneRounding = new OffsetRounding(timeZoneRounding, offset); - } return timeZoneRounding; } } @@ -330,68 +320,6 @@ public abstract class Rounding implements Streamable { } } - public static class OffsetRounding extends Rounding { - - static final byte ID = 8; - - private Rounding rounding; - - private long offset; - - OffsetRounding() { // for serialization - } - - public OffsetRounding(Rounding intervalRounding, long offset) { - this.rounding = intervalRounding; - this.offset = offset; - } - - @Override - public byte id() { - return ID; - } - - @Override - public long round(long value) { - return rounding.round(value - offset) + offset; - } - - @Override - public long nextRoundingValue(long value) { - return rounding.nextRoundingValue(value - offset) + offset; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - rounding = Rounding.Streams.read(in); - offset = in.readLong(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - Rounding.Streams.write(rounding, out); - out.writeLong(offset); - } - - @Override - public int hashCode() { - return Objects.hash(rounding, offset); - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - OffsetRounding other = (OffsetRounding) obj; - return Objects.equals(rounding, other.rounding) - && Objects.equals(offset, other.offset); - } - } - public 
static class Streams { public static void write(Rounding rounding, StreamOutput out) throws IOException { @@ -405,7 +333,6 @@ public abstract class Rounding implements Streamable { switch (id) { case TimeUnitRounding.ID: rounding = new TimeUnitRounding(); break; case TimeIntervalRounding.ID: rounding = new TimeIntervalRounding(); break; - case OffsetRounding.ID: rounding = new OffsetRounding(); break; default: throw new ElasticsearchException("unknown rounding id [" + id + "]"); } rounding.readFrom(in); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 8785d53e01e..0ea2fba719b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -45,7 +45,7 @@ import java.util.Map; /** * An aggregator for date values. Every date is rounded down using a configured * {@link Rounding}. 
- * + * * @see Rounding */ class DateHistogramAggregator extends BucketsAggregator { @@ -60,14 +60,17 @@ class DateHistogramAggregator extends BucketsAggregator { private final ExtendedBounds extendedBounds; private final LongHash bucketOrds; + private long offset; - public DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, InternalOrder order, boolean keyed, + public DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, long offset, InternalOrder order, + boolean keyed, long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter, AggregationContext aggregationContext, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, pipelineAggregators, metaData); this.rounding = rounding; + this.offset = offset; this.order = order; this.keyed = keyed; this.minDocCount = minDocCount; @@ -100,7 +103,7 @@ class DateHistogramAggregator extends BucketsAggregator { long previousRounded = Long.MIN_VALUE; for (int i = 0; i < valuesCount; ++i) { long value = values.valueAt(i); - long rounded = rounding.round(value); + long rounded = rounding.round(value - offset) + offset; assert rounded >= previousRounded; if (rounded == previousRounded) { continue; @@ -133,7 +136,7 @@ class DateHistogramAggregator extends BucketsAggregator { InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? 
new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) : null; - return new InternalDateHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, + return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, formatter, keyed, pipelineAggregators(), metaData()); } @@ -142,7 +145,7 @@ class DateHistogramAggregator extends BucketsAggregator { InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) : null; - return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed, + return new InternalDateHistogram(name, Collections.emptyList(), order, minDocCount, offset, emptyBucketInfo, formatter, keyed, pipelineAggregators(), metaData()); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index 2743989a4b5..79f81e28374 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -111,7 +111,7 @@ public final class DateHistogramAggregatorFactory if (timeZone() != null) { tzRoundingBuilder.timeZone(timeZone()); } - Rounding rounding = tzRoundingBuilder.offset(offset).build(); + Rounding rounding = tzRoundingBuilder.build(); return rounding; } @@ -137,7 +137,7 @@ public final class DateHistogramAggregatorFactory // parse any string bounds to longs and round them roundedBounds = extendedBounds.parseAndValidate(name, context.searchContext(), config.format()).round(rounding); } - return new DateHistogramAggregator(name, factories, rounding, order, keyed, 
minDocCount, roundedBounds, valuesSource, + return new DateHistogramAggregator(name, factories, rounding, offset, order, keyed, minDocCount, roundedBounds, valuesSource, config.format(), context, parent, pipelineAggregators, metaData); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 4d46c2c1850..56d3792e0c6 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -178,14 +178,17 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< private final DocValueFormat format; private final boolean keyed; private final long minDocCount; + private final long offset; private final EmptyBucketInfo emptyBucketInfo; - InternalDateHistogram(String name, List buckets, InternalOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo, + InternalDateHistogram(String name, List buckets, InternalOrder order, long minDocCount, long offset, + EmptyBucketInfo emptyBucketInfo, DocValueFormat formatter, boolean keyed, List pipelineAggregators, Map metaData) { super(name, pipelineAggregators, metaData); this.buckets = buckets; this.order = order; + this.offset = offset; assert (minDocCount == 0) == (emptyBucketInfo != null); this.minDocCount = minDocCount; this.emptyBucketInfo = emptyBucketInfo; @@ -205,6 +208,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< } else { emptyBucketInfo = null; } + offset = in.readLong(); format = in.readNamedWriteable(DocValueFormat.class); keyed = in.readBoolean(); buckets = in.readList(stream -> new Bucket(stream, keyed, format)); @@ -217,6 +221,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< if (minDocCount == 
0) { emptyBucketInfo.writeTo(out); } + out.writeLong(offset); out.writeNamedWriteable(format); out.writeBoolean(keyed); out.writeList(buckets); @@ -234,7 +239,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< @Override public InternalDateHistogram create(List buckets) { - return new InternalDateHistogram(name, buckets, order, minDocCount, emptyBucketInfo, format, + return new InternalDateHistogram(name, buckets, order, minDocCount, offset, emptyBucketInfo, format, keyed, pipelineAggregators(), metaData); } @@ -328,7 +333,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< long max = bounds.getMax(); while (key <= max) { iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs)); - key = emptyBucketInfo.rounding.nextRoundingValue(key); + key = nextKey(key).longValue(); } } } else { @@ -337,7 +342,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< if (key < firstBucket.key) { while (key < firstBucket.key) { iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs)); - key = emptyBucketInfo.rounding.nextRoundingValue(key); + key = nextKey(key).longValue(); } } } @@ -349,10 +354,10 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< while (iter.hasNext()) { Bucket nextBucket = list.get(iter.nextIndex()); if (lastBucket != null) { - long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key); + long key = nextKey(lastBucket.key).longValue(); while (key < nextBucket.key) { iter.add(new InternalDateHistogram.Bucket(key, 0, keyed, format, reducedEmptySubAggs)); - key = emptyBucketInfo.rounding.nextRoundingValue(key); + key = nextKey(key).longValue(); } assert key == nextBucket.key; } @@ -393,7 +398,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< CollectionUtil.introSort(reducedBuckets, order.comparator()); } - return new 
InternalDateHistogram(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, + return new InternalDateHistogram(getName(), reducedBuckets, order, minDocCount, offset, emptyBucketInfo, format, keyed, pipelineAggregators(), getMetaData()); } @@ -424,7 +429,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< @Override public Number nextKey(Number key) { - return emptyBucketInfo.rounding.nextRoundingValue(key.longValue()); + return emptyBucketInfo.rounding.nextRoundingValue(key.longValue() - offset) + offset; } @Override @@ -435,7 +440,7 @@ public final class InternalDateHistogram extends InternalMultiBucketAggregation< buckets2.add((Bucket) b); } buckets2 = Collections.unmodifiableList(buckets2); - return new InternalDateHistogram(name, buckets2, order, minDocCount, emptyBucketInfo, format, + return new InternalDateHistogram(name, buckets2, order, minDocCount, offset, emptyBucketInfo, format, keyed, pipelineAggregators(), getMetaData()); } diff --git a/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java b/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java deleted file mode 100644 index 86e4e3b6cd6..00000000000 --- a/core/src/test/java/org/elasticsearch/common/rounding/OffsetRoundingTests.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.rounding; - -import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTimeZone; - -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; - -public class OffsetRoundingTests extends ESTestCase { - - /** - * Simple test case to illustrate how Rounding.Offset works on readable input. - * offset shifts input value back before rounding (so here 6 - 7 -> -1) - * then shifts rounded Value back (here -10 -> -3) - */ - public void testOffsetRounding() { - final long interval = 10; - final long offset = 7; - Rounding.OffsetRounding rounding = new Rounding.OffsetRounding( - new Rounding.TimeIntervalRounding(interval, DateTimeZone.UTC), offset); - assertEquals(-3, rounding.round(6)); - assertEquals(7, rounding.nextRoundingValue(-3)); - assertEquals(7, rounding.round(7)); - assertEquals(17, rounding.nextRoundingValue(7)); - assertEquals(7, rounding.round(16)); - assertEquals(17, rounding.round(17)); - assertEquals(27, rounding.nextRoundingValue(17)); - } - - /** - * test OffsetRounding with an internal interval rounding on random inputs - */ - public void testOffsetRoundingRandom() { - for (int i = 0; i < 1000; ++i) { - final long interval = randomIntBetween(1, 100); - Rounding internalRounding = new Rounding.TimeIntervalRounding(interval, DateTimeZone.UTC); - final long offset = randomIntBetween(-100, 100); - Rounding.OffsetRounding rounding = new Rounding.OffsetRounding(internalRounding, offset); - long 
safetyMargin = Math.abs(interval) + Math.abs(offset); // to prevent range overflow - long value = Math.max(randomLong() - safetyMargin, Long.MIN_VALUE + safetyMargin); - final long r_value = rounding.round(value); - final long nextRoundingValue = rounding.nextRoundingValue(r_value); - assertThat("Rounding should be idempotent", r_value, equalTo(rounding.round(r_value))); - assertThat("Rounded value smaller than unrounded, regardless of offset", r_value - offset, lessThanOrEqualTo(value - offset)); - assertThat("Rounded value <= value < next interval start", r_value + interval, greaterThan(value)); - assertThat("NextRounding value should be interval from rounded value", r_value + interval, equalTo(nextRoundingValue)); - } - } -} diff --git a/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java b/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java index 41f60556d9d..ff83ddfa57d 100644 --- a/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java +++ b/core/src/test/java/org/elasticsearch/common/rounding/TimeZoneRoundingTests.java @@ -55,10 +55,6 @@ public class TimeZoneRoundingTests extends ESTestCase { tzRounding = Rounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).build(); assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-09T00:00:00.000Z"), tz)); assertThat(tzRounding.nextRoundingValue(time("2012-01-09T00:00:00.000Z")), isDate(time("2012-01-16T00:00:00.000Z"), tz)); - - tzRounding = Rounding.builder(DateTimeUnit.WEEK_OF_WEEKYEAR).offset(-TimeValue.timeValueHours(24).millis()).build(); - assertThat(tzRounding.round(time("2012-01-10T01:01:01")), isDate(time("2012-01-08T00:00:00.000Z"), tz)); - assertThat(tzRounding.nextRoundingValue(time("2012-01-08T00:00:00.000Z")), isDate(time("2012-01-15T00:00:00.000Z"), tz)); } public void testUTCIntervalRounding() { From f273981f37ed02158de818f510c7826e13cfe592 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 
4 Aug 2016 16:28:12 +0100 Subject: [PATCH 13/20] Added failure message to test --- .../transport/client/PreBuiltTransportClientTests.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java b/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java index c519b29f9bc..fcc6f33ac6e 100644 --- a/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java +++ b/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java @@ -56,7 +56,8 @@ public class PreBuiltTransportClientTests extends RandomizedTest { new PreBuiltTransportClient(Settings.EMPTY, plugin); fail("exception expected"); } catch (IllegalArgumentException ex) { - assertTrue(ex.getMessage().startsWith("plugin already exists: ")); + assertTrue("Expected message to start with [plugin already exists: ] but was instead [" + ex.getMessage() + "]", + ex.getMessage().startsWith("plugin already exists: ")); } } } From f1110f6f2a831ce06609a83189e2896bb03813cb Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Thu, 4 Aug 2016 16:45:05 +0100 Subject: [PATCH 14/20] fix import statements --- .../transport/client/PreBuiltTransportClientTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java b/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java index fcc6f33ac6e..0b7c3380b94 100644 --- a/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java +++ b/client/transport/src/test/java/org/elasticsearch/transport/client/PreBuiltTransportClientTests.java @@ -32,8 +32,8 @@ import org.junit.Test; import java.util.Arrays; -import static org.junit.Assert.*; import static 
org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; public class PreBuiltTransportClientTests extends RandomizedTest { From cd9388ce660a697bd499f4fb0d8377821656f429 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 4 Aug 2016 19:46:45 +0200 Subject: [PATCH 15/20] [TEST] parse query alternate versions in strict mode AbstractQueryTestCase parses the main version of the query in strict mode, meaning that it will fail if any deprecated syntax is used. It should do the same for alternate versions (e.g. short versions). This is the way it is because the two alternate versions for ids query are both deprecated. Moved testing for those to a specific test method that isolates the deprecations and actually tests that the two are deprecated. --- .../index/query/IdsQueryBuilderTests.java | 76 +++++++++++-------- .../test/AbstractQueryTestCase.java | 2 +- 2 files changed, 44 insertions(+), 34 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java index 42f4aaf56aa..4ad90edc8cb 100644 --- a/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/index/query/IdsQueryBuilderTests.java @@ -23,13 +23,12 @@ package org.elasticsearch.index.query; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.lucene.search.MatchNoDocsQuery; import org.elasticsearch.test.AbstractQueryTestCase; import java.io.IOException; -import java.util.HashMap; -import java.util.Map; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.containsString; @@ -94,37 +93,6 @@ public class IdsQueryBuilderTests extends 
AbstractQueryTestCase } } - @Override - protected Map getAlternateVersions() { - Map alternateVersions = new HashMap<>(); - - IdsQueryBuilder tempQuery = createTestQueryBuilder(); - if (tempQuery.types() != null && tempQuery.types().length > 0) { - String type = tempQuery.types()[0]; - IdsQueryBuilder testQuery = new IdsQueryBuilder(type); - - //single value type can also be called _type - String contentString1 = "{\n" + - " \"ids\" : {\n" + - " \"_type\" : \"" + type + "\",\n" + - " \"values\" : []\n" + - " }\n" + - "}"; - alternateVersions.put(contentString1, testQuery); - - //array of types can also be called type rather than types - String contentString2 = "{\n" + - " \"ids\" : {\n" + - " \"type\" : [\"" + type + "\"],\n" + - " \"values\" : []\n" + - " }\n" + - "}"; - alternateVersions.put(contentString2, testQuery); - } - - return alternateVersions; - } - public void testIllegalArguments() { try { new IdsQueryBuilder((String[])null); @@ -166,4 +134,46 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase assertEquals(json, 3, parsed.ids().size()); assertEquals(json, "my_type", parsed.types()[0]); } + + public void testFromJsonDeprecatedSyntax() throws IOException { + IdsQueryBuilder tempQuery = createTestQueryBuilder(); + assumeTrue("test requires at least one type", tempQuery.types() != null && tempQuery.types().length > 0); + + String type = tempQuery.types()[0]; + IdsQueryBuilder testQuery = new IdsQueryBuilder(type); + + //single value type can also be called _type + String contentString = "{\n" + + " \"ids\" : {\n" + + " \"_type\" : \"" + type + "\",\n" + + " \"values\" : []\n" + + " }\n" + + "}"; + + IdsQueryBuilder parsed = (IdsQueryBuilder) parseQuery(contentString, ParseFieldMatcher.EMPTY); + assertEquals(testQuery, parsed); + + try { + parseQuery(contentString); + fail("parse should have failed"); + } catch(IllegalArgumentException e) { + assertEquals("Deprecated field [_type] used, expected [type] instead", e.getMessage()); + } + + 
//array of types can also be called type rather than types + contentString = "{\n" + + " \"ids\" : {\n" + + " \"types\" : [\"" + type + "\"],\n" + + " \"values\" : []\n" + + " }\n" + + "}"; + parsed = (IdsQueryBuilder) parseQuery(contentString, ParseFieldMatcher.EMPTY); + assertEquals(testQuery, parsed); + try { + parseQuery(contentString); + fail("parse should have failed"); + } catch(IllegalArgumentException e) { + assertEquals("Deprecated field [types] used, expected [type] instead", e.getMessage()); + } + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index 92acd702e3a..f59afcb40d8 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -245,7 +245,7 @@ public abstract class AbstractQueryTestCase> assertParsedQuery(shuffled.bytes(), testQuery); for (Map.Entry alternateVersion : getAlternateVersions().entrySet()) { String queryAsString = alternateVersion.getKey(); - assertParsedQuery(new BytesArray(queryAsString), alternateVersion.getValue(), ParseFieldMatcher.EMPTY); + assertParsedQuery(new BytesArray(queryAsString), alternateVersion.getValue()); } } } From 70100821123f53653417f7752f26d442b4fdafb3 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Thu, 4 Aug 2016 20:42:12 +0200 Subject: [PATCH 16/20] Add checksumming and versions to the Translog's Checkpoint files (#19797) This prepares the infrastructure to be able to extend the checkpoint file to store more information. 
--- .../index/translog/Checkpoint.java | 115 +++++++++++++----- .../index/translog/Translog.java | 4 +- .../translog/TruncateTranslogCommand.java | 13 +- 3 files changed, 91 insertions(+), 41 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java b/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java index 0fd59090944..d4eb9807827 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Checkpoint.java @@ -18,16 +18,19 @@ */ package org.elasticsearch.index.translog; -import org.apache.lucene.store.ByteArrayDataOutput; +import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.store.DataInput; import org.apache.lucene.store.DataOutput; -import org.apache.lucene.store.InputStreamDataInput; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.OutputStreamIndexOutput; +import org.apache.lucene.store.SimpleFSDirectory; import org.elasticsearch.common.io.Channels; +import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.io.InputStream; import java.nio.channels.FileChannel; -import java.nio.file.Files; import java.nio.file.OpenOption; import java.nio.file.Path; @@ -35,69 +38,117 @@ import java.nio.file.Path; */ class Checkpoint { - static final int BUFFER_SIZE = Integer.BYTES // ops - + Long.BYTES // offset - + Long.BYTES;// generation final long offset; final int numOps; final long generation; + private static final int INITIAL_VERSION = 1; // start with 1, just to recognize there was some magic serialization logic before + + private static final String CHECKPOINT_CODEC = "ckp"; + + static final int FILE_SIZE = CodecUtil.headerLength(CHECKPOINT_CODEC) + + Integer.BYTES // ops + + Long.BYTES // offset + + Long.BYTES // generation + + CodecUtil.footerLength(); + + static final int 
LEGACY_NON_CHECKSUMMED_FILE_LENGTH = Integer.BYTES // ops + + Long.BYTES // offset + + Long.BYTES; // generation + Checkpoint(long offset, int numOps, long generation) { this.offset = offset; this.numOps = numOps; this.generation = generation; } - Checkpoint(DataInput in) throws IOException { - offset = in.readLong(); - numOps = in.readInt(); - generation = in.readLong(); - } - - private void write(FileChannel channel) throws IOException { - byte[] buffer = new byte[BUFFER_SIZE]; - final ByteArrayDataOutput out = new ByteArrayDataOutput(buffer); - write(out); - Channels.writeToChannel(buffer, channel); - } - - void write(DataOutput out) throws IOException { + private void write(DataOutput out) throws IOException { out.writeLong(offset); out.writeInt(numOps); out.writeLong(generation); } + // reads a checksummed checkpoint introduced in ES 5.0.0 + static Checkpoint readChecksummedV1(DataInput in) throws IOException { + return new Checkpoint(in.readLong(), in.readInt(), in.readLong()); + } + + // reads checkpoint from ES < 5.0.0 + static Checkpoint readNonChecksummed(DataInput in) throws IOException { + return new Checkpoint(in.readLong(), in.readInt(), in.readLong()); + } + @Override public String toString() { return "Checkpoint{" + - "offset=" + offset + - ", numOps=" + numOps + - ", translogFileGeneration= " + generation + - '}'; + "offset=" + offset + + ", numOps=" + numOps + + ", translogFileGeneration= " + generation + + '}'; } public static Checkpoint read(Path path) throws IOException { - try (InputStream in = Files.newInputStream(path)) { - return new Checkpoint(new InputStreamDataInput(in)); + try (Directory dir = new SimpleFSDirectory(path.getParent())) { + try (final IndexInput indexInput = dir.openInput(path.getFileName().toString(), IOContext.DEFAULT)) { + if (indexInput.length() == LEGACY_NON_CHECKSUMMED_FILE_LENGTH) { + // OLD unchecksummed file that was written < ES 5.0.0 + return Checkpoint.readNonChecksummed(indexInput); + } + // We checksum the 
entire file before we even go and parse it. If it's corrupted we barf right here. + CodecUtil.checksumEntireFile(indexInput); + final int fileVersion = CodecUtil.checkHeader(indexInput, CHECKPOINT_CODEC, INITIAL_VERSION, INITIAL_VERSION); + return Checkpoint.readChecksummedV1(indexInput); + } } } public static void write(ChannelFactory factory, Path checkpointFile, Checkpoint checkpoint, OpenOption... options) throws IOException { + final ByteArrayOutputStream byteOutputStream = new ByteArrayOutputStream(FILE_SIZE) { + @Override + public synchronized byte[] toByteArray() { + // don't clone + return buf; + } + }; + final String resourceDesc = "checkpoint(path=\"" + checkpointFile + "\", gen=" + checkpoint + ")"; + try (final OutputStreamIndexOutput indexOutput = + new OutputStreamIndexOutput(resourceDesc, checkpointFile.toString(), byteOutputStream, FILE_SIZE)) { + CodecUtil.writeHeader(indexOutput, CHECKPOINT_CODEC, INITIAL_VERSION); + checkpoint.write(indexOutput); + CodecUtil.writeFooter(indexOutput); + + assert indexOutput.getFilePointer() == FILE_SIZE : + "get you number straights. Bytes written: " + indexOutput.getFilePointer() + " buffer size: " + FILE_SIZE; + assert indexOutput.getFilePointer() < 512 : + "checkpoint files have to be smaller 512b for atomic writes. size: " + indexOutput.getFilePointer(); + + } + // now go and write to the channel, in one go. 
try (FileChannel channel = factory.open(checkpointFile, options)) { - checkpoint.write(channel); + Channels.writeToChannel(byteOutputStream.toByteArray(), channel); + // no need to force metadata, file size stays the same and we did the full fsync + // when we first created the file, so the directory entry doesn't change as well channel.force(false); } } @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } Checkpoint that = (Checkpoint) o; - if (offset != that.offset) return false; - if (numOps != that.numOps) return false; + if (offset != that.offset) { + return false; + } + if (numOps != that.numOps) { + return false; + } return generation == that.generation; } diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index eb22c84cf0e..b6ace07a55d 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -200,7 +200,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC Files.createDirectories(location); final long generation = 1; Checkpoint checkpoint = new Checkpoint(0, 0, generation); - Checkpoint.write(getChannelFactory(), location.resolve(CHECKPOINT_FILE_NAME), checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); + final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME); + Checkpoint.write(getChannelFactory(), checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); + IOUtils.fsync(checkpointFile, false); current = createWriter(generation); this.lastCommittedTranslogFileGeneration = NOT_SET_GENERATION; diff --git a/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java 
b/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java index b6b91f14ba8..6514cd42709 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java +++ b/core/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java @@ -36,11 +36,9 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cli.SettingCommand; import org.elasticsearch.cli.Terminal; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.index.translog.Checkpoint; import java.io.IOException; import java.nio.channels.Channels; @@ -168,12 +166,11 @@ public class TruncateTranslogCommand extends SettingCommand { /** Write a checkpoint file to the given location with the given generation */ public static void writeEmptyCheckpoint(Path filename, int translogLength, long translogGeneration) throws IOException { - try (FileChannel fc = FileChannel.open(filename, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW); - OutputStreamDataOutput out = new OutputStreamDataOutput(Channels.newOutputStream(fc))) { - Checkpoint emptyCheckpoint = new Checkpoint(translogLength, 0, translogGeneration); - emptyCheckpoint.write(out); - fc.force(true); - } + Checkpoint emptyCheckpoint = new Checkpoint(translogLength, 0, translogGeneration); + Checkpoint.write(FileChannel::open, filename, emptyCheckpoint, + StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW); + // fsync with metadata here to make sure. 
+ IOUtils.fsync(filename, false); } /** From 4598c36027e883a8ace95a357d9863f96340d00d Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Thu, 4 Aug 2016 21:00:59 +0200 Subject: [PATCH 17/20] Fix various concurrency issues in transport (#19675) Due to various issues (most notably a missing happens-before edge between socket accept and channel close in MockTcpTransport), MockTcpTransportTests sometimes did not terminate. With this commit we fix various concurrency issues that led to this hanging test. Failing example build: https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+master+multijob-os-compatibility/os=oraclelinux/835/console --- .../elasticsearch/transport/TcpTransport.java | 25 +++---- .../AbstractSimpleTransportTestCase.java | 11 ++- .../transport/MockTcpTransport.java | 69 ++++++++++++++----- 3 files changed, 64 insertions(+), 41 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java index 7145777aad7..d567e838138 100644 --- a/core/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -93,7 +93,6 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.function.Supplier; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -768,12 +767,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i threadPool.generic().execute(() -> { globalLock.writeLock().lock(); try { - for (Iterator it = connectedNodes.values().iterator(); it.hasNext(); ) { - NodeChannels nodeChannels = it.next(); - it.remove(); - IOUtils.closeWhileHandlingException(nodeChannels); - } - + // first stop to accept any incoming connections so nobody can connect to this transport for (Map.Entry> 
entry : serverChannels.entrySet()) { try { closeChannels(entry.getValue()); @@ -781,16 +775,13 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i logger.debug("Error closing serverChannel for profile [{}]", e, entry.getKey()); } } - try { - stopInternal(); - } finally { - for (Iterator it = connectedNodes.values().iterator(); it.hasNext(); ) { - NodeChannels nodeChannels = it.next(); - it.remove(); - IOUtils.closeWhileHandlingException(nodeChannels); - } - } + for (Iterator it = connectedNodes.values().iterator(); it.hasNext(); ) { + NodeChannels nodeChannels = it.next(); + it.remove(); + IOUtils.closeWhileHandlingException(nodeChannels); + } + stopInternal(); } finally { globalLock.writeLock().unlock(); latch.countDown(); @@ -800,7 +791,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent i try { latch.await(30, TimeUnit.SECONDS); } catch (InterruptedException e) { - Thread.interrupted(); + Thread.currentThread().interrupt(); // ignore } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index eb4dbb8bca5..33c5fcccad1 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -69,12 +69,12 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { protected ThreadPool threadPool; protected static final Version version0 = Version.CURRENT.minimumCompatibilityVersion(); - protected DiscoveryNode nodeA; - protected MockTransportService serviceA; + protected volatile DiscoveryNode nodeA; + protected volatile MockTransportService serviceA; protected static final Version version1 = Version.fromId(Version.CURRENT.id + 1); - protected DiscoveryNode nodeB; - protected MockTransportService serviceB; + protected 
volatile DiscoveryNode nodeB; - protected MockTransportService serviceB; + protected 
volatile MockTransportService serviceB; protected abstract MockTransportService build(Settings settings, Version version); @@ -489,9 +489,6 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { assertThat(latch.await(5, TimeUnit.SECONDS), equalTo(true)); } - @TestLogging("transport:DEBUG,transport.tracer:TRACE") - // boaz is on this - @AwaitsFix(bugUrl = "https://elasticsearch-ci.elastic.co/job/elastic+elasticsearch+master+multijob-os-compatibility/os=oraclelinux/835") public void testConcurrentSendRespondAndDisconnect() throws BrokenBarrierException, InterruptedException { Set sendingErrors = ConcurrentCollections.newConcurrentSet(); Set responseErrors = ConcurrentCollections.newConcurrentSet(); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java index c128ee49022..da7fcc53410 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java @@ -122,7 +122,7 @@ public class MockTcpTransport extends TcpTransport try { started.await(); } catch (InterruptedException e) { - Thread.interrupted(); + Thread.currentThread().interrupt(); } return serverMockChannel; } @@ -261,6 +261,14 @@ public class MockTcpTransport extends TcpTransport private final CancellableThreads cancellableThreads = new CancellableThreads(); private final Closeable onClose; + /** + * Constructs a new MockChannel instance intended for handling the actual incoming / outgoing traffic. + * + * @param socket The client socket. Must not be null. + * @param localAddress Address associated with the corresponding local server socket. Must not be null. + * @param profile The associated profile name. + * @param onClose Callback to execute when this channel is closed.
+ */ public MockChannel(Socket socket, InetSocketAddress localAddress, String profile, Consumer onClose) { this.localAddress = localAddress; this.activeChannel = socket; @@ -268,13 +276,44 @@ public class MockTcpTransport extends TcpTransport this.profile = profile; this.onClose = () -> onClose.accept(this); } + + /** + * Constructs a new MockChannel instance intended for accepting requests. + * + * @param serverSocket The associated server socket. Must not be null. + * @param profile The associated profile name. + */ + public MockChannel(ServerSocket serverSocket, String profile) { + this.localAddress = (InetSocketAddress) serverSocket.getLocalSocketAddress(); + this.serverSocket = serverSocket; + this.profile = profile; + this.activeChannel = null; + this.onClose = null; + } + public void accept(Executor executor) throws IOException { while (isOpen.get()) { - Socket accept = serverSocket.accept(); - configureSocket(accept); - MockChannel mockChannel = new MockChannel(accept, localAddress, profile, workerChannels::remove); - workerChannels.put(mockChannel, Boolean.TRUE); - mockChannel.loopRead(executor); + Socket incomingSocket = serverSocket.accept(); + MockChannel incomingChannel = null; + try { + configureSocket(incomingSocket); + incomingChannel = new MockChannel(incomingSocket, localAddress, profile, workerChannels::remove); + //establish a happens-before edge between closing and accepting a new connection + synchronized (this) { + if (isOpen.get()) { + workerChannels.put(incomingChannel, Boolean.TRUE); + // this spawns a new thread immediately, so OK under lock + incomingChannel.loopRead(executor); + // the channel is properly registered and will be cleared by the close code. + incomingSocket = null; + incomingChannel = null; + } + } + } finally { + // ensure we don't leak sockets and channels in the failure case. Note that we null both + // if there are no exceptions so this becomes a no op. 
+ IOUtils.closeWhileHandlingException(incomingSocket, incomingChannel); + } } } @@ -294,26 +333,22 @@ public class MockTcpTransport extends TcpTransport @Override protected void doRun() throws Exception { StreamInput input = new InputStreamStreamInput(new BufferedInputStream(activeChannel.getInputStream())); - while (isOpen.get()) { + // There is a (slim) chance that we get interrupted right after a loop iteration, so check explicitly + while (isOpen.get() && !Thread.currentThread().isInterrupted()) { cancellableThreads.executeIO(() -> readMessage(MockChannel.this, input)); } } }); } - public MockChannel(ServerSocket serverSocket, String profile) { - this.localAddress = (InetSocketAddress) serverSocket.getLocalSocketAddress(); - this.serverSocket = serverSocket; - this.profile = profile; - this.activeChannel = null; - this.onClose = null; - } - @Override public void close() throws IOException { if (isOpen.compareAndSet(true, false)) { - IOUtils.close( () -> cancellableThreads.cancel("channel closed"), serverSocket, activeChannel, - () -> IOUtils.close(workerChannels.keySet()), onClose); + //establish a happens-before edge between closing and accepting a new connection + synchronized (this) { + IOUtils.close(serverSocket, activeChannel, () -> IOUtils.close(workerChannels.keySet()), + () -> cancellableThreads.cancel("channel closed"), onClose); + } } } } From 1e587406d8593f5443948d3ee464a4c470d550df Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 2 Aug 2016 17:35:31 -0400 Subject: [PATCH 18/20] Fail yaml tests and docs snippets that get unexpected warnings Adds `warnings` syntax to the yaml test that allows you to expect a `Warning` header that looks like: ``` - do: warnings: - '[index] is deprecated' - quotes are not required because yaml - but this argument is always a list, never a single string - no matter how many warnings you expect get: index: test type: test id: 1 ``` These are accessible from the docs with: ``` // TEST[warning:some warning] ``` This 
should help to force you to update the docs if you deprecate something. You *must* add the warnings marker to the docs or the build will fail. While you are there you *should* update the docs to add deprecation warnings visible in the rendered results. --- .../doc/RestTestsFromSnippetsTask.groovy | 18 ++++- .../gradle/doc/SnippetsTask.groovy | 11 +++- docs/README.asciidoc | 4 ++ docs/plugins/mapper-attachments.asciidoc | 14 ++-- docs/reference/indices/analyze.asciidoc | 2 +- .../reference/mapping/params/lat-lon.asciidoc | 21 ++++-- .../query-dsl/function-score-query.asciidoc | 12 ++-- .../query-dsl/indices-query.asciidoc | 5 +- docs/reference/query-dsl/mlt-query.asciidoc | 4 +- .../query-dsl/parent-id-query.asciidoc | 2 +- .../query-dsl/percolate-query.asciidoc | 2 +- .../reference/query-dsl/prefix-query.asciidoc | 3 +- .../query-dsl/template-query.asciidoc | 8 ++- .../search/request/highlighting.asciidoc | 10 +-- .../search/request/source-filtering.asciidoc | 7 +- .../test/lang_mustache/40_template_query.yaml | 22 ++++++- .../rest-api-spec/test/README.asciidoc | 20 +++++- .../rest-api-spec/test/mlt/20_docs.yaml | 6 +- .../test/search/10_source_filtering.yaml | 2 +- .../rest/yaml/ClientYamlTestResponse.java | 16 +++++ .../test/rest/yaml/Features.java | 1 + .../ClientYamlTestSuiteParseContext.java | 9 ++- .../rest/yaml/parser/DoSectionParser.java | 18 +++++ .../test/rest/yaml/section/DoSection.java | 63 +++++++++++++++++- .../yaml/parser/DoSectionParserTests.java | 58 +++++++++++++--- .../rest/yaml/section/DoSectionTests.java | 66 +++++++++++++++++++ 26 files changed, 347 insertions(+), 57 deletions(-) create mode 100644 test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy index adea61cd4f3..100715586d3 100644 --- 
a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy @@ -119,6 +119,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { current.println(" reason: $test.skipTest") } if (test.setup != null) { + // Insert a setup defined outside of the docs String setup = setups[test.setup] if (setup == null) { throw new InvalidUserDataException("Couldn't find setup " @@ -136,13 +137,23 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { response.contents.eachLine { current.println(" $it") } } - void emitDo(String method, String pathAndQuery, - String body, String catchPart, boolean inSetup) { + void emitDo(String method, String pathAndQuery, String body, + String catchPart, List warnings, boolean inSetup) { def (String path, String query) = pathAndQuery.tokenize('?') current.println(" - do:") if (catchPart != null) { current.println(" catch: $catchPart") } + if (false == warnings.isEmpty()) { + current.println(" warnings:") + for (String warning in warnings) { + // Escape " because we're going to quote the warning + String escaped = warning.replaceAll('"', '\\\\"') + /* Quote the warning in case it starts with [ which makes + * it look too much like an array. 
*/ + current.println(" - \"$escaped\"") + } + } current.println(" raw:") current.println(" method: $method") current.println(" path: \"$path\"") @@ -200,7 +211,8 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { // Leading '/'s break the generated paths pathAndQuery = pathAndQuery.substring(1) } - emitDo(method, pathAndQuery, body, catchPart, inSetup) + emitDo(method, pathAndQuery, body, catchPart, snippet.warnings, + inSetup) } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy index afd91858e9d..749c0f916f8 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy @@ -37,8 +37,9 @@ public class SnippetsTask extends DefaultTask { private static final String CATCH = /catch:\s*((?:\/[^\/]+\/)|[^ \]]+)/ private static final String SKIP = /skip:([^\]]+)/ private static final String SETUP = /setup:([^ \]]+)/ + private static final String WARNING = /warning:(.+)/ private static final String TEST_SYNTAX = - /(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP) ?/ + /(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP|$WARNING) ?/ /** * Action to take on each snippet. 
Called with a single parameter, an @@ -158,6 +159,10 @@ public class SnippetsTask extends DefaultTask { snippet.setup = it.group(6) return } + if (it.group(7) != null) { + snippet.warnings.add(it.group(7)) + return + } throw new InvalidUserDataException( "Invalid test marker: $line") } @@ -230,6 +235,7 @@ public class SnippetsTask extends DefaultTask { String language = null String catchPart = null String setup = null + List warnings = new ArrayList() @Override public String toString() { @@ -254,6 +260,9 @@ public class SnippetsTask extends DefaultTask { if (setup) { result += "[setup:$setup]" } + for (String warning in warnings) { + result += "[warning:$warning]" + } } if (testResponse) { result += '// TESTRESPONSE' diff --git a/docs/README.asciidoc b/docs/README.asciidoc index da07009d13d..5da211c6622 100644 --- a/docs/README.asciidoc +++ b/docs/README.asciidoc @@ -28,6 +28,10 @@ are tests even if they don't have `// CONSOLE`. * `// TEST[setup:name]`: Run some setup code before running the snippet. This is useful for creating and populating indexes used in the snippet. The setup code is defined in `docs/build.gradle`. + * `// TEST[warning:some warning]`: Expect the response to include a `Warning` + header. If the response doesn't include a `Warning` header with the exact + text then the test fails. If the response includes `Warning` headers that + aren't expected then the test fails. * `// TESTRESPONSE`: Matches this snippet against the body of the response of the last test. If the response is JSON then order is ignored. 
With `// TEST[continued]` you can make tests that contain multiple command snippets diff --git a/docs/plugins/mapper-attachments.asciidoc b/docs/plugins/mapper-attachments.asciidoc index f2c034a317e..119ec10c905 100644 --- a/docs/plugins/mapper-attachments.asciidoc +++ b/docs/plugins/mapper-attachments.asciidoc @@ -196,14 +196,14 @@ PUT /test "file" : { "type" : "attachment", "fields" : { - "content" : {"index" : "no"}, - "title" : {"store" : "yes"}, - "date" : {"store" : "yes"}, + "content" : {"index" : true}, + "title" : {"store" : true}, + "date" : {"store" : true}, "author" : {"analyzer" : "my_analyzer"}, - "keywords" : {"store" : "yes"}, - "content_type" : {"store" : "yes"}, - "content_length" : {"store" : "yes"}, - "language" : {"store" : "yes"} + "keywords" : {"store" : true}, + "content_type" : {"store" : true}, + "content_length" : {"store" : true}, + "language" : {"store" : true} } } } diff --git a/docs/reference/indices/analyze.asciidoc b/docs/reference/indices/analyze.asciidoc index 5f75da11176..e5ed67bf12f 100644 --- a/docs/reference/indices/analyze.asciidoc +++ b/docs/reference/indices/analyze.asciidoc @@ -127,7 +127,7 @@ experimental[The format of the additional detail information is experimental and GET _analyze { "tokenizer" : "standard", - "token_filter" : ["snowball"], + "filter" : ["snowball"], "text" : "detailed output", "explain" : true, "attributes" : ["keyword"] <1> diff --git a/docs/reference/mapping/params/lat-lon.asciidoc b/docs/reference/mapping/params/lat-lon.asciidoc index c610d8a1771..88c91c30d06 100644 --- a/docs/reference/mapping/params/lat-lon.asciidoc +++ b/docs/reference/mapping/params/lat-lon.asciidoc @@ -1,6 +1,9 @@ [[lat-lon]] === `lat_lon` +deprecated[5.0.0, ????????] +// https://github.com/elastic/elasticsearch/issues/19792 + <> are usually performed by plugging the value of each <> field into a formula to determine whether it falls into the required area or not. 
Unlike most queries, the inverted index @@ -10,7 +13,7 @@ Setting `lat_lon` to `true` causes the latitude and longitude values to be indexed as numeric fields (called `.lat` and `.lon`). These fields can be used by the <> and <> queries instead of -performing in-memory calculations. +performing in-memory calculations. So this mapping: [source,js] -------------------------------------------------- @@ -27,8 +30,15 @@ PUT my_index } } } +-------------------------------------------------- +// TEST[warning:geo_point lat_lon parameter is deprecated and will be removed in the next major release] +<1> Setting `lat_lon` to true indexes the geo-point in the `location.lat` and `location.lon` fields. -PUT my_index/my_type/1 +Allows these actions: + +[source,js] +-------------------------------------------------- +PUT my_index/my_type/1?refresh { "location": { "lat": 41.12, @@ -46,18 +56,17 @@ GET my_index/_search "lon": -71 }, "distance": "50km", - "optimize_bbox": "indexed" <2> + "optimize_bbox": "indexed" <1> } } } -------------------------------------------------- // CONSOLE -<1> Setting `lat_lon` to true indexes the geo-point in the `location.lat` and `location.lon` fields. -<2> The `indexed` option tells the geo-distance query to use the inverted index instead of the in-memory calculation. +// TEST[continued] +<1> The `indexed` option tells the geo-distance query to use the inverted index instead of the in-memory calculation. Whether the in-memory or indexed operation performs better depends both on your dataset and on the types of queries that you are running. NOTE: The `lat_lon` option only makes sense for single-value `geo_point` fields. It will not work with arrays of geo-points. 
- diff --git a/docs/reference/query-dsl/function-score-query.asciidoc b/docs/reference/query-dsl/function-score-query.asciidoc index b1b6b56c2b2..0b6214396c9 100644 --- a/docs/reference/query-dsl/function-score-query.asciidoc +++ b/docs/reference/query-dsl/function-score-query.asciidoc @@ -18,7 +18,7 @@ GET /_search { "query": { "function_score": { - "query": {}, + "query": { "match_all": {} }, "boost": "5", "random_score": {}, <1> "boost_mode":"multiply" @@ -40,17 +40,17 @@ GET /_search { "query": { "function_score": { - "query": {}, + "query": { "match_all": {} }, "boost": "5", <1> "functions": [ { - "filter": {}, + "filter": { "match": { "test": "bar" } }, "random_score": {}, <2> "weight": 23 }, { - "filter": {}, - "weight": 42 + "filter": { "match": { "test": "cat" } }, + "weight": 42 } ], "max_boost": 42, @@ -170,7 +170,7 @@ you wish to inhibit this, set `"boost_mode": "replace"` The `weight` score allows you to multiply the score by the provided `weight`. This can sometimes be desired since boost value set on specific queries gets normalized, while for this score function it does -not. The number value is of type float. +not. The number value is of type float. [source,js] -------------------------------------------------- diff --git a/docs/reference/query-dsl/indices-query.asciidoc b/docs/reference/query-dsl/indices-query.asciidoc index 8f2f958086e..112a779e3f9 100644 --- a/docs/reference/query-dsl/indices-query.asciidoc +++ b/docs/reference/query-dsl/indices-query.asciidoc @@ -1,6 +1,8 @@ [[query-dsl-indices-query]] === Indices Query +deprecated[5.0.0, Search on the '_index' field instead] + The `indices` query is useful in cases where a search is executed across multiple indices. It allows to specify a list of index names and an inner query that is only executed for indices matching names on that list. @@ -20,7 +22,8 @@ GET /_search } } -------------------------------------------------- -// CONSOLE +// CONSOLE +// TEST[warning:indices query is deprecated. 
Instead search on the '_index' field] You can use the `index` field to provide a single index. diff --git a/docs/reference/query-dsl/mlt-query.asciidoc b/docs/reference/query-dsl/mlt-query.asciidoc index b132b49f234..8e23afdbb86 100644 --- a/docs/reference/query-dsl/mlt-query.asciidoc +++ b/docs/reference/query-dsl/mlt-query.asciidoc @@ -6,7 +6,7 @@ set of documents. In order to do so, MLT selects a set of representative terms of these input documents, forms a query using these terms, executes the query and returns the results. The user controls the input documents, how the terms should be selected and how the query is formed. `more_like_this` can be -shortened to `mlt` deprecated[5.0.0,use `more_like_this` instead). +shortened to `mlt` deprecated[5.0.0,use `more_like_this` instead]. The simplest use case consists of asking for documents that are similar to a provided piece of text. Here, we are asking for all movies that have some text @@ -175,7 +175,7 @@ follows a similar syntax to the `per_field_analyzer` parameter of the Additionally, to provide documents not necessarily present in the index, <> are also supported. -`unlike`:: +`unlike`:: The `unlike` parameter is used in conjunction with `like` in order not to select terms found in a chosen set of documents. In other words, we could ask for documents `like: "Apple"`, but `unlike: "cake crumble tree"`. 
The syntax diff --git a/docs/reference/query-dsl/parent-id-query.asciidoc b/docs/reference/query-dsl/parent-id-query.asciidoc index a7a28cf88e8..f662dc825c0 100644 --- a/docs/reference/query-dsl/parent-id-query.asciidoc +++ b/docs/reference/query-dsl/parent-id-query.asciidoc @@ -57,7 +57,7 @@ GET /my_index/_search { "query": { "has_parent": { - "type": "blog_post", + "parent_type": "blog_post", "query": { "term": { "_id": "1" diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc index 0d079f6072c..2ccc84cabb9 100644 --- a/docs/reference/query-dsl/percolate-query.asciidoc +++ b/docs/reference/query-dsl/percolate-query.asciidoc @@ -19,7 +19,7 @@ PUT /my-index "doctype": { "properties": { "message": { - "type": "string" + "type": "keyword" } } }, diff --git a/docs/reference/query-dsl/prefix-query.asciidoc b/docs/reference/query-dsl/prefix-query.asciidoc index d2b75d10e5f..270fc925f0c 100644 --- a/docs/reference/query-dsl/prefix-query.asciidoc +++ b/docs/reference/query-dsl/prefix-query.asciidoc @@ -28,7 +28,7 @@ GET /_search -------------------------------------------------- // CONSOLE -Or : +Or with the `prefix` deprecated[5.0.0, Use `value`] syntax: [source,js] -------------------------------------------------- @@ -39,6 +39,7 @@ GET /_search } -------------------------------------------------- // CONSOLE +// TEST[warning:Deprecated field [prefix] used, expected [value] instead] This multi term query allows you to control how it gets rewritten using the <> diff --git a/docs/reference/query-dsl/template-query.asciidoc b/docs/reference/query-dsl/template-query.asciidoc index f66dbe56d3e..b4b00e5babd 100644 --- a/docs/reference/query-dsl/template-query.asciidoc +++ b/docs/reference/query-dsl/template-query.asciidoc @@ -1,6 +1,8 @@ [[query-dsl-template-query]] === Template Query +deprecated[5.0.0, Use the <> API] + A query that accepts a query template and a map of key/value pairs to fill in template 
parameters. Templating is based on Mustache. For simple token substitution all you provide is a query containing some variable that you want to substitute and the actual @@ -21,6 +23,7 @@ GET /_search } ------------------------------------------ // CONSOLE +// TEST[warning:[template] query is deprecated, use search template api instead] The above request is translated into: @@ -54,6 +57,7 @@ GET /_search } ------------------------------------------ // CONSOLE +// TEST[warning:[template] query is deprecated, use search template api instead] <1> New line characters (`\n`) should be escaped as `\\n` or removed, and quotes (`"`) should be escaped as `\\"`. @@ -80,6 +84,7 @@ GET /_search } ------------------------------------------ // CONSOLE +// TEST[warning:[template] query is deprecated, use search template api instead] <1> Name of the query template in `config/scripts/`, i.e., `my_template.mustache`. @@ -113,11 +118,10 @@ GET /_search ------------------------------------------ // CONSOLE // TEST[continued] +// TEST[warning:[template] query is deprecated, use search template api instead] <1> Name of the query template in `config/scripts/`, i.e., `my_template.mustache`. There is also a dedicated `template` endpoint, allows you to template an entire search request. Please see <> for more details. - - diff --git a/docs/reference/search/request/highlighting.asciidoc b/docs/reference/search/request/highlighting.asciidoc index efb7053c179..2e7bc9f1805 100644 --- a/docs/reference/search/request/highlighting.asciidoc +++ b/docs/reference/search/request/highlighting.asciidoc @@ -52,9 +52,9 @@ It tries hard to reflect the query matching logic in terms of understanding word [WARNING] If you want to highlight a lot of fields in a lot of documents with complex queries this highlighter will not be fast. 
-In its efforts to accurately reflect query logic it creates a tiny in-memory index and re-runs the original query criteria through -Lucene's query execution planner to get access to low-level match information on the current document. -This is repeated for every field and every document that needs highlighting. If this presents a performance issue in your system consider using an alternative highlighter. +In its efforts to accurately reflect query logic it creates a tiny in-memory index and re-runs the original query criteria through +Lucene's query execution planner to get access to low-level match information on the current document. +This is repeated for every field and every document that needs highlighting. If this presents a performance issue in your system consider using an alternative highlighter. [[postings-highlighter]] ==== Postings highlighter @@ -387,7 +387,7 @@ GET /_search "match_phrase": { "content": { "query": "foo bar", - "phrase_slop": 1 + "slop": 1 } } }, @@ -413,7 +413,7 @@ GET /_search "match_phrase": { "content": { "query": "foo bar", - "phrase_slop": 1, + "slop": 1, "boost": 10.0 } } diff --git a/docs/reference/search/request/source-filtering.asciidoc b/docs/reference/search/request/source-filtering.asciidoc index 08625751eec..03c02538a0e 100644 --- a/docs/reference/search/request/source-filtering.asciidoc +++ b/docs/reference/search/request/source-filtering.asciidoc @@ -53,15 +53,16 @@ GET /_search -------------------------------------------------- // CONSOLE -Finally, for complete control, you can specify both include and exclude patterns: +Finally, for complete control, you can specify both `includes` and `excludes` +patterns: [source,js] -------------------------------------------------- GET /_search { "_source": { - "include": [ "obj1.*", "obj2.*" ], - "exclude": [ "*.description" ] + "includes": [ "obj1.*", "obj2.*" ], + "excludes": [ "*.description" ] }, "query" : { "term" : { "user" : "kimchy" } diff --git 
a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml index cf3e6883e45..2360dfc37f0 100644 --- a/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml +++ b/modules/lang-mustache/src/test/resources/rest-api-spec/test/lang_mustache/40_template_query.yaml @@ -1,5 +1,7 @@ --- "Template query": + - skip: + features: warnings - do: index: @@ -23,54 +25,72 @@ - match: { acknowledged: true } - do: + warnings: + - '[template] query is deprecated, use search template api instead' search: body: { "query": { "template": { "inline": { "term": { "text": { "value": "{{template}}" } } }, "params": { "template": "value1" } } } } - match: { hits.total: 1 } - do: + warnings: + - '[template] query is deprecated, use search template api instead' search: body: { "query": { "template": { "file": "file_query_template", "params": { "my_value": "value1" } } } } - match: { hits.total: 1 } - do: + warnings: + - '[template] query is deprecated, use search template api instead' search: body: { "query": { "template": { "id": "1", "params": { "my_value": "value1" } } } } - match: { hits.total: 1 } - do: + warnings: + - '[template] query is deprecated, use search template api instead' search: body: { "query": { "template": { "id": "/mustache/1", "params": { "my_value": "value1" } } } } - match: { hits.total: 1 } - do: - search: + warnings: + - '[template] query is deprecated, use search template api instead' + search: body: { "query": { "template": { "inline": {"match_{{template}}": {}}, "params" : { "template" : "all" } } } } - match: { hits.total: 2 } - do: + warnings: + - '[template] query is deprecated, use search template api instead' search: body: { "query": { "template": { "inline": "{ \"term\": { \"text\": { \"value\": \"{{template}}\" } } }", "params": { "template": "value1" } } } } - match: { 
hits.total: 1 } - do: + warnings: + - '[template] query is deprecated, use search template api instead' search: body: { "query": { "template": { "inline": "{\"match_{{template}}\": {}}", "params" : { "template" : "all" } } } } - match: { hits.total: 2 } - do: + warnings: + - '[template] query is deprecated, use search template api instead' search: body: { "query": { "template": { "inline": "{\"match_all\": {}}", "params" : {} } } } - match: { hits.total: 2 } - do: + warnings: + - '[template] query is deprecated, use search template api instead' search: body: { "query": { "template": { "inline": "{\"query_string\": { \"query\" : \"{{query}}\" }}", "params" : { "query" : "text:\"value2 value3\"" } } } } - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc index 4e88cef4c9f..16b607f4a22 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc @@ -173,6 +173,25 @@ The argument to `catch` can be any of: If `catch` is specified, then the `response` var must be cleared, and the test should fail if no error is thrown. +If the arguments to `do` include `warnings` then we are expecting a `Warning` +header to come back from the request. If the arguments *don't* include a +`warnings` argument then we *don't* expect the response to include a `Warning` +header. The warnings must match exactly. Using it looks like this: + +.... + - do: + warnings: + - '[index] is deprecated' + - quotes are not required because yaml + - but this argument is always a list, never a single string + - no matter how many warnings you expect + get: + index: test + type: test + id: 1 +.... 
+ + === `set` For some tests, it is necessary to extract a value from the previous `response`, in @@ -284,4 +303,3 @@ This depends on the datatype of the value being examined, eg: - length: { _tokens: 3 } # the `_tokens` array has 3 elements - length: { _source: 5 } # the `_source` hash has 5 keys .... - diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yaml index 85d22a4dc23..415a38f00c1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yaml @@ -41,7 +41,7 @@ body: query: more_like_this: - docs: + like: - _index: test_1 _type: test @@ -51,8 +51,8 @@ _index: test_1 _type: test _id: 2 - ids: - - 3 + - + _id: 3 include: true min_doc_freq: 0 min_term_freq: 0 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml index 424153aa573..48857522cb8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/10_source_filtering.yaml @@ -72,7 +72,7 @@ setup: search: body: _source: - include: [ include.field1, include.field2 ] + includes: [ include.field1, include.field2 ] query: { match_all: {} } - match: { hits.hits.0._source.include.field1: v1 } - match: { hits.hits.0._source.include.field2: v2 } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponse.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponse.java index 8f449274ea5..481ae752d05 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponse.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestResponse.java @@ -18,6 +18,7 @@ */ package 
org.elasticsearch.test.rest.yaml; +import org.apache.http.Header; import org.apache.http.client.methods.HttpHead; import org.apache.http.util.EntityUtils; import org.elasticsearch.client.Response; @@ -25,6 +26,8 @@ import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; /** * Response obtained from a REST call, eagerly reads the response body into a string for later optional parsing. @@ -70,6 +73,19 @@ public class ClientYamlTestResponse { return response.getStatusLine().getReasonPhrase(); } + /** + * Get a list of all of the values of all warning headers returned in the response. + */ + public List getWarningHeaders() { + List warningHeaders = new ArrayList<>(); + for (Header header : response.getHeaders()) { + if (header.getName().equals("Warning")) { + warningHeaders.add(header.getValue()); + } + } + return warningHeaders; + } + /** * Returns the body properly parsed depending on the content type. * Might be a string or a json object parsed as a map. 
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java index df797dd53dd..6e47220ec08 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java @@ -41,6 +41,7 @@ public final class Features { "groovy_scripting", "headers", "stash_in_path", + "warnings", "yaml")); private Features() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestSuiteParseContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestSuiteParseContext.java index 73fd57c3deb..e466c1010f6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestSuiteParseContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/ClientYamlTestSuiteParseContext.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.test.rest.yaml.parser; +import org.elasticsearch.common.ParseFieldMatcher; +import org.elasticsearch.common.ParseFieldMatcherSupplier; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; @@ -36,7 +38,7 @@ import java.util.Map; * Context shared across the whole tests parse phase. * Provides shared parse methods and holds information needed to parse the test sections (e.g. 
es version) */ -public class ClientYamlTestSuiteParseContext { +public class ClientYamlTestSuiteParseContext implements ParseFieldMatcherSupplier { private static final SetupSectionParser SETUP_SECTION_PARSER = new SetupSectionParser(); private static final TeardownSectionParser TEARDOWN_SECTION_PARSER = new TeardownSectionParser(); @@ -185,4 +187,9 @@ public class ClientYamlTestSuiteParseContext { Map.Entry entry = map.entrySet().iterator().next(); return Tuple.tuple(entry.getKey(), entry.getValue()); } + + @Override + public ParseFieldMatcher getParseFieldMatcher() { + return ParseFieldMatcher.STRICT; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParser.java index eda0f728f93..b89aa821ec9 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParser.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParser.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.test.rest.yaml.parser; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -25,9 +26,13 @@ import org.elasticsearch.test.rest.yaml.section.ApiCallSection; import org.elasticsearch.test.rest.yaml.section.DoSection; import java.io.IOException; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; +import static java.util.Collections.unmodifiableList; + /** * Parser for do sections */ @@ -44,6 +49,7 @@ public class DoSectionParser implements ClientYamlTestFragmentParser DoSection doSection = new DoSection(parseContext.parser().getTokenLocation()); ApiCallSection apiCallSection = null; Map headers = new HashMap<>(); + List expectedWarnings = new ArrayList<>(); while ((token = 
parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -52,6 +58,17 @@ public class DoSectionParser implements ClientYamlTestFragmentParser if ("catch".equals(currentFieldName)) { doSection.setCatch(parser.text()); } + } else if (token == XContentParser.Token.START_ARRAY) { + if ("warnings".equals(currentFieldName)) { + while ((token = parser.nextToken()) == XContentParser.Token.VALUE_STRING) { + expectedWarnings.add(parser.text()); + } + if (token != XContentParser.Token.END_ARRAY) { + throw new ParsingException(parser.getTokenLocation(), "[warnings] must be a string array but saw [" + token + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), "unknown array [" + currentFieldName + "]"); + } } else if (token == XContentParser.Token.START_OBJECT) { if ("headers".equals(currentFieldName)) { String headerName = null; @@ -97,6 +114,7 @@ public class DoSectionParser implements ClientYamlTestFragmentParser apiCallSection.addHeaders(headers); } doSection.setApiCallSection(apiCallSection); + doSection.setExpectedWarningHeaders(unmodifiableList(expectedWarnings)); } finally { parser.nextToken(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 4bad7f57a3a..af4a8e4f51a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -29,8 +29,12 @@ import org.elasticsearch.test.rest.yaml.ClientYamlTestResponseException; import java.io.IOException; import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; import java.util.Map; +import java.util.Set; +import static java.util.Collections.emptyList; import static org.elasticsearch.common.collect.Tuple.tuple; import static 
org.elasticsearch.test.hamcrest.RegexMatcher.matches; import static org.hamcrest.Matchers.allOf; @@ -49,6 +53,10 @@ import static org.junit.Assert.fail; * headers: * Authorization: Basic user:pass * Content-Type: application/json + * warnings: + * - Stuff is deprecated, yo + * - Don't use deprecated stuff + * - Please, stop. It hurts. * update: * index: test_1 * type: test @@ -63,6 +71,7 @@ public class DoSection implements ExecutableSection { private final XContentLocation location; private String catchParam; private ApiCallSection apiCallSection; + private List expectedWarningHeaders = emptyList(); public DoSection(XContentLocation location) { this.location = location; @@ -84,6 +93,22 @@ public class DoSection implements ExecutableSection { this.apiCallSection = apiCallSection; } + /** + * Warning headers that we expect from this response. If the headers don't match exactly this request is considered to have failed. + * Defaults to emptyList. + */ + public List getExpectedWarningHeaders() { + return expectedWarningHeaders; + } + + /** + * Set the warning headers that we expect from this response. If the headers don't match exactly this request is considered to have + * failed. Defaults to emptyList. 
+ */ + public void setExpectedWarningHeaders(List expectedWarningHeaders) { + this.expectedWarningHeaders = expectedWarningHeaders; + } + @Override public XContentLocation getLocation() { return location; @@ -100,7 +125,7 @@ public class DoSection implements ExecutableSection { } try { - ClientYamlTestResponse restTestResponse = executionContext.callApi(apiCallSection.getApi(), apiCallSection.getParams(), + ClientYamlTestResponse response = executionContext.callApi(apiCallSection.getApi(), apiCallSection.getParams(), apiCallSection.getBodies(), apiCallSection.getHeaders()); if (Strings.hasLength(catchParam)) { String catchStatusCode; @@ -111,8 +136,9 @@ public class DoSection implements ExecutableSection { } else { throw new UnsupportedOperationException("catch value [" + catchParam + "] not supported"); } - fail(formatStatusCodeMessage(restTestResponse, catchStatusCode)); + fail(formatStatusCodeMessage(response, catchStatusCode)); } + checkWarningHeaders(response.getWarningHeaders()); } catch(ClientYamlTestResponseException e) { ClientYamlTestResponse restTestResponse = e.getRestTestResponse(); if (!Strings.hasLength(catchParam)) { @@ -135,6 +161,39 @@ public class DoSection implements ExecutableSection { } } + /** + * Check that the response contains only the warning headers that we expect. + */ + void checkWarningHeaders(List warningHeaders) { + StringBuilder failureMessage = null; + // LinkedHashSet so that missing expected warnings come back in a predictable order which is nice for testing + Set expected = new LinkedHashSet<>(expectedWarningHeaders); + for (String header : warningHeaders) { + if (expected.remove(header)) { + // Was expected, all good. 
+ continue; + } + if (failureMessage == null) { + failureMessage = new StringBuilder("got unexpected warning headers ["); + } + failureMessage.append('\n').append(header); + } + if (false == expected.isEmpty()) { + if (failureMessage == null) { + failureMessage = new StringBuilder(); + } else { + failureMessage.append("\n] "); + } + failureMessage.append("didn't get expected warning headers ["); + for (String header : expected) { + failureMessage.append('\n').append(header); + } + } + if (failureMessage != null) { + fail(failureMessage + "\n]"); + } + } + private void assertStatusCode(ClientYamlTestResponse restTestResponse) { Tuple> stringMatcherTuple = catches.get(catchParam); assertThat(formatStatusCodeMessage(restTestResponse, stringMatcherTuple.v1()), diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParserTests.java index 5d79432155f..0cecc508bf5 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParserTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/parser/DoSectionParserTests.java @@ -29,8 +29,10 @@ import org.elasticsearch.test.rest.yaml.section.DoSection; import org.hamcrest.MatcherAssert; import java.io.IOException; +import java.util.Arrays; import java.util.Map; +import static java.util.Collections.singletonList; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -344,11 +346,11 @@ public class DoSectionParserTests extends AbstractParserTestCase { public void testParseDoSectionWithHeaders() throws Exception { parser = YamlXContent.yamlXContent.createParser( "headers:\n" + - " Authorization: \"thing one\"\n" + - " Content-Type: \"application/json\"\n" + - "indices.get_warmer:\n" + - " index: test_index\n" + - " name: test_warmer" + " Authorization: \"thing 
one\"\n" + + " Content-Type: \"application/json\"\n" + + "indices.get_warmer:\n" + + " index: test_index\n" + + " name: test_warmer" ); DoSectionParser doSectionParser = new DoSectionParser(); @@ -381,9 +383,9 @@ public class DoSectionParserTests extends AbstractParserTestCase { public void testParseDoSectionMultivaluedField() throws Exception { parser = YamlXContent.yamlXContent.createParser( "indices.get_field_mapping:\n" + - " index: test_index\n" + - " type: test_type\n" + - " field: [ text , text1 ]" + " index: test_index\n" + + " type: test_type\n" + + " field: [ text , text1 ]" ); DoSectionParser doSectionParser = new DoSectionParser(); @@ -400,6 +402,46 @@ public class DoSectionParserTests extends AbstractParserTestCase { assertThat(doSection.getApiCallSection().getBodies().size(), equalTo(0)); } + public void testParseDoSectionExpectedWarnings() throws Exception { + parser = YamlXContent.yamlXContent.createParser( + "indices.get_field_mapping:\n" + + " index: test_index\n" + + " type: test_type\n" + + "warnings:\n" + + " - some test warning they are typically pretty long\n" + + " - some other test warning somtimes they have [in] them" + ); + + DoSectionParser doSectionParser = new DoSectionParser(); + DoSection doSection = doSectionParser.parse(new ClientYamlTestSuiteParseContext("api", "suite", parser)); + + assertThat(doSection.getCatch(), nullValue()); + assertThat(doSection.getApiCallSection(), notNullValue()); + assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_field_mapping")); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); + assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index")); + assertThat(doSection.getApiCallSection().getParams().get("type"), equalTo("test_type")); + assertThat(doSection.getApiCallSection().hasBody(), equalTo(false)); + assertThat(doSection.getApiCallSection().getBodies().size(), equalTo(0)); + assertThat(doSection.getExpectedWarningHeaders(), 
equalTo(Arrays.asList( + "some test warning they are typically pretty long", + "some other test warning somtimes they have [in] them"))); + + parser = YamlXContent.yamlXContent.createParser( + "indices.get_field_mapping:\n" + + " index: test_index\n" + + "warnings:\n" + + " - just one entry this time" + ); + + doSection = doSectionParser.parse(new ClientYamlTestSuiteParseContext("api", "suite", parser)); + assertThat(doSection.getCatch(), nullValue()); + assertThat(doSection.getApiCallSection(), notNullValue()); + assertThat(doSection.getExpectedWarningHeaders(), equalTo(singletonList( + "just one entry this time"))); + + } + private static void assertJsonEquals(Map actual, String expected) throws IOException { Map expectedMap; try (XContentParser parser = JsonXContent.jsonXContent.createParser(expected)) { diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java new file mode 100644 index 00000000000..e981b2d999c --- /dev/null +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.test.rest.yaml.section; + +import org.elasticsearch.common.xcontent.XContentLocation; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Arrays; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; + +public class DoSectionTests extends ESTestCase { + public void testWarningHeaders() throws IOException { + DoSection section = new DoSection(new XContentLocation(1, 1)); + + // No warning headers doesn't throw an exception + section.checkWarningHeaders(emptyList()); + + // Any warning headers fail + AssertionError e = expectThrows(AssertionError.class, () -> section.checkWarningHeaders(singletonList("test"))); + assertEquals("got unexpected warning headers [\ntest\n]", e.getMessage()); + e = expectThrows(AssertionError.class, () -> section.checkWarningHeaders(Arrays.asList("test", "another", "some more"))); + assertEquals("got unexpected warning headers [\ntest\nanother\nsome more\n]", e.getMessage()); + + // But not when we expect them + section.setExpectedWarningHeaders(singletonList("test")); + section.checkWarningHeaders(singletonList("test")); + section.setExpectedWarningHeaders(Arrays.asList("test", "another", "some more")); + section.checkWarningHeaders(Arrays.asList("test", "another", "some more")); + + // But if you don't get some that you did expect, that is an error + section.setExpectedWarningHeaders(singletonList("test")); + e = expectThrows(AssertionError.class, () -> section.checkWarningHeaders(emptyList())); + assertEquals("didn't get expected warning headers [\ntest\n]", e.getMessage()); + section.setExpectedWarningHeaders(Arrays.asList("test", "another", "some more")); + e = expectThrows(AssertionError.class, () -> section.checkWarningHeaders(emptyList())); + assertEquals("didn't get expected warning headers [\ntest\nanother\nsome more\n]", e.getMessage()); + e = expectThrows(AssertionError.class, () -> 
section.checkWarningHeaders(Arrays.asList("test", "some more"))); + assertEquals("didn't get expected warning headers [\nanother\n]", e.getMessage()); + + // It is also an error if you get some warning you want and some you don't want + section.setExpectedWarningHeaders(Arrays.asList("test", "another", "some more")); + e = expectThrows(AssertionError.class, () -> section.checkWarningHeaders(Arrays.asList("test", "cat"))); + assertEquals("got unexpected warning headers [\ncat\n] didn't get expected warning headers [\nanother\nsome more\n]", + e.getMessage()); + } +} From e08f11dabc3ca04fbcded026aee512696dfe835d Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Thu, 4 Aug 2016 22:47:39 +0200 Subject: [PATCH 19/20] Remove BWC serialization logic for pre 2.2 nodes (#19810) This change removes all pre 2.2 logic from InternalSearchResponse serialization. It's unneeded in 5.0 since we require full cluster restart --- .../internal/InternalSearchResponse.java | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java index 26410cc9680..09a787ac3cb 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java @@ -136,14 +136,8 @@ public class InternalSearchResponse implements Streamable, ToXContent { suggest = Suggest.readSuggest(in); } timedOut = in.readBoolean(); - terminatedEarly = in.readOptionalBoolean(); - - if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) { - profileResults = new SearchProfileShardResults(in); - } else { - profileResults = null; - } + profileResults = in.readOptionalWriteable(SearchProfileShardResults::new); } @Override @@ -162,16 +156,7 @@ public class InternalSearchResponse implements Streamable, ToXContent { 
suggest.writeTo(out); } out.writeBoolean(timedOut); - out.writeOptionalBoolean(terminatedEarly); - - if (out.getVersion().onOrAfter(Version.V_2_2_0)) { - if (profileResults == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - profileResults.writeTo(out); - } - } + out.writeOptionalWriteable(profileResults); } } From 609a199bd49efb6542fd167f0591c9d6a2847727 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Fri, 5 Aug 2016 08:58:03 +0200 Subject: [PATCH 20/20] Upon being elected as master, prefer joins' node info to existing cluster state (#19743) When we introduced [persistent node ids](https://github.com/elastic/elasticsearch/pull/19140) we were concerned that people may copy data folders from one to another resulting in two nodes competing for the same id in the cluster. To solve this we elected to not allow an incoming join if a different node with the same id already exists in the cluster, or if some other node already has the same transport address as the incoming join. The rationale there was that it is better to prefer existing nodes and that we can rely on node fault detection to remove any node from the cluster that isn't correct any more, making room for the node that wants to join (and will keep trying). Sadly there were two problems with this: 1) One minor and easy to fix - we didn't allow for the case where the existing node can have the same network address as the incoming one, but have a different ephemeral id (after node restart). This confused the logic in `AllocationService`, in these rare cases. The cluster is good enough to detect this and recover later on, but it's not clean. 2) The assumption that Node Fault Detection will clean up is *wrong* when the node just won an election (it wasn't master before) and needs to process the incoming joins in order to commit the cluster state and assume its mastership. In those cases, the Node Fault Detection isn't active.
This PR fixes these two and prefers incoming nodes to existing nodes when finishing an election. On top of that, on request by @ywelsch, `AllocationService` synchronization between the nodes of the cluster and its routing table is now explicit rather than something we do all the time. The same goes for promotion of replicas to primaries. --- .../allocation/AllocationBenchmark.java | 2 +- .../elasticsearch/cluster/ClusterState.java | 5 +- .../cluster/node/DiscoveryNodes.java | 32 ++-- .../routing/allocation/AllocationService.java | 36 +++- .../cluster/service/ClusterService.java | 2 +- .../discovery/local/LocalDiscovery.java | 8 +- .../discovery/zen/NodeJoinController.java | 38 +++- .../discovery/zen/ZenDiscovery.java | 3 +- .../org/elasticsearch/tribe/TribeService.java | 2 +- .../node/tasks/TransportTasksActionTests.java | 2 +- .../cluster/reroute/ClusterRerouteTests.java | 2 +- .../shrink/TransportShrinkActionTests.java | 4 +- .../ingest/IngestProxyActionFilterTests.java | 2 +- .../TransportMultiSearchActionTests.java | 10 +- .../TransportBroadcastByNodeActionTests.java | 2 +- .../nodes/TransportNodesActionTests.java | 2 +- .../ClusterStateCreationUtils.java | 6 +- .../TransportClientHeadersTests.java | 2 +- .../cluster/ClusterChangedEventTests.java | 2 +- .../cluster/ClusterStateDiffIT.java | 6 +- .../cluster/ClusterStateTests.java | 2 +- .../cluster/NodeConnectionsServiceTests.java | 2 +- ...rdFailedClusterStateTaskExecutorTests.java | 2 +- .../MetaDataCreateIndexServiceTests.java | 4 +- .../cluster/node/DiscoveryNodesTests.java | 6 +- .../DelayedAllocationServiceTests.java | 32 ++-- .../cluster/routing/PrimaryTermsTests.java | 4 +- .../cluster/routing/RoutingTableTests.java | 2 +- .../cluster/routing/UnassignedInfoTests.java | 24 +-- .../allocation/ActiveAllocationIdTests.java | 8 +- .../allocation/AddIncrementallyTests.java | 10 +- .../allocation/AllocationCommandsTests.java | 19 +- .../allocation/AllocationPriorityTests.java | 2 +-
.../allocation/AwarenessAllocationTests.java | 98 +++++----- .../allocation/BalanceConfigurationTests.java | 14 +- .../allocation/CatAllocationTestCase.java | 2 +- .../ClusterRebalanceRoutingTests.java | 32 ++-- .../ConcurrentRebalanceRoutingTests.java | 4 +- .../allocation/DeadNodesAllocationTests.java | 32 ++-- .../DecisionsImpactOnClusterHealthTests.java | 2 +- ...ReplicaAsPrimaryDuringRelocationTests.java | 6 +- .../ExpectedShardSizeAllocationTests.java | 6 +- .../allocation/FailedNodeRoutingTests.java | 4 +- .../allocation/FailedShardsRoutingTests.java | 22 +-- .../FilterAllocationDeciderTests.java | 2 +- .../allocation/FilterRoutingTests.java | 18 +- .../routing/allocation/IndexBalanceTests.java | 10 +- .../MaxRetryAllocationDeciderTests.java | 4 +- .../NodeVersionAllocationDeciderTests.java | 38 ++-- ...alPrimariesToRelocatingPrimariesTests.java | 8 +- .../PreferPrimaryAllocationTests.java | 2 +- .../PrimaryElectionRoutingTests.java | 14 +- ...yNotRelocatedWhileBeingRecoveredTests.java | 6 +- .../RandomAllocationDeciderTests.java | 12 +- .../allocation/RebalanceAfterActiveTests.java | 4 +- .../ReplicaAllocatedAfterPrimaryTests.java | 2 +- .../RoutingNodesIntegrityTests.java | 12 +- .../allocation/SameShardRoutingTests.java | 6 +- .../allocation/ShardVersioningTests.java | 2 +- .../ShardsLimitAllocationTests.java | 8 +- .../SingleShardNoReplicasRoutingTests.java | 18 +- .../SingleShardOneReplicaRoutingTests.java | 8 +- .../allocation/StartedShardsRoutingTests.java | 2 +- .../TenShardsOneReplicaRoutingTests.java | 6 +- .../allocation/ThrottlingAllocationTests.java | 18 +- .../UpdateNumberOfReplicasTests.java | 4 +- .../decider/DiskThresholdDeciderTests.java | 38 ++-- .../DiskThresholdDeciderUnitTests.java | 10 +- .../decider/EnableAllocationTests.java | 32 ++-- .../ClusterSerializationTests.java | 4 +- .../ClusterStateToStringTests.java | 2 +- .../structure/RoutingIteratorTests.java | 19 +- .../discovery/ZenFaultDetectionTests.java | 8 +- 
.../zen/NodeJoinControllerTests.java | 177 +++++++++++++----- ...eRemovalClusterStateTaskExecutorTests.java | 14 +- .../discovery/zen/ZenDiscoveryIT.java | 2 +- .../discovery/zen/ZenDiscoveryUnitTests.java | 6 +- .../zen/ping/unicast/UnicastZenPingIT.java | 8 +- .../PendingClusterStatesQueueTests.java | 2 +- .../PublishClusterStateActionTests.java | 38 ++-- .../gateway/AsyncShardFetchTests.java | 14 +- .../gateway/GatewayMetaStateTests.java | 4 +- .../gateway/PrimaryShardAllocatorTests.java | 10 +- .../gateway/ReplicaShardAllocatorTests.java | 4 +- .../indices/cluster/ClusterStateChanges.java | 9 +- ...ClusterStateServiceRandomUpdatesTests.java | 4 +- .../indices/state/RareClusterStateIT.java | 5 +- .../indices/store/IndicesStoreTests.java | 12 +- 88 files changed, 646 insertions(+), 487 deletions(-) diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java index 5e5f35f6040..86902b380c8 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java @@ -145,7 +145,7 @@ public class AllocationBenchmark { RoutingTable routingTable = rb.build(); DiscoveryNodes.Builder nb = DiscoveryNodes.builder(); for (int i = 1; i <= numNodes; i++) { - nb.put(Allocators.newNode("node" + i, Collections.singletonMap("tag", "tag_" + (i % numTags)))); + nb.add(Allocators.newNode("node" + i, Collections.singletonMap("tag", "tag_" + (i % numTags)))); } initialClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData).routingTable(routingTable).nodes diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java index 6745900057d..632179153f9 100644 --- 
a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -625,6 +625,10 @@ public class ClusterState implements ToXContent, Diffable { return this; } + public DiscoveryNodes nodes() { + return nodes; + } + public Builder routingResult(RoutingAllocation.Result routingResult) { this.routingTable = routingResult.routingTable(); this.metaData = routingResult.metaData(); @@ -723,7 +727,6 @@ public class ClusterState implements ToXContent, Diffable { public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException { return PROTO.readFrom(in, localNode); } - } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 68dedc433da..9d0edf7b910 100644 --- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -357,14 +357,14 @@ public class DiscoveryNodes extends AbstractDiffable implements Builder builder = new Builder().masterNodeId(masterNodeId).localNodeId(localNodeId); for (DiscoveryNode node : this) { if (newNodes.contains(node.getId())) { - builder.put(node); + builder.add(node); } } return builder.build(); } public DiscoveryNodes newNode(DiscoveryNode node) { - return new Builder(this).put(node).build(); + return new Builder(this).add(node).build(); } /** @@ -554,8 +554,8 @@ public class DiscoveryNodes extends AbstractDiffable implements node = localNode; } // some one already built this and validated it's OK, skip the n2 scans - assert builder.validatePut(node) == null : "building disco nodes from network doesn't pass preflight: " - + builder.validatePut(node); + assert builder.validateAdd(node) == null : "building disco nodes from network doesn't pass preflight: " + + builder.validateAdd(node); builder.putUnsafe(node); } return 
builder.build(); @@ -592,10 +592,10 @@ public class DiscoveryNodes extends AbstractDiffable implements /** * adds a disco node to the builder. Will throw an {@link IllegalArgumentException} if - * the supplied node doesn't pass the pre-flight checks performed by {@link #validatePut(DiscoveryNode)} + * the supplied node doesn't pass the pre-flight checks performed by {@link #validateAdd(DiscoveryNode)} */ - public Builder put(DiscoveryNode node) { - final String preflight = validatePut(node); + public Builder add(DiscoveryNode node) { + final String preflight = validateAdd(node); if (preflight != null) { throw new IllegalArgumentException(preflight); } @@ -603,6 +603,16 @@ public class DiscoveryNodes extends AbstractDiffable implements return this; } + /** + * Get a node by its id + * + * @param nodeId id of the wanted node + * @return wanted node if it exists. Otherwise null + */ + @Nullable public DiscoveryNode get(String nodeId) { + return nodes.get(nodeId); + } + private void putUnsafe(DiscoveryNode node) { nodes.put(node.getId(), node); } @@ -635,10 +645,10 @@ public class DiscoveryNodes extends AbstractDiffable implements * * @return null if all is OK or an error message explaining why a node can not be added. 
* - * Note: if this method returns a non-null value, calling {@link #put(DiscoveryNode)} will fail with an + * Note: if this method returns a non-null value, calling {@link #add(DiscoveryNode)} will fail with an * exception */ - private String validatePut(DiscoveryNode node) { + private String validateAdd(DiscoveryNode node) { for (ObjectCursor cursor : nodes.values()) { final DiscoveryNode existingNode = cursor.value; if (node.getAddress().equals(existingNode.getAddress()) && @@ -646,9 +656,9 @@ public class DiscoveryNodes extends AbstractDiffable implements return "can't add node " + node + ", found existing node " + existingNode + " with same address"; } if (node.getId().equals(existingNode.getId()) && - node.getAddress().equals(existingNode.getAddress()) == false) { + node.equals(existingNode) == false) { return "can't add node " + node + ", found existing node " + existingNode - + " with the same id, but a different address"; + + " with the same id but is a different node instance"; } } return null; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index f58bc22c63f..8745007e44f 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -249,11 +249,36 @@ public class AllocationService extends AbstractComponent { applyFailedShard(allocation, failedShard, unassignedInfo); } gatewayAllocator.applyFailedShards(allocation); + reroute(allocation); String failedShardsAsString = firstListElementsToCommaDelimitedString(failedShards, s -> s.routingEntry.shardId().toString()); return buildResultAndLogHealthChange(allocation, "shards failed [" + failedShardsAsString + "] ..."); } + /** + * unassigned an shards that are associated with nodes that are no longer part of the cluster, potentially 
promoting replicas + * if needed. + */ + public RoutingAllocation.Result deassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) { + RoutingNodes routingNodes = getMutableRoutingNodes(clusterState); + // shuffle the unassigned nodes, just so we won't have things like poison failed shards + routingNodes.unassigned().shuffle(); + RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState, + clusterInfoService.getClusterInfo(), currentNanoTime(), false); + + // first, clear from the shards any node id they used to belong to that is now dead + boolean changed = deassociateDeadNodes(allocation); + + if (reroute) { + changed |= reroute(allocation); + } + + if (!changed) { + return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); + } + return buildResultAndLogHealthChange(allocation, reason); + } + /** * Removes delay markers from unassigned shards based on current time stamp. Returns true if markers were removed. */ @@ -352,13 +377,9 @@ public class AllocationService extends AbstractComponent { } private boolean reroute(RoutingAllocation allocation) { - boolean changed = false; - // first, clear from the shards any node id they used to belong to that is now dead - changed |= deassociateDeadNodes(allocation); + assert deassociateDeadNodes(allocation) == false : "dead nodes should be explicitly cleaned up. 
See deassociateDeadNodes"; - // elect primaries *before* allocating unassigned, so backups of primaries that failed - // will be moved to primary state and not wait for primaries to be allocated and recovered (*from gateway*) - changed |= electPrimariesAndUnassignedDanglingReplicas(allocation); + boolean changed = electPrimariesAndUnassignedDanglingReplicas(allocation); // now allocate all the unassigned to available nodes if (allocation.routingNodes().unassigned().size() > 0) { @@ -390,8 +411,8 @@ public class AllocationService extends AbstractComponent { if (candidate != null) { shardEntry = unassignedIterator.demotePrimaryToReplicaShard(); ShardRouting primarySwappedCandidate = routingNodes.promoteAssignedReplicaShardToPrimary(candidate); + changed = true; if (primarySwappedCandidate.relocatingNodeId() != null) { - changed = true; // its also relocating, make sure to move the other routing to primary RoutingNode node = routingNodes.node(primarySwappedCandidate.relocatingNodeId()); if (node != null) { @@ -406,7 +427,6 @@ public class AllocationService extends AbstractComponent { IndexMetaData index = allocation.metaData().getIndexSafe(primarySwappedCandidate.index()); if (IndexMetaData.isIndexUsingShadowReplicas(index.getSettings())) { routingNodes.reinitShadowPrimary(primarySwappedCandidate); - changed = true; } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java index 2dce2c82d90..21e2defd9b0 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java @@ -156,7 +156,7 @@ public class ClusterService extends AbstractLifecycleComponent { public synchronized void setLocalNode(DiscoveryNode localNode) { assert clusterState.nodes().getLocalNodeId() == null : "local node is already set"; - DiscoveryNodes.Builder nodeBuilder = 
DiscoveryNodes.builder(clusterState.nodes()).put(localNode).localNodeId(localNode.getId()); + DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.nodes()).add(localNode).localNodeId(localNode.getId()); this.clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); } diff --git a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java index 790d40b8f59..c544db4047f 100644 --- a/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/local/LocalDiscovery.java @@ -134,7 +134,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov public ClusterState execute(ClusterState currentState) { DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) { - nodesBuilder.put(discovery.localNode()); + nodesBuilder.add(discovery.localNode()); } nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId()); // remove the NO_MASTER block in this case @@ -160,7 +160,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov public ClusterState execute(ClusterState currentState) { DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) { - nodesBuilder.put(discovery.localNode()); + nodesBuilder.add(discovery.localNode()); } nodesBuilder.localNodeId(master.localNode().getId()).masterNodeId(master.localNode().getId()); currentState = ClusterState.builder(currentState).nodes(nodesBuilder).build(); @@ -231,8 +231,8 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov } // reroute here, so we eagerly remove dead nodes from the routing ClusterState updatedState = 
ClusterState.builder(currentState).nodes(newNodes).build(); - RoutingAllocation.Result routingResult = master.allocationService.reroute( - ClusterState.builder(updatedState).build(), "elected as master"); + RoutingAllocation.Result routingResult = master.allocationService.deassociateDeadNodes( + ClusterState.builder(updatedState).build(), true, "node stopped"); return ClusterState.builder(updatedState).routingResult(routingResult).build(); } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index ff5cdd4e31e..1f4f57c4ed4 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -413,8 +413,7 @@ public class NodeJoinController extends AbstractComponent { final DiscoveryNodes currentNodes = currentState.nodes(); boolean nodesChanged = false; - ClusterState.Builder newState = ClusterState.builder(currentState); - DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentNodes); + ClusterState.Builder newState; if (joiningNodes.size() == 1 && joiningNodes.get(0).equals(FINISH_ELECTION_TASK)) { return results.successes(joiningNodes).build(currentState); @@ -423,16 +422,17 @@ public class NodeJoinController extends AbstractComponent { // use these joins to try and become the master. 
// Note that we don't have to do any validation of the amount of joining nodes - the commit // during the cluster state publishing guarantees that we have enough - nodesBuilder.masterNodeId(currentNodes.getLocalNodeId()); - ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks()) - .removeGlobalBlock(discoverySettings.getNoMasterBlock()).build(); - newState.blocks(clusterBlocks); + newState = becomeMasterAndTrimConflictingNodes(currentState, joiningNodes); nodesChanged = true; - } else if (nodesBuilder.isLocalNodeElectedMaster() == false) { + } else if (currentNodes.isLocalNodeElectedMaster() == false) { logger.trace("processing node joins, but we are not the master. current master: {}", currentNodes.getMasterNode()); throw new NotMasterException("Node [" + currentNodes.getLocalNode() + "] not master for join request"); + } else { + newState = ClusterState.builder(currentState); } + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes()); + assert nodesBuilder.isLocalNodeElectedMaster(); // processing any joins @@ -443,7 +443,7 @@ public class NodeJoinController extends AbstractComponent { logger.debug("received a join request for an existing node [{}]", node); } else { try { - nodesBuilder.put(node); + nodesBuilder.add(node); nodesChanged = true; } catch (IllegalArgumentException e) { results.failure(node, e); @@ -468,6 +468,28 @@ public class NodeJoinController extends AbstractComponent { return results.build(newState.build()); } + private ClusterState.Builder becomeMasterAndTrimConflictingNodes(ClusterState currentState, List joiningNodes) { + assert currentState.nodes().getMasterNodeId() == null : currentState.prettyPrint(); + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentState.nodes()); + nodesBuilder.masterNodeId(currentState.nodes().getLocalNodeId()); + ClusterBlocks clusterBlocks = ClusterBlocks.builder().blocks(currentState.blocks()) + 
.removeGlobalBlock(discoverySettings.getNoMasterBlock()).build(); + for (final DiscoveryNode joiningNode : joiningNodes) { + final DiscoveryNode existingNode = nodesBuilder.get(joiningNode.getId()); + if (existingNode != null && existingNode.equals(joiningNode) == false) { + logger.debug("removing existing node [{}], which conflicts with incoming join from [{}]", existingNode, joiningNode); + nodesBuilder.remove(existingNode.getId()); + } + } + + // now trim any left over dead nodes - either left there when the previous master stepped down + // or removed by us above + ClusterState tmpState = ClusterState.builder(currentState).nodes(nodesBuilder).blocks(clusterBlocks).build(); + RoutingAllocation.Result result = allocationService.deassociateDeadNodes(tmpState, false, + "removed dead nodes on election"); + return ClusterState.builder(tmpState).routingResult(result); + } + @Override public boolean runOnlyOnMaster() { // we validate that we are allowed to change the cluster state during cluster state processing diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 3b176f7eff9..d380b1fd601 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -570,7 +570,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover if (!electMasterService.hasEnoughMasterNodes(remainingNodesClusterState.nodes())) { return resultBuilder.build(rejoin.apply(remainingNodesClusterState, "not enough master nodes")); } else { - final RoutingAllocation.Result routingResult = allocationService.reroute(remainingNodesClusterState, describeTasks(tasks)); + final RoutingAllocation.Result routingResult = + allocationService.deassociateDeadNodes(remainingNodesClusterState, true, describeTasks(tasks)); return 
resultBuilder.build(ClusterState.builder(remainingNodesClusterState).routingResult(routingResult).build()); } } diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index eb44a897386..40c805e0b00 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -382,7 +382,7 @@ public class TribeService extends AbstractLifecycleComponent { clusterStateChanged = true; logger.info("[{}] adding node [{}]", tribeName, discoNode); nodes.remove(tribe.getId()); // remove any existing node with the same id but different ephemeral id - nodes.put(discoNode); + nodes.add(discoNode); } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index 2c78786ab04..955957abfb6 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -739,7 +739,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { // First group by node DiscoveryNodes.Builder discoNodes = DiscoveryNodes.builder(); for (TestNode testNode : this.testNodes) { - discoNodes.put(testNode.discoveryNode); + discoNodes.add(testNode.discoveryNode); } response.setDiscoveryNodes(discoNodes.build()); Map byNodes = serialize(response, new ToXContent.MapParams(Collections.singletonMap("group_by", "nodes"))); diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java index a88c32aa43d..b3b91e6bfd8 100644 --- 
a/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteTests.java @@ -166,7 +166,7 @@ public class ClusterRerouteTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING .getDefault(Settings.EMPTY)) .metaData(metaData).routingTable(routingTable).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))) + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) .build(); RoutingTable prevRoutingTable = routingTable; routingTable = service.reroute(clusterState, "reroute").routingTable(); diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java index 3fcade05839..49d0ce447ba 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java @@ -93,7 +93,7 @@ public class TransportShrinkActionTests extends ESTestCase { // create one that won't fail ClusterState clusterState = ClusterState.builder(createClusterState("source", randomIntBetween(2, 10), 0, - Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().put(newNode("node1"))) + Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) .build(); AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), @@ -116,7 +116,7 @@ public class 
TransportShrinkActionTests extends ESTestCase { ClusterState clusterState = ClusterState.builder(createClusterState(indexName, randomIntBetween(2, 10), 0, Settings.builder() .put("index.blocks.write", true) - .build())).nodes(DiscoveryNodes.builder().put(newNode("node1"))) + .build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) .build(); AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), diff --git a/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java b/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java index 3d1a1a1c69d..a602465197a 100644 --- a/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java +++ b/core/src/test/java/org/elasticsearch/action/ingest/IngestProxyActionFilterTests.java @@ -79,7 +79,7 @@ public class IngestProxyActionFilterTests extends ESTestCase { roles.add(DiscoveryNode.Role.INGEST); } DiscoveryNode node = new DiscoveryNode(nodeId, nodeId, LocalTransportAddress.buildUnique(), attributes, roles, VersionUtils.randomVersion(random())); - builder.put(node); + builder.add(node); if (i == totalNodes - 1) { localNode = node; } diff --git a/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java b/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java index d751424ef72..011fb172514 100644 --- a/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java @@ -37,16 +37,12 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.ArrayList; import java.util.Collections; -import java.util.Iterator; -import java.util.List; 
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.sameInstance; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -117,12 +113,12 @@ public class TransportMultiSearchActionTests extends ESTestCase { int numDataNodes = randomIntBetween(1, 10); DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); for (int i = 0; i < numDataNodes; i++) { - builder.put(new DiscoveryNode("_id" + i, new LocalTransportAddress("_id" + i), Collections.emptyMap(), + builder.add(new DiscoveryNode("_id" + i, new LocalTransportAddress("_id" + i), Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT)); } - builder.put(new DiscoveryNode("master", new LocalTransportAddress("mater"), Collections.emptyMap(), + builder.add(new DiscoveryNode("master", new LocalTransportAddress("mater"), Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT)); - builder.put(new DiscoveryNode("ingest", new LocalTransportAddress("ingest"), Collections.emptyMap(), + builder.add(new DiscoveryNode("ingest", new LocalTransportAddress("ingest"), Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.INGEST), Version.CURRENT)); ClusterState state = ClusterState.builder(new ClusterName("_name")).nodes(builder).build(); diff --git a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 603ad664ec3..7b237383034 100644 --- a/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -220,7 +220,7 @@ public 
class TransportBroadcastByNodeActionTests extends ESTestCase { int totalIndexShards = 0; for (int i = 0; i < numberOfNodes; i++) { final DiscoveryNode node = newNode(i); - discoBuilder = discoBuilder.put(node); + discoBuilder = discoBuilder.add(node); int numberOfShards = randomIntBetween(1, 10); totalIndexShards += numberOfShards; for (int j = 0; j < numberOfShards; j++) { diff --git a/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java b/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java index ae8ea4a0b95..744a116f4a7 100644 --- a/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java @@ -190,7 +190,7 @@ public class TransportNodesActionTests extends ESTestCase { attributes.put("custom", randomBoolean() ? "match" : randomAsciiOfLengthBetween(3, 5)); } final DiscoveryNode node = newNode(i, attributes, roles); - discoBuilder = discoBuilder.put(node); + discoBuilder = discoBuilder.add(node); discoveryNodes.add(node); } discoBuilder.localNodeId(randomFrom(discoveryNodes).getId()); diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java index 7496bb85faf..813e4f630c4 100644 --- a/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java @@ -81,7 +81,7 @@ public class ClusterStateCreationUtils { Set unassignedNodes = new HashSet<>(); for (int i = 0; i < numberOfNodes + 1; i++) { final DiscoveryNode node = newNode(i); - discoBuilder = discoBuilder.put(node); + discoBuilder = discoBuilder.add(node); unassignedNodes.add(node.getId()); } discoBuilder.localNodeId(newNode(0).getId()); @@ 
-153,7 +153,7 @@ public class ClusterStateCreationUtils { DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); for (int i = 0; i < numberOfNodes + 1; i++) { final DiscoveryNode node = newNode(i); - discoBuilder = discoBuilder.put(node); + discoBuilder = discoBuilder.add(node); } discoBuilder.localNodeId(newNode(0).getId()); discoBuilder.masterNodeId(newNode(1).getId()); // we need a non-local master to test shard failures @@ -241,7 +241,7 @@ public class ClusterStateCreationUtils { public static ClusterState state(DiscoveryNode localNode, DiscoveryNode masterNode, DiscoveryNode... allNodes) { DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); for (DiscoveryNode node : allNodes) { - discoBuilder.put(node); + discoBuilder.add(node); } if (masterNode != null) { discoBuilder.masterNodeId(masterNode.getId()); diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java index 282f929ff24..e736e4b86a1 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java @@ -131,7 +131,7 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTestCase { ClusterName cluster1 = new ClusterName("cluster1"); ClusterState.Builder builder = ClusterState.builder(cluster1); //the sniffer detects only data nodes - builder.nodes(DiscoveryNodes.builder().put(new DiscoveryNode("node_id", address, Collections.emptyMap(), + builder.nodes(DiscoveryNodes.builder().add(new DiscoveryNode("node_id", address, Collections.emptyMap(), Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT))); ((TransportResponseHandler) handler) .handleResponse(new ClusterStateResponse(cluster1, builder.build())); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java 
b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java index 555f23813cb..6326d96f317 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java @@ -310,7 +310,7 @@ public class ClusterChangedEventTests extends ESTestCase { } } final DiscoveryNode node = newNode(nodeId, roles); - builder.put(node); + builder.add(node); if (i == localNodeIndex) { builder.localNodeId(nodeId); } diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index 9ffabec6fc0..1a7a2093a58 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -78,7 +78,7 @@ public class ClusterStateDiffIT extends ESIntegTestCase { emptyMap(), emptySet(), Version.CURRENT); DiscoveryNode otherNode = new DiscoveryNode("other", new LocalTransportAddress("other"), emptyMap(), emptySet(), Version.CURRENT); - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(masterNode).put(otherNode).localNodeId(masterNode.getId()).build(); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(masterNode).add(otherNode).localNodeId(masterNode.getId()).build(); ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); ClusterState clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), otherNode); @@ -193,14 +193,14 @@ public class ClusterStateDiffIT extends ESIntegTestCase { if (nodeId.startsWith("node-")) { nodes.remove(nodeId); if (randomBoolean()) { - nodes.put(new DiscoveryNode(nodeId, new LocalTransportAddress(randomAsciiOfLength(10)), emptyMap(), + nodes.add(new DiscoveryNode(nodeId, new LocalTransportAddress(randomAsciiOfLength(10)), emptyMap(), emptySet(), randomVersion(random()))); } } } 
int additionalNodeCount = randomIntBetween(1, 20); for (int i = 0; i < additionalNodeCount; i++) { - nodes.put(new DiscoveryNode("node-" + randomAsciiOfLength(10), new LocalTransportAddress(randomAsciiOfLength(10)), + nodes.add(new DiscoveryNode("node-" + randomAsciiOfLength(10), new LocalTransportAddress(randomAsciiOfLength(10)), emptyMap(), emptySet(), randomVersion(random()))); } return ClusterState.builder(clusterState).nodes(nodes); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index 6b99e525cb2..b0cba5bf1de 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -35,7 +35,7 @@ public class ClusterStateTests extends ESTestCase { final Version version = Version.CURRENT; final DiscoveryNode node1 = new DiscoveryNode("node1", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), version); final DiscoveryNode node2 = new DiscoveryNode("node2", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), version); - final DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).put(node2).build(); + final DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).build(); ClusterName name = ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY); ClusterState noMaster1 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build(); ClusterState noMaster2 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java index b0bc3ee2e4e..ed7a20dc87e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/NodeConnectionsServiceTests.java @@ -73,7 +73,7 @@ public class 
NodeConnectionsServiceTests extends ESTestCase { private ClusterState clusterStateFromNodes(List nodes) { final DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); for (DiscoveryNode node : nodes) { - builder.put(node); + builder.add(node); } return ClusterState.builder(new ClusterName("test")).nodes(builder).build(); } diff --git a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java index 31197e0a9a4..1f98f2cdc93 100644 --- a/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java @@ -154,7 +154,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa private ClusterState createClusterStateWithStartedShards(String reason) { int numberOfNodes = 1 + numberOfReplicas; DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); - IntStream.rangeClosed(1, numberOfNodes).mapToObj(node -> newNode("node" + node)).forEach(nodes::put); + IntStream.rangeClosed(1, numberOfNodes).mapToObj(node -> newNode("node" + node)).forEach(nodes::add); ClusterState stateAfterAddingNode = ClusterState.builder(clusterState).nodes(nodes).build(); RoutingTable afterReroute = diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index f6f7aaf3228..f1f20511fcc 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -129,7 +129,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase { // create one that won't fail ClusterState clusterState = 
ClusterState.builder(createClusterState("source", numShards, 0, - Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().put(newNode("node1"))) + Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) .build(); AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), @@ -157,7 +157,7 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase { .put("index.blocks.write", true) .put("index.similarity.default.type", "BM25") .put("index.analysis.analyzer.my_analyzer.tokenizer", "keyword") - .build())).nodes(DiscoveryNodes.builder().put(newNode("node1"))) + .build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) .build(); AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), diff --git a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java index ec741a908c5..d6a83108d0f 100644 --- a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java @@ -123,13 +123,13 @@ public class DiscoveryNodesTests extends ESTestCase { DiscoveryNode masterB = randomBoolean() ? null : RandomPicks.randomFrom(random(), nodesB); DiscoveryNodes.Builder builderA = DiscoveryNodes.builder(); - nodesA.stream().forEach(builderA::put); + nodesA.stream().forEach(builderA::add); final String masterAId = masterA == null ? 
null : masterA.getId(); builderA.masterNodeId(masterAId); builderA.localNodeId(RandomPicks.randomFrom(random(), nodesA).getId()); DiscoveryNodes.Builder builderB = DiscoveryNodes.builder(); - nodesB.stream().forEach(builderB::put); + nodesB.stream().forEach(builderB::add); final String masterBId = masterB == null ? null : masterB.getId(); builderB.masterNodeId(masterBId); builderB.localNodeId(RandomPicks.randomFrom(random(), nodesB).getId()); @@ -186,7 +186,7 @@ public class DiscoveryNodesTests extends ESTestCase { DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); List nodesList = randomNodes(numNodes); for (DiscoveryNode node : nodesList) { - discoBuilder = discoBuilder.put(node); + discoBuilder = discoBuilder.add(node); } discoBuilder.localNodeId(randomFrom(nodesList).getId()); discoBuilder.masterNodeId(randomFrom(nodesList).getId()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java index 14f4ccf5581..8ce039e9260 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationServiceTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.routing; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; @@ -30,12 +29,9 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; -import 
org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.test.ESAllocationTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -45,8 +41,6 @@ import org.junit.Before; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import static java.util.Collections.singleton; @@ -96,7 +90,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build(); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).localNodeId("node1").masterNodeId("node1")) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).localNodeId("node1").masterNodeId("node1")) .build(); clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build(); // starting primaries @@ -113,10 +107,11 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()).remove("node2"); boolean nodeAvailableForAllocation = randomBoolean(); if (nodeAvailableForAllocation) { - nodes.put(newNode("node3")); + nodes.add(newNode("node3")); } clusterState = ClusterState.builder(clusterState).nodes(nodes).build(); - clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build(); + clusterState = 
ClusterState.builder(clusterState).routingResult( + allocationService.deassociateDeadNodes(clusterState, true, "reroute")).build(); ClusterState newState = clusterState; List unassignedShards = newState.getRoutingTable().shardsWithState(ShardRoutingState.UNASSIGNED); if (nodeAvailableForAllocation) { @@ -142,7 +137,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build(); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).localNodeId("node1").masterNodeId("node1")) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).localNodeId("node1").masterNodeId("node1")) .build(); final long baseTimestampNanos = System.nanoTime(); allocationService.setNanoTimeOverride(baseTimestampNanos); @@ -169,7 +164,8 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { // remove node that has replica and reroute clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(nodeId)).build(); - clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build(); + clusterState = ClusterState.builder(clusterState).routingResult( + allocationService.deassociateDeadNodes(clusterState, true, "reroute")).build(); ClusterState stateWithDelayedShard = clusterState; // make sure the replica is marked as delayed (i.e. 
not reallocated) assertEquals(1, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShard)); @@ -239,8 +235,8 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("short_delay")).addAsNew(metaData.index("long_delay")).build()) .nodes(DiscoveryNodes.builder() - .put(newNode("node0", singleton(DiscoveryNode.Role.MASTER))).localNodeId("node0").masterNodeId("node0") - .put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build(); + .add(newNode("node0", singleton(DiscoveryNode.Role.MASTER))).localNodeId("node0").masterNodeId("node0") + .add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build(); // allocate shards clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build(); // start primaries @@ -284,7 +280,8 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { .build(); // make sure both replicas are marked as delayed (i.e. 
not reallocated) allocationService.setNanoTimeOverride(baseTimestampNanos); - clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "reroute")).build(); + clusterState = ClusterState.builder(clusterState).routingResult( + allocationService.deassociateDeadNodes(clusterState, true, "reroute")).build(); final ClusterState stateWithDelayedShards = clusterState; assertEquals(2, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShards)); RoutingNodes.UnassignedShards.UnassignedIterator iter = stateWithDelayedShards.getRoutingNodes().unassigned().iterator(); @@ -398,7 +395,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { .build()).build(); clusterState = ClusterState.builder(clusterState) .nodes(DiscoveryNodes.builder() - .put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4")) + .add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4")) .localNodeId("node1").masterNodeId("node1")) .build(); final long nodeLeftTimestampNanos = System.nanoTime(); @@ -425,7 +422,8 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { // remove node that has replica and reroute clusterState = ClusterState.builder(clusterState).nodes( DiscoveryNodes.builder(clusterState.nodes()).remove(nodeIdOfFooReplica)).build(); - clusterState = ClusterState.builder(clusterState).routingResult(allocationService.reroute(clusterState, "fake node left")).build(); + clusterState = ClusterState.builder(clusterState).routingResult( + allocationService.deassociateDeadNodes(clusterState, true, "fake node left")).build(); ClusterState stateWithDelayedShard = clusterState; // make sure the replica is marked as delayed (i.e. 
not reallocated) assertEquals(1, UnassignedInfo.getNumberOfDelayedUnassigned(stateWithDelayedShard)); @@ -469,7 +467,7 @@ public class DelayedAllocationServiceTests extends ESAllocationTestCase { clusterState = ClusterState.builder(stateWithDelayedShard).nodes( DiscoveryNodes.builder(stateWithDelayedShard.nodes()).remove(nodeIdOfBarReplica)).build(); ClusterState stateWithShorterDelay = ClusterState.builder(clusterState).routingResult( - allocationService.reroute(clusterState, "fake node left")).build(); + allocationService.deassociateDeadNodes(clusterState, true, "fake node left")).build(); delayedAllocationService.setNanoTimeOverride(clusterChangeEventTimestampNanos); delayedAllocationService.clusterChanged( new ClusterChangedEvent("fake node left", stateWithShorterDelay, stateWithDelayedShard)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java index 32072282d6f..9e81f81e43b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryTermsTests.java @@ -90,7 +90,7 @@ public class PrimaryTermsTests extends ESAllocationTestCase { logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1); Builder discoBuilder = DiscoveryNodes.builder(); for (int i = 0; i < this.numberOfReplicas + 1; i++) { - discoBuilder = discoBuilder.put(newNode("node" + i)); + discoBuilder = discoBuilder.add(newNode("node" + i)); } this.clusterState = ClusterState.builder(clusterState).nodes(discoBuilder).build(); RoutingAllocation.Result rerouteResult = allocationService.reroute(clusterState, "reroute"); @@ -161,7 +161,7 @@ public class PrimaryTermsTests extends ESAllocationTestCase { final int newNodes = randomInt(10); logger.info("adding [{}] nodes", newNodes); for (int i = 0; i < newNodes; i++) { - nodesBuilder.put(newNode("extra_" + i)); + 
nodesBuilder.add(newNode("extra_" + i)); } this.clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build(); RoutingAllocation.Result rerouteResult = allocationService.reroute(this.clusterState, "nodes added"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java index 9da5e76ed1f..0f3ad8001c3 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java @@ -83,7 +83,7 @@ public class RoutingTableTests extends ESAllocationTestCase { logger.info("adding {} nodes and performing rerouting", this.numberOfReplicas + 1); Builder discoBuilder = DiscoveryNodes.builder(); for (int i = 0; i < this.numberOfReplicas + 1; i++) { - discoBuilder = discoBuilder.put(newNode("node" + i)); + discoBuilder = discoBuilder.add(newNode("node" + i)); } this.clusterState = ClusterState.builder(clusterState).nodes(discoBuilder).build(); RoutingAllocation.Result rerouteResult = ALLOCATION_SERVICE.reroute(clusterState, "reroute"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java index 0854d27e208..f35bd6d5597 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java @@ -26,18 +26,18 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.io.stream.ByteBufferStreamInput; -import org.elasticsearch.snapshots.SnapshotId; import 
org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.io.stream.ByteBufferStreamInput; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.snapshots.Snapshot; +import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.test.ESAllocationTestCase; import java.io.IOException; @@ -175,7 +175,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index(index)).build()).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build(); // starting primaries clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build(); @@ -215,7 +215,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build(); - clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build(); // starting primaries clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build(); @@ -224,7 +224,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false)); // remove node2 and reroute clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build(); - clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build(); + clusterState = ClusterState.builder(clusterState).routingResult(allocation.deassociateDeadNodes(clusterState, true, "reroute")).build(); // verify that NODE_LEAVE is the reason for meta assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(true)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1)); @@ -244,7 +244,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); clusterState = 
ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build(); // starting primaries clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING))).build(); @@ -294,7 +294,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("test1")).addAsNew(metaData.index("test2")).build()).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build(); assertThat(UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(0)); // starting primaries @@ -305,7 +305,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { // remove node2 and reroute clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build(); // make sure both replicas are marked as delayed (i.e. 
not reallocated) - clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build(); + clusterState = ClusterState.builder(clusterState).routingResult(allocation.deassociateDeadNodes(clusterState, true, "reroute")).build(); assertThat(clusterState.prettyPrint(), UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(2)); } @@ -322,7 +322,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(RoutingTable.builder().addAsNew(metaData.index("test1")).addAsNew(metaData.index("test2")).build()).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build(); assertThat(UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(0)); // starting primaries @@ -334,7 +334,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { final long baseTime = System.nanoTime(); allocation.setNanoTimeOverride(baseTime); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node2")).build(); - clusterState = ClusterState.builder(clusterState).routingResult(allocation.reroute(clusterState, "reroute")).build(); + clusterState = ClusterState.builder(clusterState).routingResult(allocation.deassociateDeadNodes(clusterState, true, "reroute")).build(); final long delta = randomBoolean() ? 
0 : randomInt((int) expectMinDelaySettingsNanos - 1); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java index b3679208276..9cfac5da16b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java @@ -55,8 +55,8 @@ public class ActiveAllocationIdTests extends ESAllocationTestCase { .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("adding three nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put( - newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add( + newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build(); @@ -83,7 +83,7 @@ public class ActiveAllocationIdTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) .remove("node1")) .build(); - rerouteResult = allocation.reroute(clusterState, "reroute"); + rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute"); clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build(); assertThat(clusterState.metaData().index("test").activeAllocationIds(0).size(), equalTo(2)); @@ -92,7 +92,7 @@ public class ActiveAllocationIdTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) .remove("node2").remove("node3")) .build(); - 
rerouteResult = allocation.reroute(clusterState, "reroute"); + rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute"); clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build(); // active allocation ids should not be updated diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java index cf90379a50f..687343b16d9 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java @@ -108,7 +108,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { logger.info("now, start one more node, check that rebalancing will happen because we set it to always"); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); - nodes.put(newNode("node2")); + nodes.add(newNode("node2")); clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build(); RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); @@ -178,7 +178,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { logger.info("now, start one more node, check that rebalancing will happen because we set it to always"); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); - nodes.put(newNode("node2")); + nodes.add(newNode("node2")); clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build(); RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); @@ -257,7 +257,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { logger.info("now, start [{}] more node, check that rebalancing will happen because we set it to always", numNodes); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); for (int i = 0; i < 
numNodes; i++) { - nodes.put(newNode("node" + (i + nodeOffset))); + nodes.add(newNode("node" + (i + nodeOffset))); } clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build(); @@ -304,7 +304,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase { logger.info("start {} nodes", numberOfNodes); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); for (int i = 0; i < numberOfNodes; i++) { - nodes.put(newNode("node" + i)); + nodes.add(newNode("node" + i)); } ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(routingTable).build(); routingTable = service.reroute(clusterState, "reroute").routingTable(); @@ -397,6 +397,8 @@ public class AddIncrementallyTests extends ESAllocationTestCase { } clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build(); + clusterState = ClusterState.builder(clusterState) + .routingResult(service.deassociateDeadNodes(clusterState, true, "reroute")).build(); RoutingNodes routingNodes = clusterState.getRoutingNodes(); logger.info("start all the primary shards, replicas will start initializing"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java index 79880d9f4d2..ed91d98e532 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -51,11 +51,8 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardNotFoundException; -import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.test.ESAllocationTestCase; 
-import java.util.Collections; - import static java.util.Collections.singleton; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; @@ -82,7 +79,7 @@ public class AllocationCommandsTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); @@ -141,10 +138,10 @@ public class AllocationCommandsTests extends ESAllocationTestCase { logger.info("--> adding 3 nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) - .put(newNode("node3")) - .put(newNode("node4", singleton(DiscoveryNode.Role.MASTER))) + .add(newNode("node1")) + .add(newNode("node2")) + .add(newNode("node3")) + .add(newNode("node4", singleton(DiscoveryNode.Role.MASTER))) ).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); @@ -263,9 +260,9 @@ public class AllocationCommandsTests extends ESAllocationTestCase { logger.info("--> adding 3 nodes"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) 
- .put(newNode("node3")) + .add(newNode("node1")) + .add(newNode("node2")) + .add(newNode("node3")) ).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java index 86f3bd5704b..9b5b8db7acf 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java @@ -67,7 +67,7 @@ public class AllocationPriorityTests extends ESAllocationTestCase { .build(); ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java index d78156b2217..13ade5265a6 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java @@ -72,8 +72,8 @@ public class AwarenessAllocationTests extends 
ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -91,7 +91,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3", singletonMap("rack_id", "2"))) + .add(newNode("node3", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -111,7 +111,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, make sure nothing moves"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4", singletonMap("rack_id", "3"))) + .add(newNode("node4", singletonMap("rack_id", "3"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); assertThat(routingTable, sameInstance(clusterState.routingTable())); @@ -140,9 +140,9 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) - .put(newNode("node3", 
singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node3", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -160,7 +160,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4", singletonMap("rack_id", "2"))) + .add(newNode("node4", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -180,7 +180,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, make sure nothing moves"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node5", singletonMap("rack_id", "3"))) + .add(newNode("node5", singletonMap("rack_id", "3"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); assertThat(routingTable, sameInstance(clusterState.routingTable())); @@ -214,8 +214,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = 
ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -239,7 +239,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3", singletonMap("rack_id", "2"))) + .add(newNode("node3", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -264,7 +264,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, some more relocation should happen"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4", singletonMap("rack_id", "3"))) + .add(newNode("node4", singletonMap("rack_id", "3"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -305,8 +305,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -324,7 +324,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3", singletonMap("rack_id", "2"))) + .add(newNode("node3", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -353,7 +353,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, some more relocation should happen"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4", singletonMap("rack_id", "3"))) + .add(newNode("node4", singletonMap("rack_id", "3"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -396,8 +396,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -415,7 +415,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3", singletonMap("rack_id", "2"))) + .add(newNode("node3", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); 
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -435,7 +435,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, we will have another relocation"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4", singletonMap("rack_id", "3"))) + .add(newNode("node4", singletonMap("rack_id", "3"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -474,10 +474,10 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) - .put(newNode("node3", singletonMap("rack_id", "1"))) - .put(newNode("node4", singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node3", singletonMap("rack_id", "1"))) + .add(newNode("node4", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -495,7 +495,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node5", singletonMap("rack_id", "2"))) + .add(newNode("node5", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = 
ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -515,7 +515,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, we will have another relocation"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node6", singletonMap("rack_id", "3"))) + .add(newNode("node6", singletonMap("rack_id", "3"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -555,8 +555,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -572,7 +572,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3", singletonMap("rack_id", "2"))) + .add(newNode("node3", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -592,7 +592,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, make sure nothing moves"); clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4", singletonMap("rack_id", "3"))) + .add(newNode("node4", singletonMap("rack_id", "3"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); assertThat(routingTable, sameInstance(clusterState.routingTable())); @@ -622,9 +622,9 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) - .put(newNode("node3", singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node3", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -640,7 +640,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4", singletonMap("rack_id", "2"))) + .add(newNode("node4", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -660,7 +660,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, make sure nothing moves"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node5", singletonMap("rack_id", "3"))) + .add(newNode("node5", singletonMap("rack_id", "3"))) 
).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); assertThat(routingTable, sameInstance(clusterState.routingTable())); @@ -697,8 +697,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("rack_id", "1"))) - .put(newNode("node2", singletonMap("rack_id", "1"))) + .add(newNode("node1", singletonMap("rack_id", "1"))) + .add(newNode("node2", singletonMap("rack_id", "1"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -712,7 +712,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node with a new rack and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3", singletonMap("rack_id", "2"))) + .add(newNode("node3", singletonMap("rack_id", "2"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -736,7 +736,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add another node with a new rack, some more relocation should happen"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4", singletonMap("rack_id", "3"))) + .add(newNode("node4", singletonMap("rack_id", "3"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -776,8 +776,8 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes in 
different zones and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("A-0", singletonMap("zone", "a"))) - .put(newNode("B-0", singletonMap("zone", "b"))) + .add(newNode("A-0", singletonMap("zone", "a"))) + .add(newNode("B-0", singletonMap("zone", "b"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -798,7 +798,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> add a new node in zone 'a' and reroute"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("A-1", singletonMap("zone", "a"))) + .add(newNode("A-1", singletonMap("zone", "a"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -837,12 +837,12 @@ public class AwarenessAllocationTests extends ESAllocationTestCase { logger.info("--> adding 5 nodes in different zones and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("A-0", singletonMap("zone", "a"))) - .put(newNode("A-1", singletonMap("zone", "a"))) - .put(newNode("A-2", singletonMap("zone", "a"))) - .put(newNode("A-3", singletonMap("zone", "a"))) - .put(newNode("A-4", singletonMap("zone", "a"))) - .put(newNode("B-0", singletonMap("zone", "b"))) + .add(newNode("A-0", singletonMap("zone", "a"))) + .add(newNode("A-1", singletonMap("zone", "a"))) + .add(newNode("A-2", singletonMap("zone", "a"))) + .add(newNode("A-3", singletonMap("zone", "a"))) + .add(newNode("A-4", singletonMap("zone", "a"))) + .add(newNode("B-0", singletonMap("zone", "b"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = 
ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java index 9d0b27eac1a..2e2eabf5063 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java @@ -46,6 +46,7 @@ import org.hamcrest.Matchers; import java.util.HashMap; import java.util.Map; + import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; @@ -129,7 +130,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { logger.info("start " + numberOfNodes + " nodes"); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); for (int i = 0; i < numberOfNodes; i++) { - nodes.put(newNode("node" + i)); + nodes.add(newNode("node" + i)); } ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(routingTable).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -165,7 +166,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { private ClusterState addNode(ClusterState clusterState, AllocationService strategy) { logger.info("now, start 1 more node, check that rebalancing will happen because we set it to always"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node" + numberOfNodes))) + .add(newNode("node" + numberOfNodes))) .build(); RoutingTable routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -191,11 +192,18 @@ public class BalanceConfigurationTests extends 
ESAllocationTestCase { logger.info("Removing half the nodes (" + (numberOfNodes + 1) / 2 + ")"); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); + boolean removed = false; for (int i = (numberOfNodes + 1) / 2; i <= numberOfNodes; i++) { nodes.remove("node" + i); + removed = true; } clusterState = ClusterState.builder(clusterState).nodes(nodes.build()).build(); + if (removed) { + clusterState = ClusterState.builder(clusterState).routingResult( + strategy.deassociateDeadNodes(clusterState, randomBoolean(), "removed nodes") + ).build(); + } RoutingNodes routingNodes = clusterState.getRoutingNodes(); logger.info("start all the primary shards, replicas will start initializing"); @@ -378,7 +386,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase { DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); for (int i = 0; i < 4; i++) { DiscoveryNode node = newNode("node" + i); - nodes.put(node); + nodes.add(node); } ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes).metaData(metaData).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java index fe264e17378..8640868bd2d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/CatAllocationTestCase.java @@ -118,7 +118,7 @@ public abstract class CatAllocationTestCase extends ESAllocationTestCase { RoutingTable routingTable = routingTableBuilder.build(); DiscoveryNodes.Builder builderDiscoNodes = DiscoveryNodes.builder(); for (String node : nodes) { - builderDiscoNodes.put(newNode(node)); + builderDiscoNodes.add(newNode(node)); } ClusterState clusterState = 
ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).nodes(builderDiscoNodes.build()).build(); if (balanceFirst()) { diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java index 458432ff78e..e52738fcc4d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java @@ -64,7 +64,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -121,7 +121,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { logger.info("now, start 1 more node, check that rebalancing will happen (for test1) because we set it to always"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3"))) + .add(newNode("node3"))) .build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -150,7 +150,7 @@ public class 
ClusterRebalanceRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -226,7 +226,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to primaries_active"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3"))) + .add(newNode("node3"))) .build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -254,7 +254,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = 
ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -311,7 +311,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to primaries_active"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3"))) + .add(newNode("node3"))) .build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -338,7 +338,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -433,7 +433,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { logger.info("now, start 1 more node, check that rebalancing happen (for test1) because we set it to all_active"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3"))) + .add(newNode("node3"))) .build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -461,7 +461,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { ClusterState clusterState = 
ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -518,7 +518,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3"))) + .add(newNode("node3"))) .build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -545,7 +545,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -621,7 +621,7 @@ public class 
ClusterRebalanceRoutingTests extends ESAllocationTestCase { logger.info("now, start 1 more node, check that rebalancing will not happen (for test1) because we set it to all_active"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3"))) + .add(newNode("node3"))) .build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -665,7 +665,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -686,7 +686,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { logger.debug("now, start 1 more node, check that rebalancing will not happen since we unassigned shards"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node2"))) + .add(newNode("node2"))) .build(); logger.debug("reroute and check that nothing has changed"); RoutingAllocation.Result reroute = strategy.reroute(clusterState, "reroute"); @@ -764,7 +764,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -785,7 +785,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase { logger.debug("now, start 1 more node, check that rebalancing will not happen since we have shard sync going on"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node2"))) + .add(newNode("node2"))) .build(); logger.debug("reroute and check that nothing has changed"); RoutingAllocation.Result reroute = strategy.reroute(clusterState, "reroute"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java index 5d8cdcb838f..6b330fa738c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ConcurrentRebalanceRoutingTests.java @@ -69,7 +69,7 @@ public class ConcurrentRebalanceRoutingTests extends ESAllocationTestCase { } logger.info("start two nodes and fully start the shards"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -93,7 +93,7 @@ public class ConcurrentRebalanceRoutingTests 
extends ESAllocationTestCase { logger.info("now, start 8 more nodes, and check that no rebalancing/relocation have happened"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3")).put(newNode("node4")).put(newNode("node5")).put(newNode("node6")).put(newNode("node7")).put(newNode("node8")).put(newNode("node9")).put(newNode("node10"))) + .add(newNode("node3")).add(newNode("node4")).add(newNode("node5")).add(newNode("node6")).add(newNode("node7")).add(newNode("node8")).add(newNode("node9")).add(newNode("node10"))) .build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java index 856d8361779..914b6a5d916 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java @@ -60,8 +60,8 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { logger.info("--> adding 2 nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); @@ -84,10 +84,10 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { String nodeIdToFail = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(); String nodeIdRemaining = nodeIdToFail.equals("node1") ? 
"node2" : "node1"; clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode(nodeIdRemaining)) + .add(newNode(nodeIdRemaining)) ).build(); - rerouteResult = allocation.reroute(clusterState, "reroute"); + rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); assertThat(clusterState.getRoutingNodes().node(nodeIdRemaining).iterator().next().primary(), equalTo(true)); @@ -111,8 +111,8 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { logger.info("--> adding 2 nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); @@ -133,7 +133,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { logger.info("--> adding additional node"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3")) + .add(newNode("node3")) ).build(); rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); @@ -158,10 +158,10 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { logger.info("--> fail primary shard recovering instance on node3 being initialized by killing node3"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode(origPrimaryNodeId)) - .put(newNode(origReplicaNodeId)) + .add(newNode(origPrimaryNodeId)) + .add(newNode(origReplicaNodeId)) ).build(); - rerouteResult = allocation.reroute(clusterState, "reroute"); + rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute"); 
clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); assertThat(clusterState.getRoutingNodes().node(origPrimaryNodeId).iterator().next().state(), equalTo(STARTED)); @@ -185,8 +185,8 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { logger.info("--> adding 2 nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); @@ -207,7 +207,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { logger.info("--> adding additional node"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3")) + .add(newNode("node3")) ).build(); rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); @@ -232,10 +232,10 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase { logger.info("--> fail primary shard recovering instance on 'origPrimaryNodeId' being relocated"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node3")) - .put(newNode(origReplicaNodeId)) + .add(newNode("node3")) + .add(newNode(origReplicaNodeId)) ).build(); - rerouteResult = allocation.reroute(clusterState, "reroute"); + rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); assertThat(clusterState.getRoutingNodes().node(origReplicaNodeId).iterator().next().state(), equalTo(STARTED)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java index e9f487b1e10..bcf5be90d9d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DecisionsImpactOnClusterHealthTests.java @@ -143,7 +143,7 @@ public class DecisionsImpactOnClusterHealthTests extends ESAllocationTestCase { // any allocations on it final DiscoveryNodes.Builder discoveryNodes = DiscoveryNodes.builder(); for (int i = 0; i < numShards; i++) { - discoveryNodes.put(newNode("node" + i)); + discoveryNodes.add(newNode("node" + i)); } clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java index 83a4c17a631..f3aa1a26527 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ElectReplicaAsPrimaryDuringRelocationTests.java @@ -58,7 +58,7 @@ public class ElectReplicaAsPrimaryDuringRelocationTests extends ESAllocationTest ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, 
"reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -82,7 +82,7 @@ public class ElectReplicaAsPrimaryDuringRelocationTests extends ESAllocationTest assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), equalTo(2)); logger.info("Start another node and perform rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -100,7 +100,7 @@ public class ElectReplicaAsPrimaryDuringRelocationTests extends ESAllocationTest logger.info("kill the node [{}] of the primary shard for the relocating replica", indexShardRoutingTable.primaryShard().currentNodeId()); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(indexShardRoutingTable.primaryShard().currentNodeId())).build(); prevRoutingTable = routingTable; - routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); logger.info("make sure all the primary shards are active"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java index 7d2ffb07ddf..b6a934b0666 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java +++ 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ExpectedShardSizeAllocationTests.java @@ -80,7 +80,7 @@ public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding one node and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -95,7 +95,7 @@ public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase { assertEquals(1, clusterState.getRoutingNodes().unassigned().size()); logger.info("Add another one node and reroute"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -133,7 +133,7 @@ public class ExpectedShardSizeAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java index 3b5332eb237..2dae0c6c2d6 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -57,7 +57,7 @@ public class FailedNodeRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start 4 nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -90,7 +90,7 @@ public class FailedNodeRoutingTests extends ESAllocationTestCase { ) .build(); prevRoutingTable = routingTable; - routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); 
routingNodes = clusterState.getRoutingNodes(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index 0b9e20b3578..e66e35635e7 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -73,8 +73,8 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { logger.info("--> adding 2 nodes on same rack and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); @@ -95,7 +95,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { logger.info("--> adding additional node"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3")) + .add(newNode("node3")) ).build(); rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingTable(rerouteResult.routingTable()).build(); @@ -163,7 +163,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable 
prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -241,7 +241,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding single node and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -296,7 +296,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { logger.info("Adding {} nodes and performing rerouting", numberOfReplicas + 1); DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(); for (int i = 0; i < numberOfReplicas + 1; i++) { - nodeBuilder.put(newNode("node" + Integer.toString(i))); + nodeBuilder.add(newNode("node" + Integer.toString(i))); } clusterState = ClusterState.builder(clusterState).nodes(nodeBuilder).build(); while (!clusterState.routingTable().shardsWithState(UNASSIGNED).isEmpty()) { @@ -362,7 +362,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + 
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -419,7 +419,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -461,7 +461,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { } logger.info("Adding third node and reroute"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -510,7 +510,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { ClusterState clusterState = 
ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); // add 4 nodes - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build(); clusterState = ClusterState.builder(clusterState).routingTable(allocation.reroute(clusterState, "reroute").routingTable()).build(); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2)); @@ -552,7 +552,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); // add 4 nodes - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).put(newNode("node4"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).add(newNode("node4"))).build(); clusterState = ClusterState.builder(clusterState).routingTable(allocation.reroute(clusterState, "reroute").routingTable()).build(); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterAllocationDeciderTests.java 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterAllocationDeciderTests.java index 0255e18e529..a1f5f92e0c7 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterAllocationDeciderTests.java @@ -116,7 +116,7 @@ public class FilterAllocationDeciderTests extends ESAllocationTestCase { RoutingTable routingTable = routingTableBuilder.build(); ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))) + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) .build(); routingTable = service.reroute(clusterState, "reroute", false).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java index ef0a589e50a..84b491dfa7b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FilterRoutingTests.java @@ -66,10 +66,10 @@ public class FilterRoutingTests extends ESAllocationTestCase { logger.info("--> adding four nodes and performing rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("tag1", "value1"))) - .put(newNode("node2", singletonMap("tag1", "value2"))) - .put(newNode("node3", singletonMap("tag1", "value3"))) - .put(newNode("node4", singletonMap("tag1", "value4"))) + 
.add(newNode("node1", singletonMap("tag1", "value1"))) + .add(newNode("node2", singletonMap("tag1", "value2"))) + .add(newNode("node3", singletonMap("tag1", "value3"))) + .add(newNode("node4", singletonMap("tag1", "value4"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -114,10 +114,10 @@ public class FilterRoutingTests extends ESAllocationTestCase { logger.info("--> adding two nodes and performing rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("tag1", "value1"))) - .put(newNode("node2", singletonMap("tag1", "value2"))) - .put(newNode("node3", singletonMap("tag1", "value3"))) - .put(newNode("node4", singletonMap("tag1", "value4"))) + .add(newNode("node1", singletonMap("tag1", "value1"))) + .add(newNode("node2", singletonMap("tag1", "value2"))) + .add(newNode("node3", singletonMap("tag1", "value3"))) + .add(newNode("node4", singletonMap("tag1", "value4"))) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -185,7 +185,7 @@ public class FilterRoutingTests extends ESAllocationTestCase { logger.info("--> adding two nodes and performing rerouting"); DiscoveryNode node1 = newNode("node1", singletonMap("tag1", "value1")); DiscoveryNode node2 = newNode("node2", singletonMap("tag1", "value2")); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(node1).put(node2)).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(node1).add(node2)).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); 
assertThat(clusterState.getRoutingNodes().node(node1.getId()).numberOfShardsWithState(INITIALIZING), equalTo(2)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java index 79236ddd2b7..9b93e556b31 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java @@ -82,7 +82,7 @@ public class IndexBalanceTests extends ESAllocationTestCase { logger.info("Adding three node and performing rerouting"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build(); + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -211,7 +211,7 @@ public class IndexBalanceTests extends ESAllocationTestCase { } logger.info("Adding one node and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -230,7 +230,7 @@ public class IndexBalanceTests extends ESAllocationTestCase { logger.info("Add another node and perform rerouting, nothing will happen since primary not started"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); prevRoutingTable = routingTable; routingTable = 
strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -294,7 +294,7 @@ public class IndexBalanceTests extends ESAllocationTestCase { logger.info("Add another node and perform rerouting, nothing will happen since primary not started"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -363,7 +363,7 @@ public class IndexBalanceTests extends ESAllocationTestCase { logger.info("Adding three node and performing rerouting"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build(); + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java index a5acb11ce4b..133da3c005f 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/MaxRetryAllocationDeciderTests.java @@ -28,8 +28,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; import 
org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; -import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; -import org.elasticsearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; @@ -66,7 +64,7 @@ public class MaxRetryAllocationDeciderTests extends ESAllocationTestCase { RoutingTable routingTable = routingTableBuilder.build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData).routingTable(routingTable).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))) + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) .build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute", false).routingTable(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java index 9859cd4d570..9050a91222d 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java @@ -105,7 +105,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { } logger.info("start two nodes and fully start the shards"); - clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -145,7 +145,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { } clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3", VersionUtils.getPreviousVersion()))) + .add(newNode("node3", VersionUtils.getPreviousVersion()))) .build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -161,7 +161,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4"))) + .add(newNode("node4"))) .build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -230,7 +230,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { } } for (DiscoveryNode node : nodes) { - nodesBuilder.put(node); + nodesBuilder.add(node); } clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build(); clusterState = stabilize(clusterState, service); @@ -267,29 +267,29 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(i).shards().get(2).currentNodeId(), nullValue()); } clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("old0", VersionUtils.getPreviousVersion())) - .put(newNode("old1", VersionUtils.getPreviousVersion())) - 
.put(newNode("old2", VersionUtils.getPreviousVersion()))).build(); + .add(newNode("old0", VersionUtils.getPreviousVersion())) + .add(newNode("old1", VersionUtils.getPreviousVersion())) + .add(newNode("old2", VersionUtils.getPreviousVersion()))).build(); clusterState = stabilize(clusterState, service); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("old0", VersionUtils.getPreviousVersion())) - .put(newNode("old1", VersionUtils.getPreviousVersion())) - .put(newNode("new0"))).build(); + .add(newNode("old0", VersionUtils.getPreviousVersion())) + .add(newNode("old1", VersionUtils.getPreviousVersion())) + .add(newNode("new0"))).build(); clusterState = stabilize(clusterState, service); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node0", VersionUtils.getPreviousVersion())) - .put(newNode("new1")) - .put(newNode("new0"))).build(); + .add(newNode("node0", VersionUtils.getPreviousVersion())) + .add(newNode("new1")) + .add(newNode("new0"))).build(); clusterState = stabilize(clusterState, service); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("new2")) - .put(newNode("new1")) - .put(newNode("new0"))).build(); + .add(newNode("new2")) + .add(newNode("new1")) + .add(newNode("new0"))).build(); clusterState = stabilize(clusterState, service); routingTable = clusterState.routingTable(); @@ -334,7 +334,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTable) - .nodes(DiscoveryNodes.builder().put(newNode).put(oldNode1).put(oldNode2)).build(); + .nodes(DiscoveryNodes.builder().add(newNode).add(oldNode1).add(oldNode2)).build(); AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, new AllocationDecider[] {new 
NodeVersionAllocationDecider(Settings.EMPTY)}); AllocationService strategy = new MockAllocationService(Settings.EMPTY, allocationDeciders, @@ -365,7 +365,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { .routingTable(RoutingTable.builder().addAsRestore(metaData.index("test"), new RestoreSource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), Version.CURRENT, "test")).build()) - .nodes(DiscoveryNodes.builder().put(newNode).put(oldNode1).put(oldNode2)).build(); + .nodes(DiscoveryNodes.builder().add(newNode).add(oldNode1).add(oldNode2)).build(); AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{ new ReplicaAfterPrimaryActiveAllocationDecider(Settings.EMPTY), new NodeVersionAllocationDecider(Settings.EMPTY)}); @@ -383,7 +383,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase { private ClusterState stabilize(ClusterState clusterState, AllocationService service) { logger.trace("RoutingNodes: {}", clusterState.getRoutingNodes().prettyPrint()); - RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); + RoutingTable routingTable = service.deassociateDeadNodes(clusterState, true, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertRecoveryNodeVersions(routingNodes); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java index 3812a550169..9c24993a723 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java +++ 
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferLocalPrimariesToRelocatingPrimariesTests.java @@ -66,8 +66,8 @@ public class PreferLocalPrimariesToRelocatingPrimariesTests extends ESAllocation logger.info("adding two nodes and performing rerouting till all are allocated"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", singletonMap("tag1", "value1"))) - .put(newNode("node2", singletonMap("tag1", "value2")))).build(); + .add(newNode("node1", singletonMap("tag1", "value1"))) + .add(newNode("node2", singletonMap("tag1", "value2")))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -92,7 +92,7 @@ public class PreferLocalPrimariesToRelocatingPrimariesTests extends ESAllocation .build())) .build(); clusterState = ClusterState.builder(clusterState).metaData(metaData).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build(); - routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); logger.info("[{}] primaries should be still started but [{}] other primaries should be unassigned", numberOfShards, numberOfShards); @@ -102,7 +102,7 @@ public class PreferLocalPrimariesToRelocatingPrimariesTests extends ESAllocation logger.info("start node back up"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node1", singletonMap("tag1", "value1")))).build(); + .add(newNode("node1", singletonMap("tag1", "value1")))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git 
a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java index decf0ed0ea3..b9ac52c69a4 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PreferPrimaryAllocationTests.java @@ -62,7 +62,7 @@ public class PreferPrimaryAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("adding two nodes and performing rerouting till all are allocated"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java index b5f1ab7235c..cca0a5345d0 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryElectionRoutingTests.java @@ -58,11 +58,11 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding 
two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); RoutingAllocation.Result result = strategy.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingResult(result).build(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); result = strategy.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingResult(result).build(); @@ -77,9 +77,9 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).routingResult(result).build(); logger.info("Adding third node and reroute and kill first node"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3")).remove("node1")).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3")).remove("node1")).build(); RoutingTable prevRoutingTable = clusterState.routingTable(); - result = strategy.reroute(clusterState, "reroute"); + result = strategy.deassociateDeadNodes(clusterState, true, "reroute"); clusterState = ClusterState.builder(clusterState).routingResult(result).build(); routingNodes = clusterState.getRoutingNodes(); routingTable = clusterState.routingTable(); @@ -111,7 +111,7 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase { ClusterState clusterState = 
ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingAllocation.Result rerouteResult = allocation.reroute(clusterState, "reroute"); clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build(); @@ -131,9 +131,9 @@ public class PrimaryElectionRoutingTests extends ESAllocationTestCase { String nodeIdToFail = clusterState.routingTable().index("test").shard(0).primaryShard().currentNodeId(); String nodeIdRemaining = nodeIdToFail.equals("node1") ? "node2" : "node1"; clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode(nodeIdRemaining)) + .add(newNode(nodeIdRemaining)) ).build(); - rerouteResult = allocation.reroute(clusterState, "reroute"); + rerouteResult = allocation.deassociateDeadNodes(clusterState, true, "reroute"); clusterState = ClusterState.builder(clusterState).routingResult(rerouteResult).build(); routingNodes = clusterState.getRoutingNodes(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java index 264a009650e..609d8324561 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/PrimaryNotRelocatedWhileBeingRecoveredTests.java @@ -61,7 +61,7 @@ public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ESAllocationTes 
ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -73,7 +73,7 @@ public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ESAllocationTes assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(5)); logger.info("start another node, replica will start recovering form primary"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -81,7 +81,7 @@ public class PrimaryNotRelocatedWhileBeingRecoveredTests extends ESAllocationTes assertThat(routingTable.shardsWithState(INITIALIZING).size(), equalTo(5)); logger.info("start another node, make sure the primary is not relocated"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff 
--git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java index 7f62cced708..061aa901885 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RandomAllocationDeciderTests.java @@ -93,19 +93,25 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase { int numNodes = scaledRandomIntBetween(1, 3); for (int j = 0; j < numNodes; j++) { logger.info("adding node [{}]", nodeIdCounter); - newNodesBuilder.put(newNode("NODE_" + (nodeIdCounter++))); + newNodesBuilder.add(newNode("NODE_" + (nodeIdCounter++))); } } + boolean nodesRemoved = false; if (nodeIdCounter > 1 && rarely()) { int nodeId = scaledRandomIntBetween(0, nodeIdCounter - 2); logger.info("removing node [{}]", nodeId); newNodesBuilder.remove("NODE_" + nodeId); + nodesRemoved = true; } stateBuilder.nodes(newNodesBuilder.build()); clusterState = stateBuilder.build(); - routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + if (nodesRemoved) { + routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable(); + } else { + routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + } clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); if (clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size() > 0) { routingTable = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)) @@ -119,7 +125,7 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase { DiscoveryNodes.Builder newNodesBuilder = DiscoveryNodes.builder(clusterState.nodes()); for (int j = 0; j < (maxNumReplicas - clusterState.nodes().getSize()); j++) { logger.info("adding node [{}]", nodeIdCounter); - 
newNodesBuilder.put(newNode("NODE_" + (nodeIdCounter++))); + newNodesBuilder.add(newNode("NODE_" + (nodeIdCounter++))); } stateBuilder.nodes(newNodesBuilder.build()); clusterState = stateBuilder.build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java index 0d743da39f3..b1d83b767b0 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java @@ -99,7 +99,7 @@ public class RebalanceAfterActiveTests extends ESAllocationTestCase { } logger.info("start two nodes and fully start the shards"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -126,7 +126,7 @@ public class RebalanceAfterActiveTests extends ESAllocationTestCase { logger.info("now, start 8 more nodes, and check that no rebalancing/relocation have happened"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3")).put(newNode("node4")).put(newNode("node5")).put(newNode("node6")).put(newNode("node7")).put(newNode("node8")).put(newNode("node9")).put(newNode("node10"))) + .add(newNode("node3")).add(newNode("node4")).add(newNode("node5")).add(newNode("node6")).add(newNode("node7")).add(newNode("node8")).add(newNode("node9")).add(newNode("node10"))) .build(); prevRoutingTable = routingTable; routingTable = 
strategy.reroute(clusterState, "reroute").routingTable(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java index aab3c8d6c2b..440d651f77b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ReplicaAllocatedAfterPrimaryTests.java @@ -68,7 +68,7 @@ public class ReplicaAllocatedAfterPrimaryTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue()); logger.info("Adding one node and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java index 4283cb8475c..cd31f75b50b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java @@ -63,7 +63,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { logger.info("Adding three node and performing rerouting"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build(); + 
.nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build(); routingNodes = clusterState.getRoutingNodes(); assertThat(assertShardStats(routingNodes), equalTo(true)); @@ -133,7 +133,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding one node and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -141,7 +141,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { logger.info("Add another node and perform rerouting, nothing will happen since primary not started"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -169,7 +169,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { logger.info("Add another node and perform rerouting, nothing will happen since primary not started"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); prevRoutingTable = routingTable; routingTable = 
strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -226,7 +226,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { logger.info("Adding three node and performing rerouting"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))).build(); + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))).build(); RoutingNodes routingNodes = clusterState.getRoutingNodes(); assertThat(assertShardStats(routingNodes), equalTo(true)); @@ -371,7 +371,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase { logger.info("kill one node"); IndexShardRoutingTable indexShardRoutingTable = routingTable.index("test").shard(0); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove(indexShardRoutingTable.primaryShard().currentNodeId())).build(); - routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); routingNodes = clusterState.getRoutingNodes(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java index e09d9790651..331adcd1468 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SameShardRoutingTests.java @@ -63,9 +63,9 @@ public class SameShardRoutingTests extends ESAllocationTestCase { logger.info("--> adding two nodes with the same host"); clusterState = ClusterState.builder(clusterState).nodes( 
DiscoveryNodes.builder() - .put(new DiscoveryNode("node1", "node1", "node1", "test1", "test1", LocalTransportAddress.buildUnique(), emptyMap(), + .add(new DiscoveryNode("node1", "node1", "node1", "test1", "test1", LocalTransportAddress.buildUnique(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT)) - .put(new DiscoveryNode("node2", "node2", "node2", "test1", "test1", LocalTransportAddress.buildUnique(), emptyMap(), + .add(new DiscoveryNode("node2", "node2", "node2", "test1", "test1", LocalTransportAddress.buildUnique(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -82,7 +82,7 @@ public class SameShardRoutingTests extends ESAllocationTestCase { logger.info("--> add another node, with a different host, replicas will be allocating"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(new DiscoveryNode("node3", "node3", "node3", "test2", "test2", LocalTransportAddress.buildUnique(), emptyMap(), + .add(new DiscoveryNode("node3", "node3", "node3", "test2", "test2", LocalTransportAddress.buildUnique(), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java index 29162aabb60..d50e44c48d9 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java @@ -57,7 +57,7 @@ public class ShardVersioningTests extends ESAllocationTestCase { ClusterState clusterState = 
ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start two nodes"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java index 67fc93b600b..a90b88fa9df 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java @@ -63,7 +63,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -107,7 +107,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase { ClusterState clusterState = 
ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding two nodes and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -170,7 +170,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("Adding one node and reroute"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -195,7 +195,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).metaData(metaData).routingTable(routingTable).build(); logger.info("Add another one node and reroute"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = 
ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java index ad9c0b8eef5..1b4d35d44d7 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java @@ -80,7 +80,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue()); logger.info("Adding one node and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -112,7 +112,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node1")); logger.info("Starting another node and making sure nothing changed"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -128,7 +128,7 @@ public class 
SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build(); prevRoutingTable = routingTable; - routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); assertThat(routingTable != prevRoutingTable, equalTo(true)); @@ -139,7 +139,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), equalTo("node2")); logger.info("Start another node, make sure that things remain the same (shard is in node2 and initializing)"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -181,7 +181,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(0).shards().get(0).currentNodeId(), nullValue()); logger.info("Adding one node and rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -244,7 
+244,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); List nodes = new ArrayList<>(); for (int i = 0; i < (numberOfIndices / 2); i++) { - nodesBuilder.put(newNode("node" + i)); + nodesBuilder.add(newNode("node" + i)); } RoutingTable prevRoutingTable = routingTable; clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build(); @@ -282,7 +282,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { logger.info("Adding additional " + (numberOfIndices / 2) + " nodes, nothing should change"); nodesBuilder = DiscoveryNodes.builder(clusterState.nodes()); for (int i = (numberOfIndices / 2); i < numberOfIndices; i++) { - nodesBuilder.put(newNode("node" + i)); + nodesBuilder.add(newNode("node" + i)); } prevRoutingTable = routingTable; clusterState = ClusterState.builder(clusterState).nodes(nodesBuilder).build(); @@ -348,7 +348,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { logger.info("Starting 3 nodes and rerouting"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3"))) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3"))) .build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -369,7 +369,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { logger.info("Start two more nodes, things should remain the same"); clusterState = ClusterState.builder(clusterState) - .nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node4")).put(newNode("node5"))) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node4")).add(newNode("node5"))) .build(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff 
--git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java index 5c791b954f9..0eb317d198b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardOneReplicaRoutingTests.java @@ -67,7 +67,7 @@ public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(0).shards().get(1).currentNodeId(), nullValue()); logger.info("Adding one node and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -84,7 +84,7 @@ public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(0).replicaShards().get(0).currentNodeId(), nullValue()); logger.info("Add another node and perform rerouting, nothing will happen since primary shards not started"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -134,7 +134,7 @@ public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase { clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).remove("node1")).build(); prevRoutingTable = routingTable; - routingTable = strategy.reroute(clusterState, "reroute").routingTable(); + routingTable = strategy.deassociateDeadNodes(clusterState, true, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); assertThat(prevRoutingTable != routingTable, equalTo(true)); @@ -150,7 +150,7 @@ public class SingleShardOneReplicaRoutingTests extends ESAllocationTestCase { logger.info("Start another node, backup shard should start initializing"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java index 4e32cd1e2cc..df169e3b893 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/StartedShardsRoutingTests.java @@ -51,7 +51,7 @@ public class StartedShardsRoutingTests extends ESAllocationTestCase { .build(); final Index index = indexMetaData.getIndex(); ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) - .nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))) + .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))) 
.metaData(MetaData.builder().put(indexMetaData, false)); final ShardRouting initShard = TestShardRouting.newShardRouting(new ShardId(index, 0), "node1", true, ShardRoutingState.INITIALIZING); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java index becd61e6eb2..556c97a6ef4 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java @@ -80,7 +80,7 @@ public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase { } logger.info("Adding one node and performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -99,7 +99,7 @@ public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase { } logger.info("Add another node and perform rerouting, nothing will happen since primary not started"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -152,7 +152,7 @@ public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase { assertThat(routingNodes.node("node2").numberOfShardsWithState(STARTED), 
equalTo(10)); logger.info("Add another node and perform rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java index 0760367445d..b94aec95ca0 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ThrottlingAllocationTests.java @@ -66,7 +66,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start one node, do reroute, only 3 should initialize"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -125,7 +125,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); 
logger.info("start one node, do reroute, only 3 should initialize"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -150,7 +150,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(5)); logger.info("start another node, replicas should start being allocated"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -193,7 +193,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start one node, do reroute, only 5 should initialize"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); assertThat(routingTable.shardsWithState(STARTED).size(), equalTo(0)); @@ -213,7 +213,7 @@ public class ThrottlingAllocationTests extends 
ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); logger.info("start another 2 nodes, 5 shards should be relocating - at most 5 are allowed per node"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2")).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2")).add(newNode("node3"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -256,7 +256,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); logger.info("start one node, do reroute, only 1 should initialize"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -273,7 +273,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { assertThat(routingTable.shardsWithState(UNASSIGNED).size(), equalTo(2)); logger.info("start one more node, first non-primary should start being allocated"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node2"))).build(); routingTable = strategy.reroute(clusterState, 
"reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -291,7 +291,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 0); logger.info("start one more node, initializing second non-primary"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -301,7 +301,7 @@ public class ThrottlingAllocationTests extends ESAllocationTestCase { assertEquals(clusterState.getRoutingNodes().getOutgoingRecoveries("node1"), 1); logger.info("start one more node"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node4"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node4"))).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java index 47142062809..103e9027386 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/UpdateNumberOfReplicasTests.java @@ -70,7 +70,7 @@ public class UpdateNumberOfReplicasTests extends ESAllocationTestCase { logger.info("Adding two nodes and 
performing rerouting"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build(); RoutingTable prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -121,7 +121,7 @@ public class UpdateNumberOfReplicasTests extends ESAllocationTestCase { assertThat(routingTable.index("test").shard(0).replicaShards().get(1).state(), equalTo(UNASSIGNED)); logger.info("Add another node and start the added replica"); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).put(newNode("node3"))).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("node3"))).build(); prevRoutingTable = routingTable; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index 87e5fd24a04..02a603fed63 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -123,8 +123,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding two nodes"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); routingTable = strategy.reroute(clusterState, 
"reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -156,7 +156,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding node3"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3")) + .add(newNode("node3")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -244,7 +244,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding node4"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4")) + .add(newNode("node4")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -324,8 +324,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding node1 and node2 node"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -395,7 +395,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding node3"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3")) + .add(newNode("node3")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -483,7 +483,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding node4"); clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node4")) + .add(newNode("node4")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -511,7 +511,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding node5"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node5")) + .add(newNode("node5")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -592,8 +592,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .routingTable(routingTable).build(); logger.info("--> adding node1"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) // node2 is added because DiskThresholdDecider automatically ignore single-node clusters + .add(newNode("node1")) + .add(newNode("node2")) // node2 is added because DiskThresholdDecider automatically ignore single-node clusters ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -662,8 +662,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { .routingTable(routingTable).build(); logger.info("--> adding node1"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node3")) // node3 is added because DiskThresholdDecider automatically ignore single-node clusters + .add(newNode("node1")) + .add(newNode("node3")) // node3 is added because DiskThresholdDecider automatically ignore single-node clusters ).build(); routingTable = 
strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -770,8 +770,8 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding two nodes"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -791,7 +791,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { logger.info("--> adding node3"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(newNode("node3")) + .add(newNode("node3")) ).build(); AllocationCommand relocate1 = new MoveAllocationCommand("test", 0, "node2", "node3"); @@ -852,7 +852,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { MASTER_DATA_ROLES, Version.CURRENT); DiscoveryNode discoveryNode2 = new DiscoveryNode("node2", new LocalTransportAddress("2"), emptyMap(), MASTER_DATA_ROLES, Version.CURRENT); - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(discoveryNode1).put(discoveryNode2).build(); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build(); ClusterState baseClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) @@ -969,7 +969,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { DiscoveryNode discoveryNode2 = new DiscoveryNode("", "node2", new LocalTransportAddress("2"), emptyMap(), singleton(DiscoveryNode.Role.DATA), Version.CURRENT); - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(discoveryNode1).put(discoveryNode2).build(); + DiscoveryNodes discoveryNodes 
= DiscoveryNodes.builder().add(discoveryNode1).add(discoveryNode2).build(); ClusterState baseClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTable) @@ -1035,7 +1035,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { DiscoveryNode discoveryNode3 = new DiscoveryNode("", "node3", new LocalTransportAddress("3"), emptyMap(), singleton(DiscoveryNode.Role.DATA), Version.CURRENT); ClusterState updateClusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes()) - .put(discoveryNode3)).build(); + .add(discoveryNode3)).build(); firstRouting = TestShardRouting.newShardRouting("test", 0, "node2", null, null, true, ShardRoutingState.STARTED); secondRouting = TestShardRouting.newShardRouting("test", 1, "node2", "node3", null, true, ShardRoutingState.RELOCATING); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index 7ede869f0a6..80309004fff 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -121,8 +121,8 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(node_0) - .put(node_1) + .add(node_0) + .add(node_1) ).build(); // actual test -- after all that bloat :) @@ -186,8 +186,8 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { logger.info("--> 
adding two nodes"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(node_0) - .put(node_1) + .add(node_0) + .add(node_1) ).build(); // actual test -- after all that bloat :) @@ -317,7 +317,7 @@ public class DiskThresholdDeciderUnitTests extends ESAllocationTestCase { .metaData(metaData).routingTable(routingTableBuilder.build()).build(); AllocationService allocationService = createAllocationService(); - clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().put(newNode("node1"))) + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1"))) .build(); RoutingAllocation.Result result = allocationService.reroute(clusterState, "foo"); clusterState = ClusterState.builder(clusterState).routingTable(result.routingTable()).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java index 53932eef390..e880b09806c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java @@ -72,8 +72,8 @@ public class EnableAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -100,8 +100,8 @@ public class EnableAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes do rerouting"); clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -134,8 +134,8 @@ public class EnableAllocationTests extends ESAllocationTestCase { logger.info("--> adding two nodes and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -179,8 +179,8 @@ public class EnableAllocationTests extends ESAllocationTestCase { logger.info("--> adding one nodes and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -198,9 +198,9 @@ public class EnableAllocationTests extends ESAllocationTestCase { logger.info("--> adding one nodes and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) - .put(newNode("node3")) + .add(newNode("node1")) + .add(newNode("node2")) + .add(newNode("node3")) ).build(); ClusterState prevState = clusterState; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -279,8 +279,8 @@ public class EnableAllocationTests extends ESAllocationTestCase { logger.info("--> adding one nodes and do rerouting"); clusterState = 
ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); @@ -293,9 +293,9 @@ public class EnableAllocationTests extends ESAllocationTestCase { logger.info("--> adding one nodes and do rerouting"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) - .put(newNode("node3")) + .add(newNode("node1")) + .add(newNode("node2")) + .add(newNode("node3")) ).build(); ClusterState prevState = clusterState; routingTable = strategy.reroute(clusterState, "reroute").routingTable(); diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java index 4fa6615ac45..5f7e8bbfa2b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java @@ -47,7 +47,7 @@ public class ClusterSerializationTests extends ESAllocationTestCase { .addAsNew(metaData.index("test")) .build(); - DiscoveryNodes nodes = DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).localNodeId("node1").masterNodeId("node2").build(); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).localNodeId("node1").masterNodeId("node2").build(); ClusterState clusterState = ClusterState.builder(new ClusterName("clusterName1")).nodes(nodes).metaData(metaData).routingTable(routingTable).build(); @@ -70,7 +70,7 @@ public class ClusterSerializationTests extends ESAllocationTestCase { .addAsNew(metaData.index("test")) 
.build(); - DiscoveryNodes nodes = DiscoveryNodes.builder().put(newNode("node1")).put(newNode("node2")).put(newNode("node3")).build(); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")).add(newNode("node3")).build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes) .metaData(metaData).routingTable(routingTable).build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java index 9957a6d3603..a0b6f7f0400 100644 --- a/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/serialization/ClusterStateToStringTests.java @@ -50,7 +50,7 @@ public class ClusterStateToStringTests extends ESAllocationTestCase { .addAsNew(metaData.index("test_idx")) .build(); - DiscoveryNodes nodes = DiscoveryNodes.builder().put(new DiscoveryNode("node_foo", LocalTransportAddress.buildUnique(), + DiscoveryNodes nodes = DiscoveryNodes.builder().add(new DiscoveryNode("node_foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)).localNodeId("node_foo").masterNodeId("node_foo").build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(nodes) diff --git a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java index b8ebe9e6308..8dbda2838aa 100644 --- a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardShuffler; import 
org.elasticsearch.cluster.routing.ShardsIterator; import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -247,8 +246,8 @@ public class RoutingIteratorTests extends ESAllocationTestCase { node2Attributes.put("rack_id", "rack_2"); node2Attributes.put("zone", "zone2"); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1", unmodifiableMap(node1Attributes))) - .put(newNode("node2", unmodifiableMap(node2Attributes))) + .add(newNode("node1", unmodifiableMap(node1Attributes))) + .add(newNode("node2", unmodifiableMap(node2Attributes))) .localNodeId("node1") ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -295,8 +294,8 @@ public class RoutingIteratorTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("fred", "node1", singletonMap("disk", "ebs"))) - .put(newNode("barney", "node2", singletonMap("disk", "ephemeral"))) + .add(newNode("fred", "node1", singletonMap("disk", "ebs"))) + .add(newNode("barney", "node2", singletonMap("disk", "ephemeral"))) .localNodeId("node1") ).build(); @@ -369,8 +368,8 @@ public class RoutingIteratorTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - 
.put(newNode("node2")) + .add(newNode("node1")) + .add(newNode("node2")) .localNodeId("node1") ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); @@ -442,9 +441,9 @@ public class RoutingIteratorTests extends ESAllocationTestCase { ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() - .put(newNode("node1")) - .put(newNode("node2")) - .put(newNode("node3")) + .add(newNode("node1")) + .add(newNode("node2")) + .add(newNode("node3")) .localNodeId("node1") ).build(); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); diff --git a/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java b/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java index 895ae6638e6..b1b7749d88c 100644 --- a/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/ZenFaultDetectionTests.java @@ -155,8 +155,8 @@ public class ZenFaultDetectionTests extends ESTestCase { private DiscoveryNodes buildNodesForA(boolean master) { DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); - builder.put(nodeA); - builder.put(nodeB); + builder.add(nodeA); + builder.add(nodeB); builder.localNodeId(nodeA.getId()); builder.masterNodeId(master ? nodeA.getId() : nodeB.getId()); return builder.build(); @@ -164,8 +164,8 @@ public class ZenFaultDetectionTests extends ESTestCase { private DiscoveryNodes buildNodesForB(boolean master) { DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); - builder.put(nodeA); - builder.put(nodeB); + builder.add(nodeA); + builder.add(nodeB); builder.localNodeId(nodeB.getId()); builder.masterNodeId(master ? 
nodeB.getId() : nodeA.getId()); return builder.build(); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index 2a0410f272b..b71310e2f6e 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -18,16 +18,22 @@ */ package org.elasticsearch.discovery.zen; +import com.carrotsearch.randomizedtesting.annotations.Repeat; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NotMasterException; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.allocation.AllocationService; -import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; -import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -38,6 +44,7 @@ import org.elasticsearch.common.util.concurrent.BaseFuture; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import 
org.elasticsearch.discovery.zen.membership.MembershipAction; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -51,6 +58,7 @@ import org.junit.BeforeClass; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -63,13 +71,21 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.shuffle; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED; import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; import static org.elasticsearch.test.ClusterServiceUtils.setState; +import static org.elasticsearch.test.ESAllocationTestCase.createAllocationService; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -101,7 +117,7 @@ public class NodeJoinControllerTests extends ESTestCase { // make sure we have a master setState(clusterService, ClusterState.builder(clusterService.state()).nodes( DiscoveryNodes.builder(initialNodes).masterNodeId(localNode.getId()))); - nodeJoinController = new NodeJoinController(clusterService, new 
NoopAllocationService(Settings.EMPTY), + nodeJoinController = new NodeJoinController(clusterService, createAllocationService(Settings.EMPTY), new ElectMasterService(Settings.EMPTY), new DiscoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), Settings.EMPTY); } @@ -412,7 +428,7 @@ public class NodeJoinControllerTests extends ESTestCase { final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(state.nodes()); final DiscoveryNode other_node = new DiscoveryNode("other_node", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT); - nodesBuilder.put(other_node); + nodesBuilder.add(other_node); setState(clusterService, ClusterState.builder(state).nodes(nodesBuilder)); state = clusterService.state(); @@ -524,60 +540,137 @@ public class NodeJoinControllerTests extends ESTestCase { } public void testRejectingJoinWithSameAddressButDifferentId() throws InterruptedException, ExecutionException { + addNodes(randomInt(5)); ClusterState state = clusterService.state(); - final DiscoveryNode other_node = new DiscoveryNode("other_node", state.nodes().getLocalNode().getAddress(), - emptyMap(), emptySet(), Version.CURRENT); + final DiscoveryNode existing = randomFrom(StreamSupport.stream(state.nodes().spliterator(), false).collect(Collectors.toList())); + final DiscoveryNode other_node = new DiscoveryNode("other_node", existing.getAddress(), emptyMap(), emptySet(), Version.CURRENT); ExecutionException e = expectThrows(ExecutionException.class, () -> joinNode(other_node)); assertThat(e.getMessage(), containsString("found existing node")); } - public void testRejectingJoinWithSameIdButDifferentAddress() throws InterruptedException, ExecutionException { + public void testRejectingJoinWithSameIdButDifferentNode() throws InterruptedException, ExecutionException { + addNodes(randomInt(5)); ClusterState state = clusterService.state(); - final DiscoveryNode other_node = new 
DiscoveryNode(state.nodes().getLocalNode().getId(), - new LocalTransportAddress(randomAsciiOfLength(20)), emptyMap(), emptySet(), Version.CURRENT); - - ExecutionException e = expectThrows(ExecutionException.class, () -> joinNode(other_node)); - assertThat(e.getMessage(), containsString("found existing node")); - } - - public void testJoinWithSameIdSameAddressButDifferentMeta() throws InterruptedException, ExecutionException { - ClusterState state = clusterService.state(); - final DiscoveryNode localNode = state.nodes().getLocalNode(); + final DiscoveryNode existing = randomFrom(StreamSupport.stream(state.nodes().spliterator(), false).collect(Collectors.toList())); final DiscoveryNode other_node = new DiscoveryNode( - randomBoolean() ? localNode.getName() : "other_name", - localNode.getId(), localNode.getAddress(), - randomBoolean() ? localNode.getAttributes() : Collections.singletonMap("attr", "other"), - randomBoolean() ? localNode.getRoles() : new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))), - randomBoolean() ? localNode.getVersion() : VersionUtils.randomVersion(random())); + randomBoolean() ? existing.getName() : "other_name", + existing.getId(), + randomBoolean() ? existing.getAddress() : LocalTransportAddress.buildUnique(), + randomBoolean() ? existing.getAttributes() : Collections.singletonMap("attr", "other"), + randomBoolean() ? existing.getRoles() : new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))), + randomBoolean() ? 
existing.getVersion() : VersionUtils.randomVersion(random())); - joinNode(other_node); - - assertThat(clusterService.localNode(), equalTo(other_node)); + ExecutionException e = expectThrows(ExecutionException.class, () -> joinNode(other_node)); + assertThat(e.getMessage(), containsString("found existing node")); } - static class NoopAllocationService extends AllocationService { + public void testRejectingRestartedNodeJoinsBeforeProcessingNodeLeft() throws InterruptedException, ExecutionException { + addNodes(randomInt(5)); + ClusterState state = clusterService.state(); + final DiscoveryNode existing = randomFrom(StreamSupport.stream(state.nodes().spliterator(), false).collect(Collectors.toList())); + joinNode(existing); // OK - public NoopAllocationService(Settings settings) { - super(settings, null, null, null, null); + final DiscoveryNode other_node = new DiscoveryNode(existing.getId(), existing.getAddress(), existing.getAttributes(), + existing.getRoles(), Version.CURRENT); + + ExecutionException e = expectThrows(ExecutionException.class, () -> joinNode(other_node)); + assertThat(e.getMessage(), containsString("found existing node")); + } + + /** + * Tests that a node can become a master, even though the last cluster state it knows contains + * nodes that conflict with the joins it got and needs to become a master + */ + public void testElectionBasedOnConflictingNodes() throws InterruptedException, ExecutionException { + final DiscoveryNode masterNode = clusterService.localNode(); + final DiscoveryNode otherNode = new DiscoveryNode("other_node", LocalTransportAddress.buildUnique(), emptyMap(), + EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT); + // simulate master going down with stale nodes in its cluster state (for example when min master nodes is set to 2) + // also add some shards to that node + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); + discoBuilder.masterNodeId(null); + 
discoBuilder.add(otherNode); + ClusterState.Builder stateBuilder = ClusterState.builder(clusterService.state()).nodes(discoBuilder); + if (randomBoolean()) { + IndexMetaData indexMetaData = IndexMetaData.builder("test").settings(Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 1) + .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build(); + stateBuilder.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded()); + IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(indexMetaData.getIndex()); + RoutingTable.Builder routing = new RoutingTable.Builder(); + routing.addAsNew(indexMetaData); + final ShardId shardId = new ShardId("test", "_na_", 0); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); + + final DiscoveryNode primaryNode = randomBoolean() ? masterNode : otherNode; + final DiscoveryNode replicaNode = primaryNode.equals(masterNode) ? otherNode : masterNode; + final boolean primaryStarted = randomBoolean(); + indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, primaryNode.getId(), null, null, true, + primaryStarted ? ShardRoutingState.STARTED : ShardRoutingState.INITIALIZING, + primaryStarted ? null : new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "getting there"))); + if (primaryStarted) { + boolean replicaStared = randomBoolean(); + indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, replicaNode.getId(), null, null, false, + replicaStared ? ShardRoutingState.STARTED : ShardRoutingState.INITIALIZING, + replicaStared ? 
null : new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "getting there"))); + } else { + indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting("test", 0, null, null, null, false, + ShardRoutingState.UNASSIGNED, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "life sucks"))); + } + indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build()); + stateBuilder.routingTable(RoutingTable.builder().add(indexRoutingTableBuilder.build()).build()); } - @Override - public RoutingAllocation.Result applyStartedShards(ClusterState clusterState, List startedShards, - boolean withReroute) { - return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); - } + setState(clusterService, stateBuilder.build()); - @Override - public RoutingAllocation.Result applyFailedShards(ClusterState clusterState, - List failedShards) { - return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); - } + final DiscoveryNode restartedNode = new DiscoveryNode(otherNode.getId(), + randomBoolean() ? 
otherNode.getAddress() : LocalTransportAddress.buildUnique(), otherNode.getAttributes(), + otherNode.getRoles(), Version.CURRENT); - @Override - protected RoutingAllocation.Result reroute(ClusterState clusterState, String reason, boolean debug) { - return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData()); + nodeJoinController.startElectionContext(); + final SimpleFuture joinFuture = joinNodeAsync(restartedNode); + final CountDownLatch elected = new CountDownLatch(1); + nodeJoinController.waitToBeElectedAsMaster(1, TimeValue.timeValueHours(5), new NodeJoinController.ElectionCallback() { + @Override + public void onElectedAsMaster(ClusterState state) { + elected.countDown(); + } + + @Override + public void onFailure(Throwable t) { + logger.error("failed to be elected as master", t); + throw new AssertionError("failed to be elected as master", t); + } + }); + + elected.await(); + + joinFuture.get(); // throw any exception + + final ClusterState finalState = clusterService.state(); + final DiscoveryNodes finalNodes = finalState.nodes(); + assertTrue(finalNodes.isLocalNodeElectedMaster()); + assertThat(finalNodes.getLocalNode(), equalTo(masterNode)); + assertThat(finalNodes.getSize(), equalTo(2)); + assertThat(finalNodes.get(restartedNode.getId()), equalTo(restartedNode)); + List activeShardsOnRestartedNode = + StreamSupport.stream(finalState.getRoutingNodes().node(restartedNode.getId()).spliterator(), false) + .filter(ShardRouting::active).collect(Collectors.toList()); + assertThat(activeShardsOnRestartedNode, empty()); + } + + + private void addNodes(int count) { + ClusterState state = clusterService.state(); + final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(state.nodes()); + for (int i = 0;i< count;i++) { + final DiscoveryNode node = new DiscoveryNode("node_" + state.nodes().getSize() + i, LocalTransportAddress.buildUnique(), + emptyMap(), new 
HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values()))), Version.CURRENT); + nodesBuilder.add(node); } + setState(clusterService, ClusterState.builder(state).nodes(nodesBuilder)); } protected void assertNodesInCurrentState(List expectedNodes) { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java index 667ca6fbccb..35335a8ede4 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeRemovalClusterStateTaskExecutorTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.test.ESTestCase; @@ -56,13 +55,13 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase { final DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); final int nodes = randomIntBetween(2, 16); for (int i = 0; i < nodes; i++) { - builder.put(node(i)); + builder.add(node(i)); } final ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(builder).build(); final DiscoveryNodes.Builder removeBuilder = DiscoveryNodes.builder(); for (int i = nodes; i < nodes + randomIntBetween(1, 16); i++) { - removeBuilder.put(node(i)); + removeBuilder.add(node(i)); } final List tasks = StreamSupport @@ -106,7 +105,7 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase { boolean first = true; for (int i = 0; i < nodes; i++) { final DiscoveryNode node 
= node(i); - builder.put(node); + builder.add(node); if (first || randomBoolean()) { tasks.add(new ZenDiscovery.NodeRemovalClusterStateTaskExecutor.Task(node, randomBoolean() ? "left" : "failed")); } @@ -134,7 +133,8 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase { when(electMasterService.hasEnoughMasterNodes(any(Iterable.class))).thenReturn(true); final AllocationService allocationService = mock(AllocationService.class); - when(allocationService.reroute(any(ClusterState.class), any(String.class))).thenReturn(mock(RoutingAllocation.Result.class)); + when(allocationService.deassociateDeadNodes(any(ClusterState.class), eq(true), any(String.class))) + .thenReturn(mock(RoutingAllocation.Result.class)); final BiFunction rejoin = (cs, r) -> { fail("rejoin should not be invoked"); @@ -158,7 +158,7 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase { boolean first = true; for (int i = 0; i < nodes; i++) { final DiscoveryNode node = node(i); - builder.put(node); + builder.add(node); if (first || randomBoolean()) { tasks.add(new ZenDiscovery.NodeRemovalClusterStateTaskExecutor.Task(node, randomBoolean() ? 
"left" : "failed")); } @@ -171,7 +171,7 @@ public class NodeRemovalClusterStateTaskExecutorTests extends ESTestCase { verify(electMasterService).hasEnoughMasterNodes(eq(remainingNodesClusterState.get().nodes())); verifyNoMoreInteractions(electMasterService); - verify(allocationService).reroute(eq(remainingNodesClusterState.get()), any(String.class)); + verify(allocationService).deassociateDeadNodes(eq(remainingNodesClusterState.get()), eq(true), any(String.class)); for (final ZenDiscovery.NodeRemovalClusterStateTaskExecutor.Task task : tasks) { assertNull(result.resultingState.nodes().get(task.node().getId())); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java index 3d0d9ddd8b1..6248df7370f 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java @@ -210,7 +210,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase { assert node != null; DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(state.nodes()) - .put(new DiscoveryNode("abc", new LocalTransportAddress("abc"), emptyMap(), + .add(new DiscoveryNode("abc", new LocalTransportAddress("abc"), emptyMap(), emptySet(), Version.CURRENT)).masterNodeId("abc"); ClusterState.Builder builder = ClusterState.builder(state); builder.nodes(nodes); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java index 9db83f48f0e..ba4c14c2058 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTests.java @@ -50,9 +50,9 @@ public class ZenDiscoveryUnitTests extends ESTestCase { ClusterName clusterName = new ClusterName("abc"); DiscoveryNodes.Builder currentNodes = DiscoveryNodes.builder(); - 
currentNodes.masterNodeId("a").put(new DiscoveryNode("a", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)); + currentNodes.masterNodeId("a").add(new DiscoveryNode("a", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)); DiscoveryNodes.Builder newNodes = DiscoveryNodes.builder(); - newNodes.masterNodeId("a").put(new DiscoveryNode("a", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)); + newNodes.masterNodeId("a").add(new DiscoveryNode("a", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)); ClusterState.Builder currentState = ClusterState.builder(clusterName); currentState.nodes(currentNodes); @@ -70,7 +70,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase { assertFalse("should not ignore, because new state's version is higher to current state's version", shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build())); currentNodes = DiscoveryNodes.builder(); - currentNodes.masterNodeId("b").put(new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)); + currentNodes.masterNodeId("b").add(new DiscoveryNode("b", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT)); ; // version isn't taken into account, so randomize it to ensure this. 
if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java index 7b81c38f1a1..ea5779c33bb 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPingIT.java @@ -92,7 +92,7 @@ public class UnicastZenPingIT extends ESTestCase { zenPingA.setPingContextProvider(new PingContextProvider() { @Override public DiscoveryNodes nodes() { - return DiscoveryNodes.builder().put(handleA.node).localNodeId("UZP_A").build(); + return DiscoveryNodes.builder().add(handleA.node).localNodeId("UZP_A").build(); } @Override @@ -106,7 +106,7 @@ public class UnicastZenPingIT extends ESTestCase { zenPingB.setPingContextProvider(new PingContextProvider() { @Override public DiscoveryNodes nodes() { - return DiscoveryNodes.builder().put(handleB.node).localNodeId("UZP_B").build(); + return DiscoveryNodes.builder().add(handleB.node).localNodeId("UZP_B").build(); } @Override @@ -126,7 +126,7 @@ public class UnicastZenPingIT extends ESTestCase { zenPingC.setPingContextProvider(new PingContextProvider() { @Override public DiscoveryNodes nodes() { - return DiscoveryNodes.builder().put(handleC.node).localNodeId("UZP_C").build(); + return DiscoveryNodes.builder().add(handleC.node).localNodeId("UZP_C").build(); } @Override @@ -140,7 +140,7 @@ public class UnicastZenPingIT extends ESTestCase { zenPingD.setPingContextProvider(new PingContextProvider() { @Override public DiscoveryNodes nodes() { - return DiscoveryNodes.builder().put(handleD.node).localNodeId("UZP_D").build(); + return DiscoveryNodes.builder().add(handleD.node).localNodeId("UZP_D").build(); } @Override diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueueTests.java 
b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueueTests.java index 42aa792c95f..9bb8bf801f1 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueueTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PendingClusterStatesQueueTests.java @@ -237,7 +237,7 @@ public class PendingClusterStatesQueueTests extends ESTestCase { ClusterState state = lastClusterStatePerMaster[masterIndex]; if (state == null) { state = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).nodes(DiscoveryNodes.builder() - .put(new DiscoveryNode(masters[masterIndex], LocalTransportAddress.buildUnique(), + .add(new DiscoveryNode(masters[masterIndex], LocalTransportAddress.buildUnique(), emptyMap(), emptySet(),Version.CURRENT)).masterNodeId(masters[masterIndex]).build() ).build(); } else { diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java index 7d72fa5c4dc..b1658845afd 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java @@ -105,7 +105,7 @@ public class PublishClusterStateActionTests extends ESTestCase { this.service = service; this.listener = listener; this.logger = logger; - this.clusterState = ClusterState.builder(CLUSTER_NAME).nodes(DiscoveryNodes.builder().put(discoveryNode).localNodeId(discoveryNode.getId()).build()).build(); + this.clusterState = ClusterState.builder(CLUSTER_NAME).nodes(DiscoveryNodes.builder().add(discoveryNode).localNodeId(discoveryNode.getId()).build()).build(); } public MockNode setAsMaster() { @@ -260,7 +260,7 @@ public class PublishClusterStateActionTests extends ESTestCase { ClusterState clusterState = nodeA.clusterState; // cluster 
state update - add nodeB - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(clusterState.nodes()).put(nodeB.discoveryNode).build(); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(clusterState.nodes()).add(nodeB.discoveryNode).build(); ClusterState previousClusterState = clusterState; clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); @@ -286,7 +286,7 @@ public class PublishClusterStateActionTests extends ESTestCase { // cluster state update 3 - register node C previousClusterState = clusterState; - discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeC.discoveryNode).build(); + discoveryNodes = DiscoveryNodes.builder(discoveryNodes).add(nodeC.discoveryNode).build(); clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); assertSameStateFromDiff(nodeB.clusterState, clusterState); @@ -318,9 +318,9 @@ public class PublishClusterStateActionTests extends ESTestCase { // node B becomes the master and sends a version of the cluster state that goes back discoveryNodes = DiscoveryNodes.builder(discoveryNodes) - .put(nodeA.discoveryNode) - .put(nodeB.discoveryNode) - .put(nodeC.discoveryNode) + .add(nodeA.discoveryNode) + .add(nodeB.discoveryNode) + .add(nodeC.discoveryNode) .masterNodeId(nodeB.discoveryNode.getId()) .localNodeId(nodeB.discoveryNode.getId()) .build(); @@ -339,7 +339,7 @@ public class PublishClusterStateActionTests extends ESTestCase { MockNode nodeB = createMockNode("nodeB", Settings.EMPTY); // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).put(nodeB.discoveryNode).build(); + DiscoveryNodes discoveryNodes = 
DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build(); ClusterState previousClusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build(); ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); @@ -370,11 +370,11 @@ public class PublishClusterStateActionTests extends ESTestCase { }); // Initial cluster state - DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.getId()).masterNodeId(nodeA.discoveryNode.getId()).build(); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.getId()).masterNodeId(nodeA.discoveryNode.getId()).build(); ClusterState clusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build(); // cluster state update - add nodeB - discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeB.discoveryNode).build(); + discoveryNodes = DiscoveryNodes.builder(discoveryNodes).add(nodeB.discoveryNode).build(); ClusterState previousClusterState = clusterState; clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); @@ -408,7 +408,7 @@ public class PublishClusterStateActionTests extends ESTestCase { assertProperMetaDataForVersion(event.state().metaData(), event.state().version()); } }); - discoveryNodesBuilder.put(node.discoveryNode); + discoveryNodesBuilder.add(node.discoveryNode); } AssertingAckListener[] listeners = new AssertingAckListener[numberOfIterations]; @@ -447,7 +447,7 @@ public class PublishClusterStateActionTests extends ESTestCase { MockNode nodeB = createMockNode("nodeB", Settings.EMPTY); // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state - 
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).put(nodeB.discoveryNode).build(); + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build(); ClusterState previousClusterState = ClusterState.builder(CLUSTER_NAME).nodes(discoveryNodes).build(); ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build(); publishStateAndWait(nodeA.action, clusterState, previousClusterState); @@ -488,14 +488,14 @@ public class PublishClusterStateActionTests extends ESTestCase { final int masterNodes = randomIntBetween(1, 10); MockNode master = createMockNode("master"); - DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder().put(master.discoveryNode); + DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder().add(master.discoveryNode); for (int i = 1; i < masterNodes; i++) { - discoveryNodesBuilder.put(createMockNode("node" + i).discoveryNode); + discoveryNodesBuilder.add(createMockNode("node" + i).discoveryNode); } final int dataNodes = randomIntBetween(0, 5); final Settings dataSettings = Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build(); for (int i = 0; i < dataNodes; i++) { - discoveryNodesBuilder.put(createMockNode("data_" + i, dataSettings).discoveryNode); + discoveryNodesBuilder.add(createMockNode("data_" + i, dataSettings).discoveryNode); } discoveryNodesBuilder.localNodeId(master.discoveryNode.getId()).masterNodeId(master.discoveryNode.getId()); DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build(); @@ -536,10 +536,10 @@ public class PublishClusterStateActionTests extends ESTestCase { } Collections.shuffle(Arrays.asList(nodeTypes), random()); - DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder().put(master.discoveryNode); + DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder().add(master.discoveryNode); for (int i = 0; i < nodeTypes.length; i++) { final MockNode mockNode = 
createMockNode("node" + i); - discoveryNodesBuilder.put(mockNode.discoveryNode); + discoveryNodesBuilder.add(mockNode.discoveryNode); switch (nodeTypes[i]) { case 1: mockNode.action.errorOnSend.set(true); @@ -552,7 +552,7 @@ public class PublishClusterStateActionTests extends ESTestCase { final int dataNodes = randomIntBetween(0, 3); // data nodes don't matter for (int i = 0; i < dataNodes; i++) { final MockNode mockNode = createMockNode("data_" + i, Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build()); - discoveryNodesBuilder.put(mockNode.discoveryNode); + discoveryNodesBuilder.add(mockNode.discoveryNode); if (randomBoolean()) { // we really don't care - just chaos monkey mockNode.action.errorOnCommit.set(randomBoolean()); @@ -638,7 +638,7 @@ public class PublishClusterStateActionTests extends ESTestCase { try { MockNode otherNode = createMockNode("otherNode"); state = ClusterState.builder(node.clusterState).nodes( - DiscoveryNodes.builder(node.nodes()).put(otherNode.discoveryNode).localNodeId(otherNode.discoveryNode.getId()).build() + DiscoveryNodes.builder(node.nodes()).add(otherNode.discoveryNode).localNodeId(otherNode.discoveryNode.getId()).build() ).incrementVersion().build(); node.action.validateIncomingState(state, node.clusterState); fail("node accepted state with existent but wrong local node"); @@ -729,7 +729,7 @@ public class PublishClusterStateActionTests extends ESTestCase { MockNode master = createMockNode("master", settings); MockNode node = createMockNode("node", settings); ClusterState state = ClusterState.builder(master.clusterState) - .nodes(DiscoveryNodes.builder(master.clusterState.nodes()).put(node.discoveryNode).masterNodeId(master.discoveryNode.getId())).build(); + .nodes(DiscoveryNodes.builder(master.clusterState.nodes()).add(node.discoveryNode).masterNodeId(master.discoveryNode.getId())).build(); for (int i = 0; i < 10; i++) { state = ClusterState.builder(state).incrementVersion().build(); diff --git 
a/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java b/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java index 092e6eaff8a..043eaa2708f 100644 --- a/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/AsyncShardFetchTests.java @@ -71,7 +71,7 @@ public class AsyncShardFetchTests extends ESTestCase { } public void testClose() throws Exception { - DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).build(); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).build(); test.addSimulation(node1.getId(), response1); // first fetch, no data, still on going @@ -93,7 +93,7 @@ public class AsyncShardFetchTests extends ESTestCase { } public void testFullCircleSingleNodeSuccess() throws Exception { - DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).build(); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).build(); test.addSimulation(node1.getId(), response1); // first fetch, no data, still on going @@ -112,7 +112,7 @@ public class AsyncShardFetchTests extends ESTestCase { } public void testFullCircleSingleNodeFailure() throws Exception { - DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).build(); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).build(); // add a failed response for node1 test.addSimulation(node1.getId(), failure1); @@ -144,7 +144,7 @@ public class AsyncShardFetchTests extends ESTestCase { } public void testTwoNodesOnSetup() throws Exception { - DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).put(node2).build(); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).build(); test.addSimulation(node1.getId(), response1); test.addSimulation(node2.getId(), response2); @@ -172,7 +172,7 @@ public class AsyncShardFetchTests extends ESTestCase { } public void testTwoNodesOnSetupAndFailure() throws Exception { - DiscoveryNodes nodes = 
DiscoveryNodes.builder().put(node1).put(node2).build(); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).build(); test.addSimulation(node1.getId(), response1); test.addSimulation(node2.getId(), failure2); @@ -198,7 +198,7 @@ public class AsyncShardFetchTests extends ESTestCase { } public void testTwoNodesAddedInBetween() throws Exception { - DiscoveryNodes nodes = DiscoveryNodes.builder().put(node1).build(); + DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).build(); test.addSimulation(node1.getId(), response1); // no fetched data, 2 requests still on going @@ -210,7 +210,7 @@ public class AsyncShardFetchTests extends ESTestCase { test.fireSimulationAndWait(node1.getId()); // now, add a second node to the nodes, it should add it to the ongoing requests - nodes = DiscoveryNodes.builder(nodes).put(node2).build(); + nodes = DiscoveryNodes.builder(nodes).add(node2).build(); test.addSimulation(node2.getId(), response2); // no fetch data, has a new node introduced fetchData = test.fetchData(nodes, emptySet()); diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java index f41b55a5ea5..a0e813663b8 100644 --- a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -166,8 +166,8 @@ public class GatewayMetaStateTests extends ESAllocationTestCase { private DiscoveryNodes.Builder generateDiscoveryNodes(boolean masterEligible) { Set dataOnlyRoles = Collections.singleton(DiscoveryNode.Role.DATA); - return DiscoveryNodes.builder().put(newNode("node1", masterEligible ? MASTER_DATA_ROLES : dataOnlyRoles)) - .put(newNode("master_node", MASTER_DATA_ROLES)).localNodeId("node1").masterNodeId(masterEligible ? "node1" : "master_node"); + return DiscoveryNodes.builder().add(newNode("node1", masterEligible ? 
MASTER_DATA_ROLES : dataOnlyRoles)) + .add(newNode("master_node", MASTER_DATA_ROLES)).localNodeId("node1").masterNodeId(masterEligible ? "node1" : "master_node"); } public void assertState(ClusterChangedEvent event, diff --git a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java index e86fa6f014b..f893f73433e 100644 --- a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java @@ -367,7 +367,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTable) - .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false); } @@ -450,7 +450,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTable) - .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false); } @@ -468,7 +468,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTable) - 
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime(), false); boolean changed = testAllocator.allocateUnassigned(allocation); @@ -512,7 +512,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTable) - .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state, null, System.nanoTime(), false); boolean changed = testAllocator.allocateUnassigned(allocation); @@ -556,7 +556,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTableBuilder.build()) - .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, null, System.nanoTime(), false); } diff --git a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java index dedd26d68b5..549b4e786cb 100644 --- a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java @@ -306,7 +306,7 @@ public class ReplicaShardAllocatorTests extends 
ESAllocationTestCase { ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTable) - .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime(), false); } @@ -328,7 +328,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) .routingTable(routingTable) - .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)).build(); return new RoutingAllocation(deciders, new RoutingNodes(state, false), state, ClusterInfo.EMPTY, System.nanoTime(), false); } diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 152be45d558..c3429edc39b 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -55,6 +55,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RandomAllocationDeciderTests; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import 
org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; @@ -96,6 +97,7 @@ import static org.mockito.Mockito.when; public class ClusterStateChanges extends AbstractComponent { + private final AllocationService allocationService; private final ClusterService clusterService; private final ShardStateAction.ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor; private final ShardStateAction.ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor; @@ -111,7 +113,7 @@ public class ClusterStateChanges extends AbstractComponent { public ClusterStateChanges() { super(Settings.builder().put(PATH_HOME_SETTING.getKey(), "dummy").build()); - final AllocationService allocationService = new AllocationService(settings, new AllocationDeciders(settings, + allocationService = new AllocationService(settings, new AllocationDeciders(settings, new HashSet<>(Arrays.asList(new SameShardAllocationDecider(settings), new ReplicaAfterPrimaryActiveAllocationDecider(settings), new RandomAllocationDeciderTests.RandomAllocationDecider(getRandom())))), @@ -204,6 +206,11 @@ public class ClusterStateChanges extends AbstractComponent { return execute(transportClusterRerouteAction, request, state); } + public ClusterState deassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) { + RoutingAllocation.Result rerouteResult = allocationService.deassociateDeadNodes(clusterState, reroute, reason); + return ClusterState.builder(clusterState).routingResult(rerouteResult).build(); + } + public ClusterState applyFailedShards(ClusterState clusterState, List failedShards) { List entries = failedShards.stream().map(failedShard -> new ShardStateAction.ShardEntry(failedShard.routingEntry.shardId(), failedShard.routingEntry.allocationId().getId(), diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java 
b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 4477974d118..44231f29ba3 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -294,7 +294,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice if (randomBoolean()) { // add node if (state.nodes().getSize() < 10) { - DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).put(createNode()).build(); + DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).add(createNode()).build(); state = ClusterState.builder(state).nodes(newNodes).build(); state = cluster.reroute(state, new ClusterRerouteRequest()); // always reroute after node leave updateNodes(state, clusterStateServiceMap, indicesServiceSupplier); @@ -306,7 +306,7 @@ public class IndicesClusterStateServiceRandomUpdatesTests extends AbstractIndice if (discoveryNode.equals(state.nodes().getMasterNode()) == false) { DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).remove(discoveryNode.getId()).build(); state = ClusterState.builder(state).nodes(newNodes).build(); - state = cluster.reroute(state, new ClusterRerouteRequest()); // always reroute after node join + state = cluster.deassociateDeadNodes(state, true, "removed and added a node"); updateNodes(state, clusterStateServiceMap, indicesServiceSupplier); } } diff --git a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index 2ad8ebb52f9..67fe440a292 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -125,7 +125,7 @@ public class RareClusterStateIT extends ESIntegTestCase { public ClusterState 
execute(ClusterState currentState) throws Exception { // inject a node ClusterState.Builder builder = ClusterState.builder(currentState); - builder.nodes(DiscoveryNodes.builder(currentState.nodes()).put(new DiscoveryNode("_non_existent", + builder.nodes(DiscoveryNodes.builder(currentState.nodes()).add(new DiscoveryNode("_non_existent", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT))); // open index @@ -154,12 +154,11 @@ public class RareClusterStateIT extends ESIntegTestCase { clusterService.submitStateUpdateTask("test-remove-injected-node", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) throws Exception { - // inject a node ClusterState.Builder builder = ClusterState.builder(currentState); builder.nodes(DiscoveryNodes.builder(currentState.nodes()).remove("_non_existent")); currentState = builder.build(); - RoutingAllocation.Result result = allocationService.reroute(currentState, "reroute"); + RoutingAllocation.Result result = allocationService.deassociateDeadNodes(currentState, true, "reroute"); return ClusterState.builder(currentState).routingResult(result).build(); } diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java index 96af4ef3671..d2e603ffd42 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreTests.java @@ -141,7 +141,7 @@ public class IndicesStoreTests extends ESTestCase { ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test")); clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas))); - clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).put(localNode).put(new DiscoveryNode("xyz", + 
clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).add(localNode).add(new DiscoveryNode("xyz", new LocalTransportAddress("xyz"), emptyMap(), emptySet(), Version.CURRENT))); IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1)); int localShardId = randomInt(numShards - 1); @@ -164,7 +164,7 @@ public class IndicesStoreTests extends ESTestCase { ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test")); clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas))); - clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).put(localNode)); + clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).add(localNode)); IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1)); for (int i = 0; i < numShards; i++) { String relocatingNodeId = randomBoolean() ? null : "def"; @@ -186,7 +186,7 @@ public class IndicesStoreTests extends ESTestCase { final Version nodeVersion = randomBoolean() ? 
CURRENT : randomVersion(random()); ClusterState.Builder clusterState = ClusterState.builder(new ClusterName("test")); clusterState.metaData(MetaData.builder().put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(numShards).numberOfReplicas(numReplicas))); - clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).put(localNode).put(new DiscoveryNode("xyz", + clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()).add(localNode).add(new DiscoveryNode("xyz", new LocalTransportAddress("xyz"), emptyMap(), emptySet(), nodeVersion))); IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1)); for (int i = 0; i < numShards; i++) { @@ -209,9 +209,9 @@ public class IndicesStoreTests extends ESTestCase { final Version nodeVersion = randomBoolean() ? CURRENT : randomVersion(random()); clusterState.nodes(DiscoveryNodes.builder().localNodeId(localNode.getId()) - .put(localNode) - .put(new DiscoveryNode("xyz", new LocalTransportAddress("xyz"), emptyMap(), emptySet(), Version.CURRENT)) - .put(new DiscoveryNode("def", new LocalTransportAddress("def"), emptyMap(), emptySet(), nodeVersion) // <-- only set relocating, since we're testing that in this test + .add(localNode) + .add(new DiscoveryNode("xyz", new LocalTransportAddress("xyz"), emptyMap(), emptySet(), Version.CURRENT)) + .add(new DiscoveryNode("def", new LocalTransportAddress("def"), emptyMap(), emptySet(), nodeVersion) // <-- only set relocating, since we're testing that in this test )); IndexShardRoutingTable.Builder routingTable = new IndexShardRoutingTable.Builder(new ShardId("test", "_na_", 1)); for (int i = 0; i < numShards; i++) {