Remove usage of max_local_storage_nodes in test infrastructure (#41652)

Moves the test infrastructure away from using node.max_local_storage_nodes, allowing a
follow-up PR to deprecate this setting in 7.x and remove it in 8.0.

This also changes the behavior of InternalTestCluster so that newly started nodes no longer
automatically reuse the data folders of previously stopped nodes. If that behavior is desired,
it must be requested explicitly by passing the data path settings of the stopped node to the
node that is started.
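
As a minimal sketch of the new pattern (assuming an ESIntegTestCase test where nodeName refers
to an already-started node; the exact call sites vary across the tests changed below), explicit
data folder reuse now looks roughly like this:

    // Capture the data path settings before the node is stopped.
    Settings dataPathSettings = internalCluster().dataPathSettings(nodeName);
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeName));

    // Pass them back in when starting the replacement node so that it reuses
    // the same data folders instead of being assigned a fresh, empty data path.
    internalCluster().startNode(Settings.builder().put(dataPathSettings).build());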
Yannick Welsch committed 2019-05-22 10:25:11 +02:00
parent b1c413ea63
commit 770d8e9e39
24 changed files with 266 additions and 207 deletions


@@ -410,7 +410,6 @@ class ClusterFormationTasks {
             // so we need to bail quicker than the default 30s for the cluster to form in time.
             esConfig['discovery.zen.master_election.wait_for_joins_timeout'] = '5s'
         }
-        esConfig['node.max_local_storage_nodes'] = node.config.numNodes
         esConfig['http.port'] = node.config.httpPort
         if (node.nodeVersion.onOrAfter('6.7.0')) {
             esConfig['transport.port'] = node.config.transportPort


@@ -91,9 +91,15 @@ public class IngestRestartIT extends ESIntegTestCase {
         checkPipelineExists.accept(pipelineIdWithoutScript);

-        internalCluster().stopCurrentMasterNode();
-        internalCluster().startNode(Settings.builder().put("script.allowed_types", "none"));
+        internalCluster().restartNode(internalCluster().getMasterName(), new InternalTestCluster.RestartCallback() {
+
+            @Override
+            public Settings onNodeStopped(String nodeName) {
+                return Settings.builder().put("script.allowed_types", "none").build();
+            }
+        });

         checkPipelineExists.accept(pipelineIdWithoutScript);
         checkPipelineExists.accept(pipelineIdWithScript);


@@ -277,6 +277,8 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
         nodes.remove(primaryNodeName);

         logger.info("--> shutting down all nodes except the one that holds the primary");
+        Settings node0DataPathSettings = internalCluster().dataPathSettings(nodes.get(0));
+        Settings node1DataPathSettings = internalCluster().dataPathSettings(nodes.get(1));
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(0)));
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(1)));
         ensureStableCluster(1);
@@ -286,8 +288,8 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
             Settings.builder().put("index.routing.allocation.include._name", primaryNodeName)).get();

         logger.info("--> restarting the stopped nodes");
-        internalCluster().startNode(Settings.builder().put("node.name", nodes.get(0)).build());
-        internalCluster().startNode(Settings.builder().put("node.name", nodes.get(1)).build());
+        internalCluster().startNode(Settings.builder().put("node.name", nodes.get(0)).put(node0DataPathSettings).build());
+        internalCluster().startNode(Settings.builder().put("node.name", nodes.get(1)).put(node1DataPathSettings).build());
         ensureStableCluster(3);

         boolean includeYesDecisions = randomBoolean();
@@ -1017,6 +1019,7 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
         // start replica node first, so it's path will be used first when we start a node after
         // stopping all of them at end of test.
         final String replicaNode = internalCluster().startNode();
+        Settings replicaDataPathSettings = internalCluster().dataPathSettings(replicaNode);
         final String primaryNode = internalCluster().startNode();

         prepareIndex(IndexMetaData.State.OPEN, 1, 1,
@@ -1057,7 +1060,7 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode));

         logger.info("--> restart the node with the stale replica");
-        String restartedNode = internalCluster().startDataOnlyNode();
+        String restartedNode = internalCluster().startDataOnlyNode(replicaDataPathSettings);
         ensureClusterSizeConsistency(); // wait for the master to finish processing join.

         // wait until the system has fetched shard data and we know there is no valid shard copy


@@ -36,7 +36,6 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.store.Store;
@@ -105,15 +104,6 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
         }
     }

-    @Override
-    protected Settings nodeSettings(int nodeOrdinal) {
-        return Settings.builder()
-            .put(super.nodeSettings(nodeOrdinal))
-            // manual collection or upon cluster forming.
-            .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
-            .build();
-    }
-
     @Override
     protected Collection<Class<? extends Plugin>> nodePlugins() {
         return Arrays.asList(TestPlugin.class, MockTransportService.TestPlugin.class);


@@ -32,6 +32,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
@@ -43,10 +44,12 @@ import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.transport.MockTransportService;

+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicReference;
@@ -124,6 +127,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
         logger.info("--> add voting config exclusion for non-master node, to be sure it's not elected");
         client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(new String[]{otherNode})).get();
         logger.info("--> stop master node, no master block should appear");
+        Settings masterDataPathSettings = internalCluster().dataPathSettings(masterNode);
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNode));

         awaitBusy(() -> {
@@ -137,7 +141,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
         assertThat(state.nodes().getMasterNode(), equalTo(null));

         logger.info("--> starting the previous master node again...");
-        node2Name = internalCluster().startNode(settings);
+        node2Name = internalCluster().startNode(Settings.builder().put(settings).put(masterDataPathSettings).build());

         clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID)
             .setWaitForYellowStatus().setWaitForNodes("2").execute().actionGet();
@@ -169,6 +173,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
         logger.info("--> add voting config exclusion for master node, to be sure it's not elected");
         client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(new String[]{masterNode})).get();
         logger.info("--> stop non-master node, no master block should appear");
+        Settings otherNodeDataPathSettings = internalCluster().dataPathSettings(otherNode);
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(otherNode));

         assertBusy(() -> {
@@ -177,7 +182,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
         });

         logger.info("--> starting the previous master node again...");
-        internalCluster().startNode(settings);
+        internalCluster().startNode(Settings.builder().put(settings).put(otherNodeDataPathSettings).build());

         ensureGreen();
         clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID)
@@ -252,6 +257,10 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
             assertHitCount(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 100);
         }

+        List<String> nonMasterNodes = new ArrayList<>(Sets.difference(Sets.newHashSet(internalCluster().getNodeNames()),
+            Collections.singleton(internalCluster().getMasterName())));
+        Settings nonMasterDataPathSettings1 = internalCluster().dataPathSettings(nonMasterNodes.get(0));
+        Settings nonMasterDataPathSettings2 = internalCluster().dataPathSettings(nonMasterNodes.get(1));
         internalCluster().stopRandomNonMasterNode();
         internalCluster().stopRandomNonMasterNode();
@@ -263,7 +272,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
         });

         logger.info("--> start back the 2 nodes ");
-        internalCluster().startNodes(2, settings);
+        internalCluster().startNodes(nonMasterDataPathSettings1, nonMasterDataPathSettings2);

         internalCluster().validateClusterFormed();
         ensureGreen();


@@ -65,6 +65,7 @@ public class SpecificMasterNodesIT extends ESIntegTestCase {
             .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName));

         logger.info("--> stop master node");
+        Settings masterDataPathSettings = internalCluster().dataPathSettings(internalCluster().getMasterName());
         internalCluster().stopCurrentMasterNode();

         try {
@@ -75,9 +76,10 @@ public class SpecificMasterNodesIT extends ESIntegTestCase {
             // all is well, no master elected
         }

-        logger.info("--> start master node");
+        logger.info("--> start previous master node again");
         final String nextMasterEligibleNodeName = internalCluster()
-            .startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true));
+            .startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)
+                .put(masterDataPathSettings));
         assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState()
             .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName));
         assertThat(internalCluster().masterClient().admin().cluster().prepareState()


@@ -75,26 +75,26 @@ public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase {
         return terminal;
     }

-    private MockTerminal unsafeBootstrap(Environment environment, int nodeOrdinal, boolean abort) throws Exception {
-        final MockTerminal terminal = executeCommand(new UnsafeBootstrapMasterCommand(), environment, nodeOrdinal, abort);
+    private MockTerminal unsafeBootstrap(Environment environment, boolean abort) throws Exception {
+        final MockTerminal terminal = executeCommand(new UnsafeBootstrapMasterCommand(), environment, 0, abort);
         assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.CONFIRMATION_MSG));
         assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.MASTER_NODE_BOOTSTRAPPED_MSG));
         return terminal;
     }

-    private MockTerminal detachCluster(Environment environment, int nodeOrdinal, boolean abort) throws Exception {
-        final MockTerminal terminal = executeCommand(new DetachClusterCommand(), environment, nodeOrdinal, abort);
+    private MockTerminal detachCluster(Environment environment, boolean abort) throws Exception {
+        final MockTerminal terminal = executeCommand(new DetachClusterCommand(), environment, 0, abort);
         assertThat(terminal.getOutput(), containsString(DetachClusterCommand.CONFIRMATION_MSG));
         assertThat(terminal.getOutput(), containsString(DetachClusterCommand.NODE_DETACHED_MSG));
         return terminal;
     }

     private MockTerminal unsafeBootstrap(Environment environment) throws Exception {
-        return unsafeBootstrap(environment, 0, false);
+        return unsafeBootstrap(environment, false);
     }

     private MockTerminal detachCluster(Environment environment) throws Exception {
-        return detachCluster(environment, 0, false);
+        return detachCluster(environment, false);
     }

     private void expectThrows(ThrowingRunnable runnable, String message) {
@@ -147,7 +147,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase {
     }

     public void testBootstrapNotBootstrappedCluster() throws Exception {
-        internalCluster().startNode(
+        String node = internalCluster().startNode(
             Settings.builder()
                 .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node startup
                 .build());
@@ -157,14 +157,17 @@ public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase {
             assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
         });

+        Settings dataPathSettings = internalCluster().dataPathSettings(node);
+
         internalCluster().stopRandomDataNode();

-        Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build());
         expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.GLOBAL_GENERATION_MISSING_MSG);
     }

     public void testDetachNotBootstrappedCluster() throws Exception {
-        internalCluster().startNode(
+        String node = internalCluster().startNode(
             Settings.builder()
                 .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") // to ensure quick node startup
                 .build());
@@ -174,19 +177,24 @@ public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase {
             assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID));
         });

+        Settings dataPathSettings = internalCluster().dataPathSettings(node);
+
         internalCluster().stopRandomDataNode();

-        Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build());
         expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.GLOBAL_GENERATION_MISSING_MSG);
     }

     public void testBootstrapNoManifestFile() throws IOException {
         internalCluster().setBootstrapMasterNodeIndex(0);
-        internalCluster().startNode();
+        String node = internalCluster().startNode();
+        Settings dataPathSettings = internalCluster().dataPathSettings(node);
         ensureStableCluster(1);
         NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class);
         internalCluster().stopRandomDataNode();
-        Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build());
         Manifest.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths());

         expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_MANIFEST_FILE_FOUND_MSG);
@@ -194,11 +202,13 @@ public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase {

     public void testDetachNoManifestFile() throws IOException {
         internalCluster().setBootstrapMasterNodeIndex(0);
-        internalCluster().startNode();
+        String node = internalCluster().startNode();
+        Settings dataPathSettings = internalCluster().dataPathSettings(node);
         ensureStableCluster(1);
         NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class);
         internalCluster().stopRandomDataNode();
-        Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build());
         Manifest.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths());

         expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.NO_MANIFEST_FILE_FOUND_MSG);
@@ -206,12 +216,14 @@ public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase {

     public void testBootstrapNoMetaData() throws IOException {
         internalCluster().setBootstrapMasterNodeIndex(0);
-        internalCluster().startNode();
+        String node = internalCluster().startNode();
+        Settings dataPathSettings = internalCluster().dataPathSettings(node);
         ensureStableCluster(1);
         NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class);
         internalCluster().stopRandomDataNode();

-        Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build());
         MetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths());

         expectThrows(() -> unsafeBootstrap(environment), ElasticsearchNodeCommand.NO_GLOBAL_METADATA_MSG);
@@ -219,12 +231,14 @@ public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase {

     public void testDetachNoMetaData() throws IOException {
         internalCluster().setBootstrapMasterNodeIndex(0);
-        internalCluster().startNode();
+        String node = internalCluster().startNode();
+        Settings dataPathSettings = internalCluster().dataPathSettings(node);
         ensureStableCluster(1);
         NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class);
         internalCluster().stopRandomDataNode();

-        Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build());
         MetaData.FORMAT.cleanupOldFiles(-1, nodeEnvironment.nodeDataPaths());

         expectThrows(() -> detachCluster(environment), ElasticsearchNodeCommand.NO_GLOBAL_METADATA_MSG);
@@ -232,22 +246,26 @@ public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase {

     public void testBootstrapAbortedByUser() throws IOException {
         internalCluster().setBootstrapMasterNodeIndex(0);
-        internalCluster().startNode();
+        String node = internalCluster().startNode();
+        Settings dataPathSettings = internalCluster().dataPathSettings(node);
         ensureStableCluster(1);
         internalCluster().stopRandomDataNode();

-        Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
-        expectThrows(() -> unsafeBootstrap(environment, 0, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG);
+        Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build());
+        expectThrows(() -> unsafeBootstrap(environment, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG);
     }

     public void testDetachAbortedByUser() throws IOException {
         internalCluster().setBootstrapMasterNodeIndex(0);
-        internalCluster().startNode();
+        String node = internalCluster().startNode();
+        Settings dataPathSettings = internalCluster().dataPathSettings(node);
         ensureStableCluster(1);
         internalCluster().stopRandomDataNode();

-        Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
-        expectThrows(() -> detachCluster(environment, 0, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG);
+        Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataPathSettings).build());
+        expectThrows(() -> detachCluster(environment, true), ElasticsearchNodeCommand.ABORTED_BY_USER_MSG);
     }

     public void test3MasterNodes2Failed() throws Exception {
@@ -274,6 +292,11 @@ public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase {
         createIndex("test");
         ensureGreen("test");

+        Settings master1DataPathSettings = internalCluster().dataPathSettings(masterNodes.get(0));
+        Settings master2DataPathSettings = internalCluster().dataPathSettings(masterNodes.get(1));
+        Settings master3DataPathSettings = internalCluster().dataPathSettings(masterNodes.get(2));
+        Settings dataNodeDataPathSettings = internalCluster().dataPathSettings(dataNode);
+
         logger.info("--> stop 2nd and 3d master eligible node");
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(1)));
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(2)));
@@ -286,8 +309,9 @@ public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase {
         });

         logger.info("--> try to unsafely bootstrap 1st master-eligible node, while node lock is held");
-        final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
-        expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG);
+        Environment environmentMaster1 = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(master1DataPathSettings).build());
+        expectThrows(() -> unsafeBootstrap(environmentMaster1), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG);

         logger.info("--> stop 1st master-eligible node and data-only node");
         NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class);
@@ -295,20 +319,22 @@ public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase {
         internalCluster().stopRandomDataNode();

         logger.info("--> unsafely-bootstrap 1st master-eligible node");
-        MockTerminal terminal = unsafeBootstrap(environment);
+        MockTerminal terminal = unsafeBootstrap(environmentMaster1);
         MetaData metaData = MetaData.FORMAT.loadLatestState(logger, xContentRegistry(), nodeEnvironment.nodeDataPaths());
         assertThat(terminal.getOutput(), containsString(
             String.format(Locale.ROOT, UnsafeBootstrapMasterCommand.CLUSTER_STATE_TERM_VERSION_MSG_FORMAT,
                 metaData.coordinationMetaData().term(), metaData.version())));

         logger.info("--> start 1st master-eligible node");
-        internalCluster().startMasterOnlyNode();
+        internalCluster().startMasterOnlyNode(master1DataPathSettings);

         logger.info("--> detach-cluster on data-only node");
-        detachCluster(environment, 1, false);
+        Environment environmentData = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataNodeDataPathSettings).build());
+        detachCluster(environmentData, false);

         logger.info("--> start data-only node");
-        String dataNode2 = internalCluster().startDataOnlyNode();
+        String dataNode2 = internalCluster().startDataOnlyNode(dataNodeDataPathSettings);

         logger.info("--> ensure there is no NO_MASTER_BLOCK and unsafe-bootstrap is reflected in cluster state");
         assertBusy(() -> {
@@ -322,11 +348,16 @@ public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase {
         ensureGreen("test");

         logger.info("--> detach-cluster on 2nd and 3rd master-eligible nodes");
-        detachCluster(environment, 2, false);
-        detachCluster(environment, 3, false);
+        Environment environmentMaster2 = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(master2DataPathSettings).build());
+        detachCluster(environmentMaster2, false);
+        Environment environmentMaster3 = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(master3DataPathSettings).build());
+        detachCluster(environmentMaster3, false);

         logger.info("--> start 2nd and 3rd master-eligible nodes and ensure 4 nodes stable cluster");
-        internalCluster().startMasterOnlyNodes(2);
+        internalCluster().startMasterOnlyNode(master2DataPathSettings);
+        internalCluster().startMasterOnlyNode(master3DataPathSettings);
         ensureStableCluster(4);
     }

@@ -349,9 +380,11 @@ public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase {
         assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));

         logger.info("--> stop data-only node and detach it from the old cluster");
+        Settings dataNodeDataPathSettings = internalCluster().dataPathSettings(dataNode);
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataNode));
-        final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
-        detachCluster(environment, 1, false);
+        final Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(dataNodeDataPathSettings).build());
+        detachCluster(environment, false);

         logger.info("--> stop master-eligible node, clear its data and start it again - new cluster should form");
         internalCluster().restartNode(masterNode, new InternalTestCluster.RestartCallback(){
@@ -362,7 +395,7 @@ public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase {
         });

         logger.info("--> start data-only only node and ensure 2 nodes stable cluster");
-        internalCluster().startDataOnlyNode();
+        internalCluster().startDataOnlyNode(dataNodeDataPathSettings);
         ensureStableCluster(2);

         logger.info("--> verify that the dangling index exists and has green status");
@@ -377,15 +410,18 @@ public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase {

     public void testNoInitialBootstrapAfterDetach() throws Exception {
         internalCluster().setBootstrapMasterNodeIndex(0);
-        internalCluster().startMasterOnlyNode();
+        String masterNode = internalCluster().startMasterOnlyNode();
+        Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode);
         internalCluster().stopCurrentMasterNode();

-        final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        final Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(masterNodeDataPathSettings).build());
         detachCluster(environment);

         String node = internalCluster().startMasterOnlyNode(Settings.builder()
             // give the cluster 2 seconds to elect the master (it should not)
             .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "2s")
+            .put(masterNodeDataPathSettings)
             .build());

         ClusterState state = internalCluster().client().admin().cluster().prepareState().setLocal(true)
@@ -397,7 +433,8 @@ public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase {

     public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetaData() throws Exception {
         internalCluster().setBootstrapMasterNodeIndex(0);
-        internalCluster().startMasterOnlyNode();
+        String masterNode = internalCluster().startMasterOnlyNode();
+        Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode);
         ClusterUpdateSettingsRequest req = new ClusterUpdateSettingsRequest().persistentSettings(
             Settings.builder().put(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "1234kb"));
         internalCluster().client().admin().cluster().updateSettings(req).get();
@@ -408,11 +445,12 @@ public class UnsafeBootstrapAndDetachCommandIT extends ESIntegTestCase {

         internalCluster().stopCurrentMasterNode();

-        final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
+        final Environment environment = TestEnvironment.newEnvironment(
+            Settings.builder().put(internalCluster().getDefaultSettings()).put(masterNodeDataPathSettings).build());
         detachCluster(environment);
         unsafeBootstrap(environment);

-        internalCluster().startMasterOnlyNode();
+        internalCluster().startMasterOnlyNode(masterNodeDataPathSettings);
         ensureGreen();

         state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState();


@@ -105,6 +105,8 @@ public class AllocationIdIT extends ESIntegTestCase {
         internalCluster().assertSameDocIdsOnShards();
         // initial set up is done

+        Settings node1DataPathSettings = internalCluster().dataPathSettings(node1);
+        Settings node2DataPathSettings = internalCluster().dataPathSettings(node2);
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1));

         // index more docs to node2 that marks node1 as stale
@@ -117,7 +119,7 @@ public class AllocationIdIT extends ESIntegTestCase {
         putFakeCorruptionMarker(indexSettings, shardId, indexPath);

         // thanks to master node1 is out of sync
-        node1 = internalCluster().startNode();
+        node1 = internalCluster().startNode(node1DataPathSettings);

         // there is only _stale_ primary
         checkNoValidShardCopy(indexName, shardId);
@@ -157,7 +159,7 @@ public class AllocationIdIT extends ESIntegTestCase {
         ensureYellow(indexName);

         // bring node2 back
-        node2 = internalCluster().startNode();
+        node2 = internalCluster().startNode(node2DataPathSettings);
         ensureGreen(indexName);

         assertThat(historyUUID(node1, indexName), not(equalTo(historyUUID)));


@@ -67,11 +67,13 @@ public class DelayedAllocationIT extends ESIntegTestCase {
             .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1))).get();
         ensureGreen("test");
         indexRandomData();
-        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard()));
+        String nodeWithShard = findNodeWithShard();
+        Settings nodeWithShardDataPathSettings = internalCluster().dataPathSettings(nodeWithShard);
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeWithShard));
         assertBusy(() -> assertThat(client().admin().cluster().prepareState().all().get().getState()
             .getRoutingNodes().unassigned().size() > 0, equalTo(true)));
         assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1));
-        internalCluster().startNode(); // this will use the same data location as the stopped node
+        internalCluster().startNode(nodeWithShardDataPathSettings); // this will use the same data location as the stopped node
         ensureGreen("test");
     }


@@ -118,7 +118,8 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
         assertThat(bulkResponse.getItems()[1].getResponse().getResult(), equalTo(DocWriteResponse.Result.UPDATED));
     }

-    private void createStaleReplicaScenario(String master) throws Exception {
+    // returns data paths settings of in-sync shard copy
+    private Settings createStaleReplicaScenario(String master) throws Exception {
         client().prepareIndex("test", "type1").setSource(jsonBuilder()
             .startObject().field("field", "value1").endObject()).get();
         refresh();
@@ -150,6 +151,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
             .startObject().field("field", "value1").endObject()).get();

         logger.info("--> shut down node that has new acknowledged document");
+        final Settings inSyncDataPathSettings = internalCluster().dataPathSettings(replicaNode);
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode));

         ensureStableCluster(1, master);
@@ -167,6 +169,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
         // kick reroute a second time and check that all shards are unassigned
         assertThat(client(master).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(),
             equalTo(2));
+        return inSyncDataPathSettings;
     }

     public void testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception {
@@ -177,10 +180,10 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
             .setSettings(Settings.builder().put("index.number_of_shards", 1)
                 .put("index.number_of_replicas", 1)).get());
         ensureGreen();
-        createStaleReplicaScenario(master);
+        final Settings inSyncDataPathSettings = createStaleReplicaScenario(master);

         logger.info("--> starting node that reuses data folder with the up-to-date primary shard");
-        internalCluster().startDataOnlyNode(Settings.EMPTY);
+        internalCluster().startDataOnlyNode(inSyncDataPathSettings);

         logger.info("--> check that the up-to-date primary shard gets promoted and that documents are available");
         ensureYellow("test");
@@ -373,6 +376,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
             .put("index.unassigned.node_left.delayed_timeout", "0ms")).get());
         String replicaNode = internalCluster().startDataOnlyNode(Settings.EMPTY);
         ensureGreen("test");
+        final Settings inSyncDataPathSettings = internalCluster().dataPathSettings(replicaNode);
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode));
         ensureYellow("test");
         assertEquals(2, client().admin().cluster().prepareState().get().getState().metaData().index("test")
@@ -390,7 +394,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
             .metaData().index("test").inSyncAllocationIds(0).size());

         logger.info("--> starting node that reuses data folder with the up-to-date shard");
-        internalCluster().startDataOnlyNode(Settings.EMPTY);
+        internalCluster().startDataOnlyNode(inSyncDataPathSettings);
         ensureGreen("test");
     }

@@ -402,6 +406,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
             1).put("index.unassigned.node_left.delayed_timeout", "0ms")).get());
         String replicaNode = internalCluster().startDataOnlyNode(Settings.EMPTY);
         ensureGreen("test");
+        final Settings inSyncDataPathSettings = internalCluster().dataPathSettings(replicaNode);
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode));
         ensureYellow("test");
         assertEquals(2, client().admin().cluster().prepareState().get().getState()
@@ -424,7 +429,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
             .metaData().index("test").inSyncAllocationIds(0).size());

         logger.info("--> starting node that reuses data folder with the up-to-date shard");
-        internalCluster().startDataOnlyNode(Settings.EMPTY);
+        internalCluster().startDataOnlyNode(inSyncDataPathSettings);
         assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState()
             .getRoutingTable().index("test").allPrimaryShardsUnassigned()));
     }


@@ -39,7 +39,8 @@ public class NodeEnvironmentIT extends ESIntegTestCase {
         final String indexName = "test-fail-on-data";

         logger.info("--> starting one node");
-        internalCluster().startNode();
+        String node = internalCluster().startNode();
+        Settings dataPathSettings = internalCluster().dataPathSettings(node);

         logger.info("--> creating index");
         prepareCreate(indexName, Settings.builder()
@@ -69,13 +70,12 @@ public class NodeEnvironmentIT extends ESIntegTestCase {
             + Node.NODE_MASTER_SETTING.getKey()
             + "=false, but has index metadata"));

-        // client() also starts the node
+        logger.info("--> start the node again with node.data=true and node.master=true");
+        internalCluster().startNode(dataPathSettings);
+
         logger.info("--> indexing a simple document");
         client().prepareIndex(indexName, "type1", "1").setSource("field1", "value1").get();

-        logger.info("--> restarting the node with node.data=true and node.master=true");
-        internalCluster().restartRandomDataNode();
-
         logger.info("--> restarting the node with node.data=false");
         ex = expectThrows(IllegalStateException.class,
             "Node started with node.data=false while having existing shard data must fail",


@@ -37,8 +37,8 @@ public class NodeRepurposeCommandIT extends ESIntegTestCase {
         final String indexName = "test-repurpose";

         logger.info("--> starting two nodes");
-        internalCluster().startMasterOnlyNode();
-        internalCluster().startDataOnlyNode();
+        final String masterNode = internalCluster().startMasterOnlyNode();
+        final String dataNode = internalCluster().startDataOnlyNode();

         logger.info("--> creating index");
         prepareCreate(indexName, Settings.builder()
@@ -54,31 +54,44 @@ public class NodeRepurposeCommandIT extends ESIntegTestCase {
         assertTrue(client().prepareGet(indexName, "type1", "1").get().isExists());

+        final Settings masterNodeDataPathSettings = internalCluster().dataPathSettings(masterNode);
+        final Settings dataNodeDataPathSettings = internalCluster().dataPathSettings(dataNode);
         final Settings noMasterNoDataSettings = Settings.builder()
             .put(Node.NODE_DATA_SETTING.getKey(), false)
             .put(Node.NODE_MASTER_SETTING.getKey(), false)
             .build();

+        final Settings noMasterNoDataSettingsForMasterNode = Settings.builder()
+            .put(noMasterNoDataSettings)
+            .put(masterNodeDataPathSettings)
+            .build();
+
+        final Settings noMasterNoDataSettingsForDataNode = Settings.builder()
+            .put(noMasterNoDataSettings)
+            .put(dataNodeDataPathSettings)
+            .build();
+
         internalCluster().stopRandomDataNode();

         // verify test setup
         logger.info("--> restarting node with node.data=false and node.master=false");
         IllegalStateException ex = expectThrows(IllegalStateException.class,
             "Node started with node.data=false and node.master=false while having existing index metadata must fail",
-            () -> internalCluster().startCoordinatingOnlyNode(Settings.EMPTY)
+            () -> internalCluster().startCoordinatingOnlyNode(dataNodeDataPathSettings)
         );

         logger.info("--> Repurposing node 1");
-        executeRepurposeCommandForOrdinal(noMasterNoDataSettings, indexUUID, 1, 1);
+        executeRepurposeCommand(noMasterNoDataSettingsForDataNode, indexUUID, 1);

         ElasticsearchException lockedException = expectThrows(ElasticsearchException.class,
-            () -> executeRepurposeCommandForOrdinal(noMasterNoDataSettings, indexUUID, 0, 1)
+            () -> executeRepurposeCommand(noMasterNoDataSettingsForMasterNode, indexUUID, 1)
         );

         assertThat(lockedException.getMessage(), containsString(NodeRepurposeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG));

         logger.info("--> Starting node after repurpose");
-        internalCluster().startCoordinatingOnlyNode(Settings.EMPTY);
+        internalCluster().startCoordinatingOnlyNode(dataNodeDataPathSettings);

         assertTrue(indexExists(indexName));
         expectThrows(NoShardAvailableActionException.class, () -> client().prepareGet(indexName, "type1", "1").get());
@@ -88,12 +101,12 @@ public class NodeRepurposeCommandIT extends ESIntegTestCase {
         internalCluster().stopRandomNode(s -> true);
         internalCluster().stopRandomNode(s -> true);

-        executeRepurposeCommandForOrdinal(noMasterNoDataSettings, indexUUID, 0, 0);
+        executeRepurposeCommand(noMasterNoDataSettingsForMasterNode, indexUUID, 0);

         // by restarting as master and data node, we can check that the index definition was really deleted and also that the tool
         // does not mess things up so much that the nodes cannot boot as master or data node any longer.
-        internalCluster().startMasterOnlyNode();
-        internalCluster().startDataOnlyNode();
+        internalCluster().startMasterOnlyNode(masterNodeDataPathSettings);
+        internalCluster().startDataOnlyNode(dataNodeDataPathSettings);

         ensureGreen();

@@ -101,8 +114,7 @@ public class NodeRepurposeCommandIT extends ESIntegTestCase {
         assertFalse(indexExists(indexName));
     }

-    private void executeRepurposeCommandForOrdinal(Settings settings, String indexUUID, int ordinal,
-                                                   int expectedShardCount) throws Exception {
+    private void executeRepurposeCommand(Settings settings, String indexUUID, int expectedShardCount) throws Exception {
         boolean verbose = randomBoolean();
         Settings settingsWithPath = Settings.builder().put(internalCluster().getDefaultSettings()).put(settings).build();
         int expectedIndexCount = TestEnvironment.newEnvironment(settingsWithPath).dataFiles().length;
@@ -111,6 +123,6 @@ public class NodeRepurposeCommandIT extends ESIntegTestCase {
             not(contains(NodeRepurposeCommand.PRE_V7_MESSAGE)),
             NodeRepurposeCommandTests.conditionalNot(containsString(indexUUID), verbose == false));
         NodeRepurposeCommandTests.verifySuccess(settingsWithPath, matcher,
-            verbose, ordinal);
+            verbose);
     }
 }


@@ -190,14 +190,10 @@ public class NodeRepurposeCommandTests extends ESTestCase {
         }
     }

-    private static void verifySuccess(Settings settings, Matcher<String> outputMatcher, boolean verbose) throws Exception {
-        verifySuccess(settings, outputMatcher, verbose, 0);
-    }
-
-    static void verifySuccess(Settings settings, Matcher<String> outputMatcher, boolean verbose, int ordinal) throws Exception {
+    static void verifySuccess(Settings settings, Matcher<String> outputMatcher, boolean verbose) throws Exception {
         withTerminal(verbose, outputMatcher, terminal -> {
             terminal.addTextInput(randomFrom("y", "Y"));
-            executeRepurposeCommand(terminal, settings, ordinal);
+            executeRepurposeCommand(terminal, settings, 0);
             assertThat(terminal.getOutput(), containsString("Node successfully repurposed"));
         });
     }


@@ -64,6 +64,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.stream.IntStream;
@@ -339,7 +340,9 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {

     public void testLatestVersionLoaded() throws Exception {
         // clean two nodes
-        internalCluster().startNodes(2, Settings.builder().put("gateway.recover_after_nodes", 2).build());
+        List<String> nodes = internalCluster().startNodes(2, Settings.builder().put("gateway.recover_after_nodes", 2).build());
+        Settings node1DataPathSettings = internalCluster().dataPathSettings(nodes.get(0));
+        Settings node2DataPathSettings = internalCluster().dataPathSettings(nodes.get(1));

         assertAcked(client().admin().indices().prepareCreate("test"));
         client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).execute()
@@ -393,7 +396,9 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {

         logger.info("--> starting the two nodes back");

-        internalCluster().startNodes(2, Settings.builder().put("gateway.recover_after_nodes", 2).build());
+        internalCluster().startNodes(
+            Settings.builder().put(node1DataPathSettings).put("gateway.recover_after_nodes", 2).build(),
+            Settings.builder().put(node2DataPathSettings).put("gateway.recover_after_nodes", 2).build());

         logger.info("--> running cluster_health (wait for the shards to startup)");
         ensureGreen();


@ -140,7 +140,10 @@ public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
final MockTerminal terminal = new MockTerminal(); final MockTerminal terminal = new MockTerminal();
final OptionParser parser = command.getParser(); final OptionParser parser = command.getParser();
final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); final Settings nodePathSettings = internalCluster().dataPathSettings(node);
final Environment environment = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(nodePathSettings).build());
final OptionSet options = parser.parse("-index", indexName, "-shard-id", "0"); final OptionSet options = parser.parse("-index", indexName, "-shard-id", "0");
// Try running it before the node is stopped (and shard is closed) // Try running it before the node is stopped (and shard is closed)
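Because every node now owns a distinct path.data, command-line tools exercised by these tests must be handed an Environment built from that node's paths rather than from the cluster defaults alone. A condensed, illustrative sketch of that wiring (index name and node variable are placeholders; the calls mirror the surrounding test and are not part of the diff):

    Settings nodePathSettings = internalCluster().dataPathSettings(node); // capture while the node is up
    Environment environment = TestEnvironment.newEnvironment(
        Settings.builder().put(internalCluster().getDefaultSettings()).put(nodePathSettings).build());
    RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand();
    MockTerminal terminal = new MockTerminal();
    terminal.addTextInput("y"); // confirm the tool's safety prompt
    OptionSet options = command.getParser().parse("-index", "my-index", "-shard-id", "0");
    command.execute(terminal, options, environment);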
@ -305,6 +308,9 @@ public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
logger.info("--> performed extra flushing on replica"); logger.info("--> performed extra flushing on replica");
} }
final Settings node1PathSettings = internalCluster().dataPathSettings(node1);
final Settings node2PathSettings = internalCluster().dataPathSettings(node2);
// shut down the replica node to be tested later // shut down the replica node to be tested later
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node2)); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node2));
@ -348,8 +354,8 @@ public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
} }
}); });
final Settings defaultSettings = internalCluster().getDefaultSettings(); final Environment environment = TestEnvironment.newEnvironment(
final Environment environment = TestEnvironment.newEnvironment(defaultSettings); Settings.builder().put(internalCluster().getDefaultSettings()).put(node1PathSettings).build());
terminal.addTextInput("y"); terminal.addTextInput("y");
OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString()); OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString());
@ -411,7 +417,7 @@ public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
assertHitCount(client().prepareSearch(indexName).setQuery(matchAllQuery()).get(), numDocsToKeep); assertHitCount(client().prepareSearch(indexName).setQuery(matchAllQuery()).get(), numDocsToKeep);
logger.info("--> starting the replica node to test recovery"); logger.info("--> starting the replica node to test recovery");
internalCluster().startNode(); internalCluster().startNode(node2PathSettings);
ensureGreen(indexName); ensureGreen(indexName);
for (String node : internalCluster().nodesInclude(indexName)) { for (String node : internalCluster().nodesInclude(indexName)) {
SearchRequestBuilder q = client().prepareSearch(indexName).setPreference("_only_nodes:" + node).setQuery(matchAllQuery()); SearchRequestBuilder q = client().prepareSearch(indexName).setPreference("_only_nodes:" + node).setQuery(matchAllQuery());
@ -473,7 +479,10 @@ public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
final ShardId shardId = new ShardId(resolveIndex(indexName), 0); final ShardId shardId = new ShardId(resolveIndex(indexName), 0);
final Set<Path> translogDirs = getDirs(node2, shardId, ShardPath.TRANSLOG_FOLDER_NAME); final Set<Path> translogDirs = getDirs(node2, shardId, ShardPath.TRANSLOG_FOLDER_NAME);
// stop data nodes. After the restart the 1st node will be primary and the 2nd node will be replica final Settings node1PathSettings = internalCluster().dataPathSettings(node1);
final Settings node2PathSettings = internalCluster().dataPathSettings(node2);
// stop data nodes
internalCluster().stopRandomDataNode(); internalCluster().stopRandomDataNode();
internalCluster().stopRandomDataNode(); internalCluster().stopRandomDataNode();
@ -481,53 +490,32 @@ public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
logger.info("--> corrupting translog"); logger.info("--> corrupting translog");
TestTranslog.corruptRandomTranslogFile(logger, random(), translogDirs); TestTranslog.corruptRandomTranslogFile(logger, random(), translogDirs);
// Restart the single node // Start the node with the non-corrupted data path
logger.info("--> starting node"); logger.info("--> starting node");
internalCluster().startNode(); internalCluster().startNode(node1PathSettings);
ensureYellow(); ensureYellow();
// Run a search and make sure it succeeds // Run a search and make sure it succeeds
assertHitCount(client().prepareSearch(indexName).setQuery(matchAllQuery()).get(), totalDocs); assertHitCount(client().prepareSearch(indexName).setQuery(matchAllQuery()).get(), totalDocs);
// check replica corruption
final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand(); final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand();
final MockTerminal terminal = new MockTerminal(); final MockTerminal terminal = new MockTerminal();
final OptionParser parser = command.getParser(); final OptionParser parser = command.getParser();
final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings()); for (Path translogDir : translogDirs) {
final Environment environment = TestEnvironment.newEnvironment(
internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { Settings.builder().put(internalCluster().getDefaultSettings()).put(node2PathSettings).build());
@Override terminal.addTextInput("y");
public Settings onNodeStopped(String nodeName) throws Exception { OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString());
logger.info("--> node {} stopped", nodeName); logger.info("--> running command for [{}]", translogDir.toAbsolutePath());
for (Path translogDir : translogDirs) { command.execute(terminal, options, environment);
final Path idxLocation = translogDir.getParent().resolve(ShardPath.INDEX_FOLDER_NAME); logger.info("--> output:\n{}", terminal.getOutput());
assertBusy(() -> { }
logger.info("--> checking that lock has been released for {}", idxLocation);
try (Directory dir = FSDirectory.open(idxLocation, NativeFSLockFactory.INSTANCE);
Lock writeLock = dir.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
// Great, do nothing, we just wanted to obtain the lock
} catch (LockObtainFailedException lofe) {
logger.info("--> failed acquiring lock for {}", idxLocation);
fail("still waiting for lock release at [" + idxLocation + "]");
} catch (IOException ioe) {
fail("Got an IOException: " + ioe);
}
});
terminal.addTextInput("y");
OptionSet options = parser.parse("-d", translogDir.toAbsolutePath().toString());
logger.info("--> running command for [{}]", translogDir.toAbsolutePath());
command.execute(terminal, options, environment);
logger.info("--> output:\n{}", terminal.getOutput());
}
return super.onNodeStopped(nodeName);
}
});
logger.info("--> starting the replica node to test recovery"); logger.info("--> starting the replica node to test recovery");
internalCluster().startNode(); internalCluster().startNode(node2PathSettings);
ensureGreen(indexName); ensureGreen(indexName);
for (String node : internalCluster().nodesInclude(indexName)) { for (String node : internalCluster().nodesInclude(indexName)) {
assertHitCount(client().prepareSearch(indexName) assertHitCount(client().prepareSearch(indexName)
@ -574,15 +562,18 @@ public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand(); final RemoveCorruptedShardDataCommand command = new RemoveCorruptedShardDataCommand();
final OptionParser parser = command.getParser(); final OptionParser parser = command.getParser();
final Environment environment = TestEnvironment.newEnvironment(internalCluster().getDefaultSettings());
final Map<String, Path> indexPathByNodeName = new HashMap<>(); final Map<String, Path> indexPathByNodeName = new HashMap<>();
final Map<String, Environment> environmentByNodeName = new HashMap<>();
for (String nodeName : nodeNames) { for (String nodeName : nodeNames) {
final String nodeId = nodeNameToNodeId.get(nodeName); final String nodeId = nodeNameToNodeId.get(nodeName);
final Set<Path> indexDirs = getDirs(nodeId, shardId, ShardPath.INDEX_FOLDER_NAME); final Set<Path> indexDirs = getDirs(nodeId, shardId, ShardPath.INDEX_FOLDER_NAME);
assertThat(indexDirs, hasSize(1)); assertThat(indexDirs, hasSize(1));
indexPathByNodeName.put(nodeName, indexDirs.iterator().next()); indexPathByNodeName.put(nodeName, indexDirs.iterator().next());
final Environment environment = TestEnvironment.newEnvironment(
Settings.builder().put(internalCluster().getDefaultSettings()).put(internalCluster().dataPathSettings(nodeName)).build());
environmentByNodeName.put(nodeName, environment);
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeName)); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeName));
logger.info(" -- stopped {}", nodeName); logger.info(" -- stopped {}", nodeName);
} }
@ -590,7 +581,7 @@ public class RemoveCorruptedShardDataCommandIT extends ESIntegTestCase {
for (String nodeName : nodeNames) { for (String nodeName : nodeNames) {
final Path indexPath = indexPathByNodeName.get(nodeName); final Path indexPath = indexPathByNodeName.get(nodeName);
final OptionSet options = parser.parse("--dir", indexPath.toAbsolutePath().toString()); final OptionSet options = parser.parse("--dir", indexPath.toAbsolutePath().toString());
command.findAndProcessShardPath(options, environment, command.findAndProcessShardPath(options, environmentByNodeName.get(nodeName),
shardPath -> assertThat(shardPath.resolveIndex(), equalTo(indexPath))); shardPath -> assertThat(shardPath.resolveIndex(), equalTo(indexPath)));
} }
} }


@ -22,7 +22,6 @@ package org.elasticsearch.indices.memory.breaker;
import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.client.Client; import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase;
@ -41,7 +40,6 @@ public class CircuitBreakerNoopIT extends ESIntegTestCase {
@Override @Override
protected Settings nodeSettings(int nodeOrdinal) { protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder() return Settings.builder()
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop") .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop")
// This is set low, because if the "noop" is not a noop, it will break // This is set low, because if the "noop" is not a noop, it will break
.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b") .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b")


@ -853,8 +853,12 @@ public class IndexRecoveryIT extends ESIntegTestCase {
flush(indexName); flush(indexName);
} }
internalCluster().stopRandomNode(s -> true); String firstNodeToStop = randomFrom(internalCluster().getNodeNames());
internalCluster().stopRandomNode(s -> true); Settings firstNodeToStopDataPathSettings = internalCluster().dataPathSettings(firstNodeToStop);
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(firstNodeToStop));
String secondNodeToStop = randomFrom(internalCluster().getNodeNames());
Settings secondNodeToStopDataPathSettings = internalCluster().dataPathSettings(secondNodeToStop);
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(secondNodeToStop));
final long desyncNanoTime = System.nanoTime(); final long desyncNanoTime = System.nanoTime();
while (System.nanoTime() <= desyncNanoTime) { while (System.nanoTime() <= desyncNanoTime) {
@ -871,7 +875,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
assertAcked(client().admin().indices().prepareUpdateSettings(indexName) assertAcked(client().admin().indices().prepareUpdateSettings(indexName)
.setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1))); .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)));
internalCluster().startNode(); internalCluster().startNode(randomFrom(firstNodeToStopDataPathSettings, secondNodeToStopDataPathSettings));
ensureGreen(indexName); ensureGreen(indexName);
final RecoveryResponse recoveryResponse = client().admin().indices().recoveries(new RecoveryRequest(indexName)).get(); final RecoveryResponse recoveryResponse = client().admin().indices().recoveries(new RecoveryRequest(indexName)).get();


@ -74,7 +74,7 @@ import static org.hamcrest.Matchers.equalTo;
public class IndicesStoreIntegrationIT extends ESIntegTestCase { public class IndicesStoreIntegrationIT extends ESIntegTestCase {
@Override @Override
protected Settings nodeSettings(int nodeOrdinal) { // simplify this and only use a single data path protected Settings nodeSettings(int nodeOrdinal) { // simplify this and only use a single data path
return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(Environment.PATH_DATA_SETTING.getKey(), "") return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(Environment.PATH_DATA_SETTING.getKey(), createTempDir())
// by default this value is 1 sec in tests (30 sec in practice) but we adding disruption here // by default this value is 1 sec in tests (30 sec in practice) but we adding disruption here
// which is between 1 and 2 sec can cause each of the shard deletion requests to timeout. // which is between 1 and 2 sec can cause each of the shard deletion requests to timeout.
// to prevent this we are setting the timeout here to something highish ie. the default in practice // to prevent this we are setting the timeout here to something highish ie. the default in practice
@ -335,8 +335,11 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none"))); .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")));
logger.debug("--> shutting down two random nodes"); logger.debug("--> shutting down two random nodes");
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1, node2, node3)); List<String> nodesToShutDown = randomSubsetOf(2, node1, node2, node3);
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1, node2, node3)); Settings node1DataPathSettings = internalCluster().dataPathSettings(nodesToShutDown.get(0));
Settings node2DataPathSettings = internalCluster().dataPathSettings(nodesToShutDown.get(1));
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodesToShutDown.get(0)));
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodesToShutDown.get(1)));
logger.debug("--> verifying index is red"); logger.debug("--> verifying index is red");
ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("3").get(); ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("3").get();
@ -369,7 +372,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
logger.debug("--> starting the two old nodes back"); logger.debug("--> starting the two old nodes back");
internalCluster().startDataOnlyNodes(2); internalCluster().startNodes(node1DataPathSettings, node2DataPathSettings);
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("5").get().isTimedOut()); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("5").get().isTimedOut());


@ -110,7 +110,6 @@ import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.env.TestEnvironment;
import org.elasticsearch.index.Index; import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexModule;
@ -1805,7 +1804,6 @@ public abstract class ESIntegTestCase extends ESTestCase {
*/ */
protected Settings nodeSettings(int nodeOrdinal) { protected Settings nodeSettings(int nodeOrdinal) {
Settings.Builder builder = Settings.builder() Settings.Builder builder = Settings.builder()
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE)
// Default the watermarks to absurdly low to prevent the tests // Default the watermarks to absurdly low to prevent the tests
// from failing on nodes without enough disk space // from failing on nodes without enough disk space
.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b") .put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b")


@ -141,6 +141,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function; import java.util.function.Function;
import java.util.function.Predicate; import java.util.function.Predicate;
import java.util.stream.Collectors; import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream; import java.util.stream.Stream;
import static java.util.Collections.emptyList; import static java.util.Collections.emptyList;
@ -244,6 +245,8 @@ public final class InternalTestCluster extends TestCluster {
private final boolean forbidPrivateIndexSettings; private final boolean forbidPrivateIndexSettings;
private final int numDataPaths;
/** /**
* All nodes started by the cluster will have their name set to nodePrefix followed by a positive number * All nodes started by the cluster will have their name set to nodePrefix followed by a positive number
*/ */
@ -359,20 +362,8 @@ public final class InternalTestCluster extends TestCluster {
numSharedDedicatedMasterNodes, numSharedDataNodes, numSharedCoordOnlyNodes, numSharedDedicatedMasterNodes, numSharedDataNodes, numSharedCoordOnlyNodes,
autoManageMinMasterNodes ? "auto-managed" : "manual"); autoManageMinMasterNodes ? "auto-managed" : "manual");
this.nodeConfigurationSource = nodeConfigurationSource; this.nodeConfigurationSource = nodeConfigurationSource;
numDataPaths = random.nextInt(5) == 0 ? 2 + random.nextInt(3) : 1;
Builder builder = Settings.builder(); Builder builder = Settings.builder();
if (random.nextInt(5) == 0) { // sometimes set this
// randomize (multi/single) data path, special case for 0, don't set it at all...
final int numOfDataPaths = random.nextInt(5);
if (numOfDataPaths > 0) {
StringBuilder dataPath = new StringBuilder();
for (int i = 0; i < numOfDataPaths; i++) {
dataPath.append(baseDir.resolve("d" + i).toAbsolutePath()).append(',');
}
builder.put(Environment.PATH_DATA_SETTING.getKey(), dataPath.toString());
}
}
builder.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE);
builder.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), baseDir.resolve("custom"));
builder.put(Environment.PATH_HOME_SETTING.getKey(), baseDir); builder.put(Environment.PATH_HOME_SETTING.getKey(), baseDir);
builder.put(Environment.PATH_REPO_SETTING.getKey(), baseDir.resolve("repos")); builder.put(Environment.PATH_REPO_SETTING.getKey(), baseDir.resolve("repos"));
builder.put(TransportSettings.PORT.getKey(), 0); builder.put(TransportSettings.PORT.getKey(), 0);
@ -640,11 +631,24 @@ public final class InternalTestCluster extends TestCluster {
final String name = buildNodeName(nodeId, settings); final String name = buildNodeName(nodeId, settings);
final Settings.Builder updatedSettings = Settings.builder() final Settings.Builder updatedSettings = Settings.builder();
.put(Environment.PATH_HOME_SETTING.getKey(), baseDir) // allow overriding path.home
.put(settings) updatedSettings.put(Environment.PATH_HOME_SETTING.getKey(), baseDir);
.put("node.name", name)
.put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), seed); if (numDataPaths > 1) {
updatedSettings.putList(Environment.PATH_DATA_SETTING.getKey(), IntStream.range(0, numDataPaths).mapToObj(i ->
baseDir.resolve(name).resolve("d" + i).toString()).collect(Collectors.toList()));
} else {
updatedSettings.put(Environment.PATH_DATA_SETTING.getKey(), baseDir.resolve(name));
}
updatedSettings.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), baseDir.resolve(name + "-shared"));
// allow overriding the above
updatedSettings.put(settings);
// force certain settings
updatedSettings.put("node.name", name);
updatedSettings.put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), seed);
final String discoveryType = DISCOVERY_TYPE_SETTING.get(updatedSettings.build()); final String discoveryType = DISCOVERY_TYPE_SETTING.get(updatedSettings.build());
final boolean usingSingleNodeDiscovery = discoveryType.equals("single-node"); final boolean usingSingleNodeDiscovery = discoveryType.equals("single-node");
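With the randomized multi-path block gone, data locations are derived deterministically from the node name, which is what makes it possible to find and reuse a node's folders later. For a node named node_t1 under a base directory of /tmp/cluster (paths purely illustrative, not from the diff), the defaults work out roughly as:

    // path.home        -> /tmp/cluster
    // path.data        -> /tmp/cluster/node_t1            (or node_t1/d0, node_t1/d1, ... when numDataPaths > 1)
    // path.shared_data -> /tmp/cluster/node_t1-shared

Caller-supplied settings can still override these defaults, while node.name and the node id seed are forced afterwards so they cannot be overridden.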
@ -1003,7 +1007,7 @@ public final class InternalTestCluster extends TestCluster {
if (closed.get() == false) { if (closed.get() == false) {
throw new IllegalStateException("node " + name + " should be closed before recreating it"); throw new IllegalStateException("node " + name + " should be closed before recreating it");
} }
// use a new seed to make sure we have new node id // use a new seed to make sure we generate a fresh new node id if the data folder has been wiped
final long newIdSeed = NodeEnvironment.NODE_ID_SEED_SETTING.get(node.settings()) + 1; final long newIdSeed = NodeEnvironment.NODE_ID_SEED_SETTING.get(node.settings()) + 1;
Settings finalSettings = Settings.builder() Settings finalSettings = Settings.builder()
.put(originalNodeSettings) .put(originalNodeSettings)
@ -1590,6 +1594,12 @@ public final class InternalTestCluster extends TestCluster {
return node.injector().getInstance(clazz); return node.injector().getInstance(clazz);
} }
public Settings dataPathSettings(String node) {
return nodes.values().stream().filter(nc -> nc.name.equals(node)).findFirst().get().node().settings()
.filter(key ->
key.equals(Environment.PATH_DATA_SETTING.getKey()) || key.equals(Environment.PATH_SHARED_DATA_SETTING.getKey()));
}
@Override @Override
public int size() { public int size() {
return nodes.size(); return nodes.size();
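dataPathSettings is the hand-off point for the new behaviour: it returns only the path.data and path.shared_data entries of a running node, so a test can capture them before stopping the node and pass them to whichever node should take over that data folder. A minimal usage sketch (the node name is hypothetical, for illustration only):

    Settings dataPaths = internalCluster().dataPathSettings("node_t2"); // capture before stopping
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter("node_t2"));
    // without the captured settings the replacement node would get a fresh, empty data folder
    internalCluster().startNode(dataPaths);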


@ -20,7 +20,6 @@ package org.elasticsearch.test.test;
import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.client.Client; import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkModule;
@ -29,13 +28,13 @@ import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.SettingsBasedSeedHostsProvider; import org.elasticsearch.discovery.SettingsBasedSeedHostsProvider;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.MockHttpTransport; import org.elasticsearch.test.MockHttpTransport;
import org.elasticsearch.test.NodeConfigurationSource; import org.elasticsearch.test.NodeConfigurationSource;
import org.elasticsearch.transport.TransportSettings;
import java.io.IOException; import java.io.IOException;
import java.nio.file.Files; import java.nio.file.Files;
@ -91,20 +90,21 @@ public class InternalTestClusterTests extends ESTestCase {
InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes,
randomBoolean(), minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, randomBoolean(), minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes,
nodePrefix, Collections.emptyList(), Function.identity()); nodePrefix, Collections.emptyList(), Function.identity());
// TODO: this is not ideal - we should have a way to make sure ports are initialized in the same way assertClusters(cluster0, cluster1, true);
assertClusters(cluster0, cluster1, false);
} }
/** /**
* a set of settings that are expected to have different values betweem clusters, even they have been initialized with the same * a set of settings that are expected to have different values between clusters, even they have been initialized with the same
* base settings. * base settings.
*/ */
static final Set<String> clusterUniqueSettings = new HashSet<>(); static final Set<String> clusterUniqueSettings = new HashSet<>();
static { static {
clusterUniqueSettings.add(ClusterName.CLUSTER_NAME_SETTING.getKey()); clusterUniqueSettings.add(Environment.PATH_HOME_SETTING.getKey());
clusterUniqueSettings.add(TransportSettings.PORT.getKey()); clusterUniqueSettings.add(Environment.PATH_DATA_SETTING.getKey());
clusterUniqueSettings.add("http.port"); clusterUniqueSettings.add(Environment.PATH_REPO_SETTING.getKey());
clusterUniqueSettings.add(Environment.PATH_SHARED_DATA_SETTING.getKey());
clusterUniqueSettings.add(Environment.PATH_LOGS_SETTING.getKey());
} }
public static void assertClusters(InternalTestCluster cluster0, InternalTestCluster cluster1, boolean checkClusterUniqueSettings) { public static void assertClusters(InternalTestCluster cluster0, InternalTestCluster cluster1, boolean checkClusterUniqueSettings) {
@ -112,9 +112,6 @@ public class InternalTestClusterTests extends ESTestCase {
Settings defaultSettings1 = cluster1.getDefaultSettings(); Settings defaultSettings1 = cluster1.getDefaultSettings();
assertSettings(defaultSettings0, defaultSettings1, checkClusterUniqueSettings); assertSettings(defaultSettings0, defaultSettings1, checkClusterUniqueSettings);
assertThat(cluster0.numDataNodes(), equalTo(cluster1.numDataNodes())); assertThat(cluster0.numDataNodes(), equalTo(cluster1.numDataNodes()));
if (checkClusterUniqueSettings) {
assertThat(cluster0.getClusterName(), equalTo(cluster1.getClusterName()));
}
} }
public static void assertSettings(Settings left, Settings right, boolean checkClusterUniqueSettings) { public static void assertSettings(Settings left, Settings right, boolean checkClusterUniqueSettings) {
@ -127,7 +124,7 @@ public class InternalTestClusterTests extends ESTestCase {
continue; continue;
} }
assertTrue("key [" + key + "] is missing in " + keys1, keys1.contains(key)); assertTrue("key [" + key + "] is missing in " + keys1, keys1.contains(key));
assertEquals(right.get(key), left.get(key)); assertEquals(key, right.get(key), left.get(key));
} }
} }
@ -151,16 +148,11 @@ public class InternalTestClusterTests extends ESTestCase {
bootstrapMasterNodeIndex = maxNumDataNodes == 0 ? -1 : randomIntBetween(0, maxNumDataNodes - 1); bootstrapMasterNodeIndex = maxNumDataNodes == 0 ? -1 : randomIntBetween(0, maxNumDataNodes - 1);
} }
final int numClientNodes = randomIntBetween(0, 2); final int numClientNodes = randomIntBetween(0, 2);
final String clusterName1 = "shared1";
final String clusterName2 = "shared2";
String transportClient = getTestTransportType(); String transportClient = getTestTransportType();
NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() { NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() {
@Override @Override
public Settings nodeSettings(int nodeOrdinal) { public Settings nodeSettings(int nodeOrdinal) {
final Settings.Builder settings = Settings.builder() final Settings.Builder settings = Settings.builder()
.put(
NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
2 * ((masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes))
.put(DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file") .put(DiscoveryModule.DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file")
.putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey()) .putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey())
.put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()); .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType());
@ -185,14 +177,13 @@ public class InternalTestClusterTests extends ESTestCase {
String nodePrefix = "foobar"; String nodePrefix = "foobar";
Path baseDir = createTempDir(); InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, createTempDir(), masterNodes,
InternalTestCluster cluster0 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, "clustername", nodeConfigurationSource, numClientNodes,
autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes,
nodePrefix, mockPlugins(), Function.identity()); nodePrefix, mockPlugins(), Function.identity());
cluster0.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex); cluster0.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex);
InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, baseDir, masterNodes, InternalTestCluster cluster1 = new InternalTestCluster(clusterSeed, createTempDir(), masterNodes,
autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes, autoManageMinMasterNodes, minNumDataNodes, maxNumDataNodes, "clustername", nodeConfigurationSource, numClientNodes,
nodePrefix, mockPlugins(), Function.identity()); nodePrefix, mockPlugins(), Function.identity());
cluster1.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex); cluster1.setBootstrapMasterNodeIndex(bootstrapMasterNodeIndex);
@ -234,9 +225,6 @@ public class InternalTestClusterTests extends ESTestCase {
@Override @Override
public Settings nodeSettings(int nodeOrdinal) { public Settings nodeSettings(int nodeOrdinal) {
return Settings.builder() return Settings.builder()
.put(
NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
2 + (masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes)
.put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()) .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType())
.putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file") .putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file")
.putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey()) .putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey())
@ -269,12 +257,9 @@ public class InternalTestClusterTests extends ESTestCase {
String poorNode = randomValueOtherThanMany(n -> originalMasterCount == 1 && n.equals(cluster.getMasterName()), String poorNode = randomValueOtherThanMany(n -> originalMasterCount == 1 && n.equals(cluster.getMasterName()),
() -> randomFrom(cluster.getNodeNames())); () -> randomFrom(cluster.getNodeNames()));
Path dataPath = getNodePaths(cluster, poorNode)[0]; Path dataPath = getNodePaths(cluster, poorNode)[0];
final Settings poorNodeDataPathSettings = cluster.dataPathSettings(poorNode);
final Path testMarker = dataPath.resolve("testMarker"); final Path testMarker = dataPath.resolve("testMarker");
Files.createDirectories(testMarker); Files.createDirectories(testMarker);
int expectedMasterCount = originalMasterCount;
if (cluster.getInstance(ClusterService.class, poorNode).localNode().isMasterNode()) {
expectedMasterCount--;
}
cluster.stopRandomNode(InternalTestCluster.nameFilter(poorNode)); cluster.stopRandomNode(InternalTestCluster.nameFilter(poorNode));
assertFileExists(testMarker); // stopping a node half way shouldn't clean data assertFileExists(testMarker); // stopping a node half way shouldn't clean data
@ -285,15 +270,15 @@ public class InternalTestClusterTests extends ESTestCase {
Files.createDirectories(stableTestMarker); Files.createDirectories(stableTestMarker);
final String newNode1 = cluster.startNode(); final String newNode1 = cluster.startNode();
expectedMasterCount++; assertThat(getNodePaths(cluster, newNode1)[0], not(dataPath));
assertThat(getNodePaths(cluster, newNode1)[0], equalTo(dataPath));
assertFileExists(testMarker); // starting a node should re-use data folders and not clean it assertFileExists(testMarker); // starting a node should re-use data folders and not clean it
final String newNode2 = cluster.startNode(); final String newNode2 = cluster.startNode();
expectedMasterCount++;
final Path newDataPath = getNodePaths(cluster, newNode2)[0]; final Path newDataPath = getNodePaths(cluster, newNode2)[0];
final Path newTestMarker = newDataPath.resolve("newTestMarker"); final Path newTestMarker = newDataPath.resolve("newTestMarker");
assertThat(newDataPath, not(dataPath)); assertThat(newDataPath, not(dataPath));
Files.createDirectories(newTestMarker); Files.createDirectories(newTestMarker);
final String newNode3 = cluster.startNode(poorNodeDataPathSettings);
assertThat(getNodePaths(cluster, newNode3)[0], equalTo(dataPath));
cluster.beforeTest(random(), 0.0); cluster.beforeTest(random(), 0.0);
assertFileNotExists(newTestMarker); // the cluster should be reset for a new test, cleaning up the extra path we made assertFileNotExists(newTestMarker); // the cluster should be reset for a new test, cleaning up the extra path we made
assertFileNotExists(testMarker); // a new unknown node used this path, it should be cleaned assertFileNotExists(testMarker); // a new unknown node used this path, it should be cleaned
@ -333,7 +318,6 @@ public class InternalTestClusterTests extends ESTestCase {
@Override @Override
public Settings nodeSettings(int nodeOrdinal) { public Settings nodeSettings(int nodeOrdinal) {
return Settings.builder() return Settings.builder()
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), numNodes)
.put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()) .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType())
.put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0) .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0)
.putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file") .putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file")
@ -417,7 +401,6 @@ public class InternalTestClusterTests extends ESTestCase {
@Override @Override
public Settings nodeSettings(int nodeOrdinal) { public Settings nodeSettings(int nodeOrdinal) {
return Settings.builder() return Settings.builder()
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
.put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()) .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType())
.putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file") .putList(DISCOVERY_SEED_PROVIDERS_SETTING.getKey(), "file")
.putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey()) .putList(SettingsBasedSeedHostsProvider.DISCOVERY_SEED_HOSTS_SETTING.getKey())


@ -46,7 +46,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.Index; import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.DocIdSeqNoAndSource; import org.elasticsearch.index.engine.DocIdSeqNoAndSource;
@ -213,7 +212,6 @@ public abstract class CcrIntegTestCase extends ESTestCase {
private NodeConfigurationSource createNodeConfigurationSource(final String leaderSeedAddress, final boolean leaderCluster) { private NodeConfigurationSource createNodeConfigurationSource(final String leaderSeedAddress, final boolean leaderCluster) {
Settings.Builder builder = Settings.builder(); Settings.Builder builder = Settings.builder();
builder.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE);
// Default the watermarks to absurdly low to prevent the tests // Default the watermarks to absurdly low to prevent the tests
// from failing on nodes without enough disk space // from failing on nodes without enough disk space
builder.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b"); builder.put(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b");


@ -22,6 +22,7 @@ import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.MlTasks;
import org.elasticsearch.xpack.core.ml.action.CloseJobAction; import org.elasticsearch.xpack.core.ml.action.CloseJobAction;
@ -331,7 +332,7 @@ public class BasicDistributedJobsIT extends BaseMlIntegTestCase {
internalCluster().ensureAtMostNumDataNodes(0); internalCluster().ensureAtMostNumDataNodes(0);
// start non ml node that will hold the state and results indices // start non ml node that will hold the state and results indices
logger.info("Start non ml node:"); logger.info("Start non ml node:");
internalCluster().startNode(Settings.builder() String nonMLNode = internalCluster().startNode(Settings.builder()
.put("node.data", true) .put("node.data", true)
.put("node.attr.ml-indices", "state-and-results") .put("node.attr.ml-indices", "state-and-results")
.put(MachineLearning.ML_ENABLED.getKey(), false)); .put(MachineLearning.ML_ENABLED.getKey(), false));
@ -389,7 +390,8 @@ public class BasicDistributedJobsIT extends BaseMlIntegTestCase {
assertEquals(0, tasks.taskMap().size()); assertEquals(0, tasks.taskMap().size());
}); });
logger.info("Stop non ml node"); logger.info("Stop non ml node");
internalCluster().stopRandomNode(settings -> settings.getAsBoolean(MachineLearning.ML_ENABLED.getKey(), false) == false); Settings nonMLNodeDataPathSettings = internalCluster().dataPathSettings(nonMLNode);
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nonMLNode));
ensureStableCluster(1); ensureStableCluster(1);
Exception e = expectThrows(ElasticsearchStatusException.class, Exception e = expectThrows(ElasticsearchStatusException.class,
@ -406,6 +408,7 @@ public class BasicDistributedJobsIT extends BaseMlIntegTestCase {
logger.info("Start data node"); logger.info("Start data node");
String nonMlNode = internalCluster().startNode(Settings.builder() String nonMlNode = internalCluster().startNode(Settings.builder()
.put(nonMLNodeDataPathSettings)
.put("node.data", true) .put("node.data", true)
.put(MachineLearning.ML_ENABLED.getKey(), false)); .put(MachineLearning.ML_ENABLED.getKey(), false));
ensureStableCluster(2, mlNode); ensureStableCluster(2, mlNode);


@ -89,7 +89,8 @@ public class MlDistributedFailureIT extends BaseMlIntegTestCase {
ensureStableClusterOnAllNodes(2); ensureStableClusterOnAllNodes(2);
run("lose-dedicated-master-node-job", () -> { run("lose-dedicated-master-node-job", () -> {
logger.info("Stopping dedicated master node"); logger.info("Stopping dedicated master node");
internalCluster().stopRandomNode(settings -> settings.getAsBoolean("node.master", false)); Settings masterDataPathSettings = internalCluster().dataPathSettings(internalCluster().getMasterName());
internalCluster().stopCurrentMasterNode();
assertBusy(() -> { assertBusy(() -> {
ClusterState state = client(mlAndDataNode).admin().cluster().prepareState() ClusterState state = client(mlAndDataNode).admin().cluster().prepareState()
.setLocal(true).get().getState(); .setLocal(true).get().getState();
@ -97,6 +98,7 @@ public class MlDistributedFailureIT extends BaseMlIntegTestCase {
}); });
logger.info("Restarting dedicated master node"); logger.info("Restarting dedicated master node");
internalCluster().startNode(Settings.builder() internalCluster().startNode(Settings.builder()
.put(masterDataPathSettings)
.put("node.master", true) .put("node.master", true)
.put("node.data", false) .put("node.data", false)
.put("node.ml", false) .put("node.ml", false)