Prevent merging nodes' data paths (#42665)

Today Elasticsearch does not prevent you from reconfiguring a node's
`path.data` to point to data paths that previously belonged to more than one
node. There's no good reason to be able to do this, and the consequences can be
quietly disastrous. Furthermore, #42489 might result in a user trying to split
up a previously-shared collection of data paths by hand and there's definitely
scope for mixing the paths up across nodes when doing this.

This change adds a check during startup to ensure that all of the configured data paths belong to the same node.
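
For orientation, the essence of the check is small: read the node ID recorded in each configured data path and refuse to start if more than one distinct ID turns up. Here is a minimal sketch of that idea; the ensureSingleOwner name and the readNodeId parameter are illustrative only, and the real implementation is the NodeEnvironment change below.

import java.nio.file.Path;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Function;

// Simplified illustration, not the actual Elasticsearch code: collect the node ID
// (if any) recorded in each data path and fail fast if the paths were written by
// more than one node. readNodeId stands in for the NodeMetaData loading performed
// by the real change; it returns null for a path that has never held node data.
static void ensureSingleOwner(List<Path> dataPaths, Function<Path, String> readNodeId) {
    final Set<String> nodeIds = new HashSet<>();
    for (final Path path : dataPaths) {
        final String nodeId = readNodeId.apply(path);
        if (nodeId != null) {
            nodeIds.add(nodeId);
        }
    }
    if (nodeIds.size() > 1) {
        throw new IllegalStateException(
            "data paths " + dataPaths + " belong to multiple nodes with IDs " + nodeIds);
    }
}

An empty data path records no node ID, so adding a fresh directory to an existing node's path.data does not trip the check; only paths already written by different nodes do.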
Author: David Turner, 2019-05-30 16:54:01 +01:00
commit d14799f0a5 (parent b5527b3278)
2 changed files with 49 additions and 1 deletion

NodeEnvironment.java

@@ -70,6 +70,7 @@ import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -403,10 +404,25 @@ public final class NodeEnvironment implements Closeable {
    private static NodeMetaData loadOrCreateNodeMetaData(Settings settings, Logger logger,
                                                          NodePath... nodePaths) throws IOException {
        final Path[] paths = Arrays.stream(nodePaths).map(np -> np.path).toArray(Path[]::new);

        final Set<String> nodeIds = new HashSet<>();
        for (final Path path : paths) {
            final NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path);
            if (metaData != null) {
                nodeIds.add(metaData.nodeId());
            }
        }
        if (nodeIds.size() > 1) {
            throw new IllegalStateException(
                "data paths " + Arrays.toString(paths) + " belong to multiple nodes with IDs " + nodeIds);
        }

        NodeMetaData metaData = NodeMetaData.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, paths);
        if (metaData == null) {
            assert nodeIds.isEmpty() : nodeIds;
            metaData = new NodeMetaData(generateNodeId(settings), Version.CURRENT);
        } else {
            assert nodeIds.equals(Collections.singleton(metaData.nodeId())) : nodeIds + " doesn't match " + metaData;
            metaData = metaData.upgradeToCurrentVersion();
        }

NodeEnvironmentIT.java

@@ -26,7 +26,10 @@ import org.elasticsearch.node.Node;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalTestCluster;
import java.io.IOException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.containsString;
@@ -35,7 +38,7 @@ import static org.hamcrest.Matchers.startsWith;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
public class NodeEnvironmentIT extends ESIntegTestCase {
-    public void testStartFailureOnDataForNonDataNode() throws Exception {
+    public void testStartFailureOnDataForNonDataNode() {
        final String indexName = "test-fail-on-data";
        logger.info("--> starting one node");
@@ -123,4 +126,33 @@ public class NodeEnvironmentIT extends ESIntegTestCase {
        assertThat(illegalStateException.getMessage(),
            allOf(startsWith("cannot upgrade a node from version ["), endsWith("] directly to version [" + Version.CURRENT + "]")));
    }

    public void testFailsToStartOnDataPathsFromMultipleNodes() throws IOException {
        final List<String> nodes = internalCluster().startNodes(2);
        ensureStableCluster(2);

        final List<String> node0DataPaths = Environment.PATH_DATA_SETTING.get(internalCluster().dataPathSettings(nodes.get(0)));
        final List<String> node1DataPaths = Environment.PATH_DATA_SETTING.get(internalCluster().dataPathSettings(nodes.get(1)));

        final List<String> allDataPaths = new ArrayList<>(node0DataPaths);
        allDataPaths.addAll(node1DataPaths);

        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(1)));
        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes.get(0)));

        final IllegalStateException illegalStateException = expectThrows(IllegalStateException.class,
            () -> internalCluster().startNode(Settings.builder().putList(Environment.PATH_DATA_SETTING.getKey(), allDataPaths)));

        assertThat(illegalStateException.getMessage(), containsString("belong to multiple nodes with IDs"));

        final List<String> node0DataPathsPlusOne = new ArrayList<>(node0DataPaths);
        node0DataPathsPlusOne.add(createTempDir().toString());
        internalCluster().startNode(Settings.builder().putList(Environment.PATH_DATA_SETTING.getKey(), node0DataPathsPlusOne));

        final List<String> node1DataPathsPlusOne = new ArrayList<>(node1DataPaths);
        node1DataPathsPlusOne.add(createTempDir().toString());
        internalCluster().startNode(Settings.builder().putList(Environment.PATH_DATA_SETTING.getKey(), node1DataPathsPlusOne));

        ensureStableCluster(2);
    }
}
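
One behavioural note on the test above: only node IDs actually recorded on disk count towards the check, which is why appending a brand-new empty directory to a node's existing path.data (as the test does with createTempDir()) still starts cleanly, while mixing directories from two different nodes does not. With made-up paths and node IDs, the startup failure looks roughly like this (the message format comes from the NodeEnvironment change above; the concrete values are placeholders):

java.lang.IllegalStateException: data paths [/var/data/node0, /var/data/node1] belong to multiple nodes with IDs [node-id-0, node-id-1]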