Merge pull request #20433 from dakrone/remove-cluster-name-folder-fallback

No longer allow cluster name in data path
Lee Hinman 2016-09-12 17:01:49 -05:00 committed by GitHub
commit 44278db1bc
8 changed files with 36 additions and 80 deletions


@@ -257,11 +257,6 @@ final class Security {
for (Path path : environment.dataFiles()) {
addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
}
// TODO: this should be removed in ES 6.0! We will no longer support data paths with the cluster as a folder
// https://github.com/elastic/elasticsearch/issues/20391
for (Path path : environment.dataWithClusterFiles()) {
addPathIfExists(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
}
for (Path path : environment.repoFiles()) {
addPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete");
}


@@ -209,13 +209,6 @@ public final class NodeEnvironment implements Closeable {
for (int dirIndex = 0; dirIndex < environment.dataFiles().length; dirIndex++) {
Path dataDirWithClusterName = environment.dataWithClusterFiles()[dirIndex];
Path dataDir = environment.dataFiles()[dirIndex];
// TODO: Remove this in 6.0, we are no longer going to read from the cluster name directory
if (readFromDataPathWithClusterName(dataDirWithClusterName)) {
DeprecationLogger deprecationLogger = new DeprecationLogger(startupTraceLogger);
deprecationLogger.deprecated("ES has detected the [path.data] folder using the cluster name as a folder [{}], " +
"Elasticsearch 6.0 will not allow the cluster name as a folder within the data path", dataDir);
dataDir = dataDirWithClusterName;
}
Path dir = dataDir.resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId));
Files.createDirectories(dir);
@@ -289,25 +282,6 @@
}
}
// Visible for testing
/** Returns true if data should be read from the data path that includes the cluster name (ie, it has data in it) */
static boolean readFromDataPathWithClusterName(Path dataPathWithClusterName) throws IOException {
if (Files.exists(dataPathWithClusterName) == false || // If it doesn't exist
Files.isDirectory(dataPathWithClusterName) == false || // Or isn't a directory
dirEmpty(dataPathWithClusterName)) { // Or if it's empty
// No need to read from cluster-name folder!
return false;
}
// The "nodes" directory inside of the cluster name
Path nodesPath = dataPathWithClusterName.resolve(NODES_FOLDER);
if (Files.isDirectory(nodesPath)) {
// The cluster has data in the "nodes" so we should read from the cluster-named folder for now
return true;
}
// Hey the nodes directory didn't exist, so we can safely use whatever directory we feel appropriate
return false;
}
private static void releaseAndNullLocks(Lock[] locks) {
for (int i = 0; i < locks.length; i++) {
if (locks[i] != null) {


@@ -396,49 +396,6 @@ public class NodeEnvironmentTests extends ESTestCase {
env.close();
}
public void testWhetherClusterFolderShouldBeUsed() throws Exception {
Path tempNoCluster = createTempDir();
Path tempDataPath = tempNoCluster.toAbsolutePath();
Path tempPath = tempNoCluster.resolve("foo"); // "foo" is the cluster name
Path tempClusterPath = tempPath.toAbsolutePath();
assertFalse("non-existent directory should not be used", NodeEnvironment.readFromDataPathWithClusterName(tempPath));
Settings settings = Settings.builder()
.put("cluster.name", "foo")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
.put(Environment.PATH_DATA_SETTING.getKey(), tempDataPath.toString()).build();
try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) {
Path nodeDataPath = env.nodeDataPaths()[0];
assertEquals(nodeDataPath, tempDataPath.resolve("nodes").resolve("0"));
}
IOUtils.rm(tempNoCluster);
Files.createDirectories(tempPath);
assertFalse("empty directory should not be read from", NodeEnvironment.readFromDataPathWithClusterName(tempPath));
settings = Settings.builder()
.put("cluster.name", "foo")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
.put(Environment.PATH_DATA_SETTING.getKey(), tempDataPath.toString()).build();
try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) {
Path nodeDataPath = env.nodeDataPaths()[0];
assertEquals(nodeDataPath, tempDataPath.resolve("nodes").resolve("0"));
}
IOUtils.rm(tempNoCluster);
// Create a directory for the cluster name
Files.createDirectories(tempPath.resolve(NodeEnvironment.NODES_FOLDER));
assertTrue("there is data in the directory", NodeEnvironment.readFromDataPathWithClusterName(tempPath));
settings = Settings.builder()
.put("cluster.name", "foo")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
.put(Environment.PATH_DATA_SETTING.getKey(), tempClusterPath.toString()).build();
try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) {
Path nodeDataPath = env.nodeDataPaths()[0];
assertEquals(nodeDataPath, tempClusterPath.resolve("nodes").resolve("0"));
}
}
public void testPersistentNodeId() throws IOException {
String[] paths = tmpPaths();
NodeEnvironment env = newNodeEnvironment(paths, Settings.builder()


@@ -27,6 +27,8 @@ way to reindex old indices is to use the `reindex` API.
* <<breaking_60_mapping_changes>>
* <<breaking_60_rest_changes>>
* <<breaking_60_search_changes>>
* <<breaking_60_docs_changes>>
* <<breaking_60_cluster_changes>>
include::migrate_6_0/mapping.asciidoc[]
@@ -35,3 +37,5 @@ include::migrate_6_0/rest.asciidoc[]
include::migrate_6_0/search.asciidoc[]
include::migrate_6_0/docs.asciidoc[]
include::migrate_6_0/cluster.asciidoc[]


@@ -0,0 +1,27 @@
[[breaking_60_cluster_changes]]
=== Cluster changes
==== Cluster name no longer allowed in path.data
Previously the cluster name could be used as a folder within the `path.data`
directory, and Elasticsearch would read data from it after logging a deprecation
warning. This is no longer allowed. For instance, the following layout was valid
in the previous version:
[source,sh]
--------------------------------------------------
# Assuming path.data is /tmp/mydata
# No longer supported:
$ tree /tmp/mydata
/tmp/mydata
├── <cluster_name>
│   └── nodes
│       └── 0
│           └── <etc>

# Should be changed to:
$ tree /tmp/mydata
/tmp/mydata
├── nodes
│   └── 0
│       └── <etc>
--------------------------------------------------
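To migrate an existing data directory to the new layout, the `nodes` folder can be moved up one level so it sits directly under `path.data`, as the second tree above shows. A minimal sketch, assuming the node has been stopped and using `/tmp/mydata` and `my_cluster` as hypothetical example values for the data path and cluster name:

[source,sh]
--------------------------------------------------
# Hypothetical example: path.data is /tmp/mydata, cluster name is my_cluster.
# Stop the node, then move the nodes folder up one level:
$ mv /tmp/mydata/my_cluster/nodes /tmp/mydata/nodes
# Remove the now-empty cluster-name folder:
$ rmdir /tmp/mydata/my_cluster
--------------------------------------------------

This mirrors what the `ESIntegTestCase` change further down does with `Files.move`, relocating the `nodes` folder from the cluster-name directory into the data path itself.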


@@ -1,4 +1,4 @@
[[breaking_60_document_api_changes]]
[[breaking_60_docs_changes]]
=== Document API changes
==== version type 'force' removed


@@ -143,15 +143,14 @@ public class PercolatorBackwardsCompatibilityTests extends ESIntegTestCase {
}
private void setupNode() throws Exception {
Path dataDir = createTempDir();
Path clusterDir = Files.createDirectory(dataDir.resolve(cluster().getClusterName()));
Path clusterDir = createTempDir();
try (InputStream stream = PercolatorBackwardsCompatibilityTests.class.
getResourceAsStream("/indices/percolator/bwc_index_2.0.0.zip")) {
TestUtil.unzip(stream, clusterDir);
}
Settings.Builder nodeSettings = Settings.builder()
.put(Environment.PATH_DATA_SETTING.getKey(), dataDir);
.put(Environment.PATH_DATA_SETTING.getKey(), clusterDir);
internalCluster().startNode(nodeSettings.build());
ensureGreen(INDEX_NAME);
}


@@ -2064,8 +2064,8 @@ public abstract class ESIntegTestCase extends ESTestCase {
}
throw new IllegalStateException(builder.toString());
}
Path src = list[0];
Path dest = dataDir.resolve(internalCluster().getClusterName());
Path src = list[0].resolve(NodeEnvironment.NODES_FOLDER);
Path dest = dataDir.resolve(NodeEnvironment.NODES_FOLDER);
assertTrue(Files.exists(src));
Files.move(src, dest);
assertFalse(Files.exists(src));