Merge remote-tracking branch 'dakrone/no-cluster-name-in-path'
This commit is contained in:
commit
32bd869b28
|
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.bootstrap;
|
||||
|
||||
import org.elasticsearch.SecureSM;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.io.PathUtils;
|
||||
|
@ -256,8 +257,10 @@ final class Security {
|
|||
for (Path path : environment.dataFiles()) {
|
||||
addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
|
||||
}
|
||||
// TODO: this should be removed in ES 6.0! We will no longer support data paths with the cluster as a folder
|
||||
assert Version.CURRENT.major < 6 : "cluster name is no longer used in data path";
|
||||
for (Path path : environment.dataWithClusterFiles()) {
|
||||
addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
|
||||
addPathIfExists(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
|
||||
}
|
||||
for (Path path : environment.repoFiles()) {
|
||||
addPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete");
|
||||
|
@ -318,6 +321,27 @@ final class Security {
|
|||
policy.add(new FilePermission(path.toString() + path.getFileSystem().getSeparator() + "-", permissions));
|
||||
}
|
||||
|
||||
/**
|
||||
* Add access to a directory iff it exists already
|
||||
* @param policy current policy to add permissions to
|
||||
* @param configurationName the configuration name associated with the path (for error messages only)
|
||||
* @param path the path itself
|
||||
* @param permissions set of filepermissions to grant to the path
|
||||
*/
|
||||
static void addPathIfExists(Permissions policy, String configurationName, Path path, String permissions) {
|
||||
if (Files.isDirectory(path)) {
|
||||
// add each path twice: once for itself, again for files underneath it
|
||||
policy.add(new FilePermission(path.toString(), permissions));
|
||||
policy.add(new FilePermission(path.toString() + path.getFileSystem().getSeparator() + "-", permissions));
|
||||
try {
|
||||
path.getFileSystem().provider().checkAccess(path.toRealPath(), AccessMode.READ);
|
||||
} catch (IOException e) {
|
||||
throw new IllegalStateException("Unable to access '" + configurationName + "' (" + path + ")", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Ensures configured directory {@code path} exists.
|
||||
* @throws IOException if {@code path} exists, but is not a directory, not accessible, or broken symbolic link.
|
||||
|
|
|
@ -195,7 +195,11 @@ public class Environment {
|
|||
|
||||
/**
|
||||
* The data location with the cluster name as a sub directory.
|
||||
*
|
||||
* @deprecated Used to upgrade old data paths to new ones that do not include the cluster name, should not be used to write files to and
|
||||
* will be removed in ES 6.0
|
||||
*/
|
||||
@Deprecated
|
||||
public Path[] dataWithClusterFiles() {
|
||||
return dataWithClusterFiles;
|
||||
}
|
||||
|
|
|
@ -32,9 +32,12 @@ import org.elasticsearch.ElasticsearchException;
|
|||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.UUIDs;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.logging.DeprecationLogger;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -63,6 +66,7 @@ import java.util.Arrays;
|
|||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
@ -161,6 +165,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
|||
public static final String NODES_FOLDER = "nodes";
|
||||
public static final String INDICES_FOLDER = "indices";
|
||||
public static final String NODE_LOCK_FILENAME = "node.lock";
|
||||
public static final String UPGRADE_LOCK_FILENAME = "upgrade.lock";
|
||||
|
||||
@Inject
|
||||
public NodeEnvironment(Settings settings, Environment environment) throws IOException {
|
||||
|
@ -175,7 +180,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
|||
localNodeId = -1;
|
||||
return;
|
||||
}
|
||||
final NodePath[] nodePaths = new NodePath[environment.dataWithClusterFiles().length];
|
||||
final NodePath[] nodePaths = new NodePath[environment.dataFiles().length];
|
||||
final Lock[] locks = new Lock[nodePaths.length];
|
||||
boolean success = false;
|
||||
|
||||
|
@ -185,8 +190,17 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
|||
IOException lastException = null;
|
||||
int maxLocalStorageNodes = MAX_LOCAL_STORAGE_NODES_SETTING.get(settings);
|
||||
for (int possibleLockId = 0; possibleLockId < maxLocalStorageNodes; possibleLockId++) {
|
||||
for (int dirIndex = 0; dirIndex < environment.dataWithClusterFiles().length; dirIndex++) {
|
||||
Path dir = environment.dataWithClusterFiles()[dirIndex].resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId));
|
||||
for (int dirIndex = 0; dirIndex < environment.dataFiles().length; dirIndex++) {
|
||||
Path dataDirWithClusterName = environment.dataWithClusterFiles()[dirIndex];
|
||||
Path dataDir = environment.dataFiles()[dirIndex];
|
||||
// TODO: Remove this in 6.0, we are no longer going to read from the cluster name directory
|
||||
if (readFromDataPathWithClusterName(dataDirWithClusterName)) {
|
||||
DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
|
||||
deprecationLogger.deprecated("ES has detected the [path.data] folder using the cluster name as a folder [{}], " +
|
||||
"Elasticsearch 6.0 will not allow the cluster name as a folder within the data path", dataDir);
|
||||
dataDir = dataDirWithClusterName;
|
||||
}
|
||||
Path dir = dataDir.resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId));
|
||||
Files.createDirectories(dir);
|
||||
|
||||
try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
|
||||
|
@ -218,7 +232,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
|||
|
||||
if (locks[0] == null) {
|
||||
throw new IllegalStateException("Failed to obtain node lock, is the following location writable?: "
|
||||
+ Arrays.toString(environment.dataWithClusterFiles()), lastException);
|
||||
+ Arrays.toString(environment.dataFiles()), lastException);
|
||||
}
|
||||
|
||||
this.localNodeId = localNodeId;
|
||||
|
@ -242,6 +256,32 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
|||
}
|
||||
}
|
||||
|
||||
/** Returns true if the directory is empty */
|
||||
private static boolean dirEmpty(final Path path) throws IOException {
|
||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(path)) {
|
||||
return stream.iterator().hasNext() == false;
|
||||
}
|
||||
}
|
||||
|
||||
// Visible for testing
|
||||
/** Returns true if data should be read from the data path that includes the cluster name (ie, it has data in it) */
|
||||
static boolean readFromDataPathWithClusterName(Path dataPathWithClusterName) throws IOException {
|
||||
if (Files.exists(dataPathWithClusterName) == false || // If it doesn't exist
|
||||
Files.isDirectory(dataPathWithClusterName) == false || // Or isn't a directory
|
||||
dirEmpty(dataPathWithClusterName)) { // Or if it's empty
|
||||
// No need to read from cluster-name folder!
|
||||
return false;
|
||||
}
|
||||
// The "nodes" directory inside of the cluster name
|
||||
Path nodesPath = dataPathWithClusterName.resolve(NODES_FOLDER);
|
||||
if (Files.isDirectory(nodesPath)) {
|
||||
// The cluster has data in the "nodes" so we should read from the cluster-named folder for now
|
||||
return true;
|
||||
}
|
||||
// Hey the nodes directory didn't exist, so we can safely use whatever directory we feel appropriate
|
||||
return false;
|
||||
}
|
||||
|
||||
private static void releaseAndNullLocks(Lock[] locks) {
|
||||
for (int i = 0; i < locks.length; i++) {
|
||||
if (locks[i] != null) {
|
||||
|
|
|
@ -22,6 +22,7 @@ import org.apache.lucene.index.SegmentInfos;
|
|||
import org.apache.lucene.store.LockObtainFailedException;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.io.PathUtils;
|
||||
|
@ -47,6 +48,8 @@ import java.util.concurrent.CountDownLatch;
|
|||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileNotExists;
|
||||
import static org.hamcrest.CoreMatchers.equalTo;
|
||||
import static org.hamcrest.Matchers.arrayWithSize;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
|
@ -382,10 +385,10 @@ public class NodeEnvironmentTests extends ESTestCase {
|
|||
|
||||
assertThat("shard paths with a custom data_path should contain only regular paths",
|
||||
env.availableShardPaths(sid),
|
||||
equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/" + index.getUUID() + "/0")));
|
||||
equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID() + "/0")));
|
||||
|
||||
assertThat("index paths uses the regular template",
|
||||
env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/" + index.getUUID())));
|
||||
env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID())));
|
||||
|
||||
env.close();
|
||||
NodeEnvironment env2 = newNodeEnvironment(dataPaths, "/tmp",
|
||||
|
@ -396,14 +399,57 @@ public class NodeEnvironmentTests extends ESTestCase {
|
|||
|
||||
assertThat("shard paths with a custom data_path should contain only regular paths",
|
||||
env2.availableShardPaths(sid),
|
||||
equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/" + index.getUUID() + "/0")));
|
||||
equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID() + "/0")));
|
||||
|
||||
assertThat("index paths uses the regular template",
|
||||
env2.indexPaths(index), equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/" + index.getUUID())));
|
||||
env2.indexPaths(index), equalTo(stringsToPaths(dataPaths, "nodes/0/indices/" + index.getUUID())));
|
||||
|
||||
env2.close();
|
||||
}
|
||||
|
||||
// Verifies the 5.0 backwards-compatibility rule: node data lives directly under path.data
// ($DATA_DIR/nodes/N) unless a legacy cluster-name folder ($DATA_DIR/$CLUSTER_NAME/nodes) already holds data.
public void testWhetherClusterFolderShouldBeUsed() throws Exception {
    // data root WITHOUT a cluster-name subfolder (the new layout)
    Path tempNoCluster = createTempDir();
    String tempDataPathString = tempNoCluster.toAbsolutePath().toString();

    Path tempPath = tempNoCluster.resolve("foo"); // "foo" is the cluster name
    String tempClusterPathString = tempPath.toAbsolutePath().toString();

    // Case 1: cluster-name folder does not exist at all -> new layout is used
    assertFalse("non-existent directory should not be used", NodeEnvironment.readFromDataPathWithClusterName(tempPath));
    Settings settings = Settings.builder()
        .put("cluster.name", "foo")
        .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
        .put(Environment.PATH_DATA_SETTING.getKey(), tempDataPathString).build();
    try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) {
        Path nodeDataPath = env.nodeDataPaths()[0];
        // node data resolves directly under the data root, no cluster name in the path
        assertThat(nodeDataPath.toString(), equalTo(tempDataPathString + "/nodes/0"));
    }
    IOUtils.rm(tempNoCluster);

    // Case 2: cluster-name folder exists but is empty -> still the new layout
    Files.createDirectories(tempPath);
    assertFalse("empty directory should not be read from", NodeEnvironment.readFromDataPathWithClusterName(tempPath));
    settings = Settings.builder()
        .put("cluster.name", "foo")
        .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
        .put(Environment.PATH_DATA_SETTING.getKey(), tempDataPathString).build();
    try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) {
        Path nodeDataPath = env.nodeDataPaths()[0];
        assertThat(nodeDataPath.toString(), equalTo(tempDataPathString + "/nodes/0"));
    }
    IOUtils.rm(tempNoCluster);

    // Case 3: cluster-name folder contains a "nodes" directory (legacy data present)
    // -> NodeEnvironment must keep reading from the cluster-name path
    // Create a directory for the cluster name
    Files.createDirectories(tempPath.resolve(NodeEnvironment.NODES_FOLDER));
    assertTrue("there is data in the directory", NodeEnvironment.readFromDataPathWithClusterName(tempPath));
    settings = Settings.builder()
        .put("cluster.name", "foo")
        .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
        .put(Environment.PATH_DATA_SETTING.getKey(), tempDataPathString).build();
    try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) {
        Path nodeDataPath = env.nodeDataPaths()[0];
        // NOTE(review): expected path includes the cluster name here, exercising the deprecated fallback
        assertThat(nodeDataPath.toString(), equalTo(tempClusterPathString + "/nodes/0"));
    }
}
|
||||
|
||||
/** Converts an array of Strings to an array of Paths, adding an additional child if specified */
|
||||
private Path[] stringsToPaths(String[] strings, String additional) {
|
||||
Path[] locations = new Path[strings.length];
|
||||
|
|
|
@ -8,3 +8,18 @@ there is nothing to worry about since this is only address space consumption
|
|||
and the actual memory usage of Elasticsearch will stay similar to what it was
|
||||
in 2.x. See http://blog.thetaphi.de/2012/07/use-lucenes-mmapdirectory-on-64bit.html
|
||||
for more information.
|
||||
|
||||
=== Path to data on disk
|
||||
|
||||
In prior versions of Elasticsearch, the `path.data` directory included a folder
|
||||
for the cluster name, so that data was in a folder such as
|
||||
`$DATA_DIR/$CLUSTER_NAME/nodes/$nodeOrdinal`. In 5.0 the cluster name as a
|
||||
directory is deprecated. Data will now be stored in
|
||||
`$DATA_DIR/nodes/$nodeOrdinal` if there is no existing data. Upon startup,
|
||||
Elasticsearch will check to see if the cluster folder exists and has data, and
|
||||
will read from it if necessary. In Elasticsearch 6.0 this backwards-compatible
|
||||
behavior will be removed.
|
||||
|
||||
If you are running a multi-cluster setup in which more than one Elasticsearch
cluster points at the same data path, you will need to add the cluster name to
the data path yourself so that the different clusters do not overwrite each
other's data.
|
||||
|
|
|
@ -68,6 +68,7 @@ public class EvilSecurityTests extends ESTestCase {
|
|||
}
|
||||
|
||||
/** test generated permissions for all configured paths */
|
||||
@SuppressWarnings("deprecation") // needs to check settings for deprecated path
|
||||
public void testEnvironmentPaths() throws Exception {
|
||||
Path path = createTempDir();
|
||||
// make a fake ES home and ensure we only grant permissions to that.
|
||||
|
|
|
@ -63,7 +63,7 @@ public class NodeEnvironmentEvilTests extends ESTestCase {
|
|||
assumeTrue("posix filesystem", isPosix);
|
||||
final String[] tempPaths = tmpPaths();
|
||||
Path path = PathUtils.get(randomFrom(tempPaths));
|
||||
Path fooIndex = path.resolve("elasticsearch").resolve("nodes").resolve("0").resolve(NodeEnvironment.INDICES_FOLDER)
|
||||
Path fooIndex = path.resolve("nodes").resolve("0").resolve(NodeEnvironment.INDICES_FOLDER)
|
||||
.resolve("foo");
|
||||
Files.createDirectories(fooIndex);
|
||||
try (PosixPermissionsResetter attr = new PosixPermissionsResetter(fooIndex)) {
|
||||
|
@ -83,7 +83,7 @@ public class NodeEnvironmentEvilTests extends ESTestCase {
|
|||
assumeTrue("posix filesystem", isPosix);
|
||||
final String[] tempPaths = tmpPaths();
|
||||
Path path = PathUtils.get(randomFrom(tempPaths));
|
||||
Path fooIndex = path.resolve("elasticsearch").resolve("nodes").resolve("0").resolve(NodeEnvironment.INDICES_FOLDER)
|
||||
Path fooIndex = path.resolve("nodes").resolve("0").resolve(NodeEnvironment.INDICES_FOLDER)
|
||||
.resolve("foo");
|
||||
Path fooShard = fooIndex.resolve("0");
|
||||
Path fooShardIndex = fooShard.resolve("index");
|
||||
|
|
Loading…
Reference in New Issue