Create specific `data` location for indices (move from work), closes #473.

This commit is contained in:
kimchy 2010-11-03 14:41:29 +02:00
parent 92b3ae3f73
commit 6804c02e97
12 changed files with 51 additions and 19 deletions

View File

@@ -7,6 +7,8 @@
<content url="file://$MODULE_DIR$/../..">
<excludeFolder url="file://$MODULE_DIR$/../../.gradle" />
<excludeFolder url="file://$MODULE_DIR$/../../build" />
<excludeFolder url="file://$MODULE_DIR$/../../data" />
<excludeFolder url="file://$MODULE_DIR$/../../logs" />
<excludeFolder url="file://$MODULE_DIR$/../../modules/jarjar/build" />
<excludeFolder url="file://$MODULE_DIR$/../../test-output" />
<excludeFolder url="file://$MODULE_DIR$/../../work" />

View File

@@ -46,6 +46,10 @@ public class Environment {
private final File workWithClusterFile;
private final File dataFile;
private final File dataWithClusterFile;
private final File configFile;
private final File pluginsFile;
@@ -82,10 +86,17 @@
}
workWithClusterFile = new File(workFile, ClusterName.clusterNameFromSettings(settings).value());
if (settings.get("path.data") != null) {
dataFile = new File(cleanPath(settings.get("path.data")));
} else {
dataFile = new File(homeFile, "data");
}
dataWithClusterFile = new File(dataFile, ClusterName.clusterNameFromSettings(settings).value());
if (settings.get("path.logs") != null) {
logsFile = new File(cleanPath(settings.get("path.logs")));
} else {
logsFile = new File(workFile, "logs");
logsFile = new File(homeFile, "logs");
}
}
@@ -103,6 +114,27 @@ public class Environment {
return workFile;
}
/**
* The work location with the cluster name as a sub directory.
*/
public File workWithClusterFile() {
return workWithClusterFile;
}
/**
* The data location.
*/
public File dataFile() {
return dataFile;
}
/**
* The data location with the cluster name as a sub directory.
*/
public File dataWithClusterFile() {
return dataWithClusterFile;
}
/**
* The config location.
*/
@@ -114,10 +146,6 @@ public class Environment {
return pluginsFile;
}
public File workWithClusterFile() {
return workWithClusterFile;
}
public File logsFile() {
return logsFile;
}

View File

@@ -55,7 +55,7 @@ public class NodeEnvironment extends AbstractComponent {
File dir = null;
int localNodeId = -1;
for (int i = 0; i < 100; i++) {
dir = new File(new File(environment.workWithClusterFile(), "nodes"), Integer.toString(i));
dir = new File(new File(environment.dataWithClusterFile(), "nodes"), Integer.toString(i));
if (!dir.exists()) {
dir.mkdirs();
}
@@ -91,7 +91,7 @@ public class NodeEnvironment extends AbstractComponent {
return nodeFile != null && lock != null;
}
public File nodeLocation() {
public File nodeDataLocation() {
if (nodeFile == null || lock == null) {
throw new ElasticSearchIllegalStateException("node is not configured to store local location");
}

View File

@@ -47,7 +47,7 @@ public class FsGateway extends BlobStoreGateway {
String location = componentSettings.get("location");
if (location == null) {
logger.warn("using local fs location for gateway, should be changed to be a shared location across nodes");
gatewayFile = new File(environment.workFile(), "gateway");
gatewayFile = new File(environment.dataFile(), "gateway");
} else {
gatewayFile = new File(location);
}

View File

@@ -208,7 +208,7 @@ public class LocalGateway extends AbstractLifecycleComponent<Gateway> implements
}
@Override public void reset() throws Exception {
FileSystemUtils.deleteRecursively(nodeEnv.nodeLocation());
FileSystemUtils.deleteRecursively(nodeEnv.nodeDataLocation());
}
@Override public void clusterChanged(final ClusterChangedEvent event) {
@@ -346,7 +346,7 @@ public class LocalGateway extends AbstractLifecycleComponent<Gateway> implements
location = null;
} else {
// create the location where the state will be stored
this.location = new File(nodeEnv.nodeLocation(), "_state");
this.location = new File(nodeEnv.nodeDataLocation(), "_state");
this.location.mkdirs();
if (clusterService.localNode().masterNode()) {

View File

@@ -43,7 +43,7 @@ public abstract class FsIndexStore extends AbstractIndexStore {
public FsIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, NodeEnvironment nodeEnv) {
super(index, indexSettings, indexService);
if (nodeEnv.hasNodeFile()) {
this.location = new File(new File(nodeEnv.nodeLocation(), "indices"), index.name());
this.location = new File(new File(nodeEnv.nodeDataLocation(), "indices"), index.name());
} else {
this.location = null;
}

View File

@@ -60,7 +60,7 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog
@Inject public FsTranslog(ShardId shardId, @IndexSettings Settings indexSettings, NodeEnvironment nodeEnv) {
super(shardId, indexSettings);
this.location = new File(new File(new File(new File(nodeEnv.nodeLocation(), "indices"), shardId.index().name()), Integer.toString(shardId.id())), "translog");
this.location = new File(new File(new File(new File(nodeEnv.nodeDataLocation(), "indices"), shardId.index().name()), Integer.toString(shardId.id())), "translog");
this.location.mkdirs();
this.useStream = componentSettings.getAsBoolean("use_stream", false);
}

View File

@@ -153,7 +153,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesOperatio
if (!storeType.contains("fs")) {
return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
}
File indexFile = new File(new File(new File(new File(nodeEnv.nodeLocation(), "indices"), shardId.index().name()), Integer.toString(shardId.id())), "index");
File indexFile = new File(new File(new File(new File(nodeEnv.nodeDataLocation(), "indices"), shardId.index().name()), Integer.toString(shardId.id())), "index");
if (!indexFile.exists()) {
return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
}

View File

@@ -100,6 +100,8 @@ public class InternalSettingsPerparer {
settingsBuilder.put("path.home", cleanPath(environment.homeFile().getAbsolutePath()));
settingsBuilder.put("path.work", cleanPath(environment.workFile().getAbsolutePath()));
settingsBuilder.put("path.work_with_cluster", cleanPath(environment.workWithClusterFile().getAbsolutePath()));
settingsBuilder.put("path.data", cleanPath(environment.dataFile().getAbsolutePath()));
settingsBuilder.put("path.data_with_cluster", cleanPath(environment.dataWithClusterFile().getAbsolutePath()));
settingsBuilder.put("path.logs", cleanPath(environment.logsFile().getAbsolutePath()));
v1 = settingsBuilder.build();

View File

@@ -157,8 +157,8 @@ public abstract class AbstractSimpleIndexGatewayTests extends AbstractNodesTests
logger.info("Closing the server");
closeNode("server1");
logger.info("Clearing cluster work dir, so there will be a full recovery from the gateway");
FileSystemUtils.deleteRecursively(environment.workWithClusterFile());
logger.info("Clearing cluster data dir, so there will be a full recovery from the gateway");
FileSystemUtils.deleteRecursively(environment.dataWithClusterFile());
logger.info("Starting the server, should recover from the gateway (both index and translog) without reusing work dir");
startNode("server1");
@@ -264,8 +264,8 @@ public abstract class AbstractSimpleIndexGatewayTests extends AbstractNodesTests
logger.info("--> closing the server");
closeNode("server1");
if (fullRecovery) {
logger.info("Clearing cluster work dir, so there will be a full recovery from the gateway");
FileSystemUtils.deleteRecursively(environment.workWithClusterFile());
logger.info("Clearing cluster data dir, so there will be a full recovery from the gateway");
FileSystemUtils.deleteRecursively(environment.dataWithClusterFile());
logger.info("Starting the server, should recover from the gateway (both index and translog) without reusing work dir");
}

View File

@@ -197,7 +197,7 @@ public class FullRestartStressTest {
client.close();
for (Node node : nodes) {
File nodeWork = ((InternalNode) node).injector().getInstance(NodeEnvironment.class).nodeLocation();
File nodeWork = ((InternalNode) node).injector().getInstance(NodeEnvironment.class).nodeDataLocation();
node.close();
if (clearNodeWork && !settings.get("gateway.type").equals("local")) {
FileSystemUtils.deleteRecursively(nodeWork);

View File

@@ -140,7 +140,7 @@ public class RollingRestartStressTest {
// start doing the rolling restart
int nodeIndex = 0;
while (true) {
File nodeWork = ((InternalNode) nodes[nodeIndex]).injector().getInstance(NodeEnvironment.class).nodeLocation();
File nodeWork = ((InternalNode) nodes[nodeIndex]).injector().getInstance(NodeEnvironment.class).nodeDataLocation();
nodes[nodeIndex].close();
if (clearNodeWork) {
FileSystemUtils.deleteRecursively(nodeWork);