Don't create / use the work directory if not needed (for example, on client / non-data nodes), closes #249.

kimchy 2010-07-10 14:29:39 +03:00
parent 7ba3b3a9eb
commit a0ead02299
6 changed files with 56 additions and 19 deletions


@@ -28,6 +28,7 @@ import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -83,6 +84,10 @@ public abstract class TransportNodesOperationAction<Request extends NodesOperati
     protected abstract boolean accumulateExceptions();

+    protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) {
+        return nodesIds;
+    }
+
     private class AsyncAction {
@@ -112,7 +117,7 @@ public abstract class TransportNodesOperationAction<Request extends NodesOperati
                     nodesIds[index++] = node.id();
                 }
             }
-            this.nodesIds = nodesIds;
+            this.nodesIds = filterNodeIds(clusterState.nodes(), nodesIds);
             this.responses = new AtomicReferenceArray<Object>(nodesIds.length);
         }


@@ -62,7 +62,6 @@ public class Environment {
         } else {
             homeFile = new File(System.getProperty("user.dir"));
         }
-        homeFile.mkdirs();

         if (settings.get("path.conf") != null) {
             configFile = new File(cleanPath(settings.get("path.conf")));
@@ -81,9 +80,7 @@ public class Environment {
         } else {
             workFile = new File(homeFile, "work");
         }
-        workFile.mkdirs();
         workWithClusterFile = new File(workFile, ClusterName.clusterNameFromSettings(settings).value());
-        workWithClusterFile.mkdirs();

         if (settings.get("path.logs") != null) {
             logsFile = new File(cleanPath(settings.get("path.logs")));


@@ -21,6 +21,7 @@ package org.elasticsearch.env;

 import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.NativeFSLockFactory;
+import org.elasticsearch.ElasticSearchIllegalStateException;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
@@ -48,6 +49,12 @@ public class NodeEnvironment extends AbstractComponent {
     @Inject public NodeEnvironment(Settings settings, Environment environment) throws IOException {
         super(settings);

+        if (!settings.getAsBoolean("node.data", true) || settings.getAsBoolean("node.client", false)) {
+            nodeFile = null;
+            lock = null;
+            return;
+        }
+
         Lock lock = null;
         File dir = null;
         for (int i = 0; i < 100; i++) {
@@ -77,7 +84,14 @@
         }
     }

+    public boolean hasNodeFile() {
+        return nodeFile != null && lock != null;
+    }
+
     public File nodeFile() {
+        if (nodeFile == null || lock == null) {
+            throw new ElasticSearchIllegalStateException("node is not configured to store local location");
+        }
         return nodeFile;
     }
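For context, the guard added above keys off the "node.data" and "node.client" settings: a node that is a client or carries no data now skips the node-directory and lock setup entirely. Below is a minimal sketch, not part of this commit, of starting such a client-only node; the ImmutableSettings / NodeBuilder calls are assumptions about the API of this era rather than code from this change.

    // Sketch only (assumed era API): a client node started like this exercises the new
    // NodeEnvironment guard, so no per-node directory is created and no lock is taken.
    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.node.Node;
    import org.elasticsearch.node.NodeBuilder;

    public class ClientNodeExample {
        public static void main(String[] args) {
            Settings settings = ImmutableSettings.settingsBuilder()
                    .put("node.client", true)  // or .put("node.data", false) for a non-data node
                    .build();
            Node node = NodeBuilder.nodeBuilder().settings(settings).node();
            try {
                Client client = node.client();
                // issue requests through 'client'; this node stores nothing locally
            } finally {
                node.close();
            }
        }
    }

Combined with the Environment change above, which no longer calls mkdirs() eagerly, such a node creates no work directory unless a component actually needs one.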


@@ -54,14 +54,10 @@ public abstract class FsIndexStore extends AbstractIndexStore {
     public FsIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, NodeEnvironment nodeEnv) {
         super(index, indexSettings, indexService);
-        this.location = new File(new File(nodeEnv.nodeFile(), "indices"), index.name());
-
-        if (!location.exists()) {
-            for (int i = 0; i < 5; i++) {
-                if (location.mkdirs()) {
-                    break;
-                }
-            }
+        if (nodeEnv.hasNodeFile()) {
+            this.location = new File(new File(nodeEnv.nodeFile(), "indices"), index.name());
+        } else {
+            this.location = null;
         }
     }
@@ -70,22 +66,31 @@ public abstract class FsIndexStore extends AbstractIndexStore {
     }

     @Override public ByteSizeValue backingStoreTotalSpace() {
+        if (location == null) {
+            return new ByteSizeValue(0);
+        }
         long totalSpace = location.getTotalSpace();
         if (totalSpace == 0) {
-            totalSpace = -1;
+            totalSpace = 0;
         }
         return new ByteSizeValue(totalSpace);
     }

     @Override public ByteSizeValue backingStoreFreeSpace() {
+        if (location == null) {
+            return new ByteSizeValue(0);
+        }
         long usableSpace = location.getUsableSpace();
         if (usableSpace == 0) {
-            usableSpace = -1;
+            usableSpace = 0;
         }
         return new ByteSizeValue(usableSpace);
     }

     @Override public void deleteUnallocated(ShardId shardId) throws IOException {
+        if (location == null) {
+            return;
+        }
         if (indexService.hasShard(shardId.id())) {
             throw new ElasticSearchIllegalStateException(shardId + " allocated, can't be deleted");
         }
@@ -93,6 +98,9 @@ public abstract class FsIndexStore extends AbstractIndexStore {
     }

     @Override public StoreFilesMetaData[] listUnallocatedStores() throws IOException {
+        if (location == null) {
+            return new StoreFilesMetaData[0];
+        }
         File[] shardLocations = location.listFiles();
         if (shardLocations == null || shardLocations.length == 0) {
             return new StoreFilesMetaData[0];
@@ -108,6 +116,9 @@ public abstract class FsIndexStore extends AbstractIndexStore {
     }

     @Override protected StoreFilesMetaData listUnallocatedStoreMetaData(ShardId shardId) throws IOException {
+        if (location == null) {
+            return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
+        }
         File shardIndexLocation = shardIndexLocation(shardId);
         if (!shardIndexLocation.exists()) {
             return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
@@ -139,10 +150,6 @@ public abstract class FsIndexStore extends AbstractIndexStore {
         return cachedUnallocatedMd5s.get(shardId);
     }

-    public File location() {
-        return location;
-    }
-
     public File shardLocation(ShardId shardId) {
         return new File(location, Integer.toString(shardId.id()));
     }


@@ -25,7 +25,9 @@ import org.elasticsearch.action.support.nodes.*;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.collect.Lists;
+import org.elasticsearch.common.collect.Sets;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -39,6 +41,7 @@ import org.elasticsearch.transport.TransportService;
 import java.io.IOException;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.atomic.AtomicReferenceArray;

 /**
@@ -82,6 +85,18 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesOperatio
         return new NodeStoreFilesMetaData();
     }

+    // only list stores on data node
+    @Override protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) {
+        Set<String> onlyDataNodeIds = Sets.newHashSet();
+        for (String nodeId : nodesIds) {
+            if (nodes.nodeExists(nodeId) && nodes.get(nodeId).dataNode()) {
+                onlyDataNodeIds.add(nodeId);
+            }
+        }
+        return onlyDataNodeIds.toArray(new String[onlyDataNodeIds.size()]);
+    }
+
     @Override protected NodesStoreFilesMetaData newResponse(Request request, AtomicReferenceArray responses) {
         final List<NodeStoreFilesMetaData> nodeStoreFilesMetaDatas = Lists.newArrayList();
         for (int i = 0; i < responses.length(); i++) {


@@ -72,7 +72,6 @@ public class DumpMonitorService extends AbstractComponent {
         } else {
             dumpLocationFile = new File(workFile, "dump");
         }
-        boolean success = dumpLocationFile.mkdirs();

         Map<String, DumpContributor> contributorMap = newHashMap();
         if (contributors != null) {