Don't create / use the work directory if not needed (for example, on client / non data nodes), closes #249.
This commit is contained in:
parent
7ba3b3a9eb
commit
a0ead02299
|
@ -28,6 +28,7 @@ import org.elasticsearch.cluster.ClusterName;
|
|||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
@ -83,6 +84,10 @@ public abstract class TransportNodesOperationAction<Request extends NodesOperati
|
|||
|
||||
protected abstract boolean accumulateExceptions();
|
||||
|
||||
/**
 * Hook allowing subclasses to restrict the set of nodes a nodes-level
 * operation is executed on. The default implementation performs no
 * filtering and returns the given node ids unchanged.
 *
 * @param nodes    the discovery nodes of the current cluster state
 * @param nodesIds the resolved node ids the request targets
 * @return the (possibly filtered) node ids to execute the operation on
 */
protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) {
    return nodesIds;
}
|
||||
|
||||
|
||||
private class AsyncAction {
|
||||
|
||||
|
@ -112,7 +117,7 @@ public abstract class TransportNodesOperationAction<Request extends NodesOperati
|
|||
nodesIds[index++] = node.id();
|
||||
}
|
||||
}
|
||||
this.nodesIds = nodesIds;
|
||||
this.nodesIds = filterNodeIds(clusterState.nodes(), nodesIds);
|
||||
this.responses = new AtomicReferenceArray<Object>(nodesIds.length);
|
||||
}
|
||||
|
||||
|
|
|
@ -62,7 +62,6 @@ public class Environment {
|
|||
} else {
|
||||
homeFile = new File(System.getProperty("user.dir"));
|
||||
}
|
||||
homeFile.mkdirs();
|
||||
|
||||
if (settings.get("path.conf") != null) {
|
||||
configFile = new File(cleanPath(settings.get("path.conf")));
|
||||
|
@ -81,9 +80,7 @@ public class Environment {
|
|||
} else {
|
||||
workFile = new File(homeFile, "work");
|
||||
}
|
||||
workFile.mkdirs();
|
||||
workWithClusterFile = new File(workFile, ClusterName.clusterNameFromSettings(settings).value());
|
||||
workWithClusterFile.mkdirs();
|
||||
|
||||
if (settings.get("path.logs") != null) {
|
||||
logsFile = new File(cleanPath(settings.get("path.logs")));
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.env;
|
|||
|
||||
import org.apache.lucene.store.Lock;
|
||||
import org.apache.lucene.store.NativeFSLockFactory;
|
||||
import org.elasticsearch.ElasticSearchIllegalStateException;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -48,6 +49,12 @@ public class NodeEnvironment extends AbstractComponent {
|
|||
@Inject public NodeEnvironment(Settings settings, Environment environment) throws IOException {
|
||||
super(settings);
|
||||
|
||||
if (!settings.getAsBoolean("node.data", true) || settings.getAsBoolean("node.client", false)) {
|
||||
nodeFile = null;
|
||||
lock = null;
|
||||
return;
|
||||
}
|
||||
|
||||
Lock lock = null;
|
||||
File dir = null;
|
||||
for (int i = 0; i < 100; i++) {
|
||||
|
@ -77,7 +84,14 @@ public class NodeEnvironment extends AbstractComponent {
|
|||
}
|
||||
}
|
||||
|
||||
public boolean hasNodeFile() {
|
||||
return nodeFile != null && lock != null;
|
||||
}
|
||||
|
||||
/**
 * The directory where this node stores its local data.
 *
 * @throws ElasticSearchIllegalStateException if the node was started without
 *         local storage (for example, a client / non data node), in which
 *         case no node directory was created or locked
 */
public File nodeFile() {
    if (nodeFile == null || lock == null) {
        throw new ElasticSearchIllegalStateException("node is not configured to store local location");
    }
    return nodeFile;
}
|
||||
|
||||
|
|
|
@ -54,14 +54,10 @@ public abstract class FsIndexStore extends AbstractIndexStore {
|
|||
|
||||
public FsIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, NodeEnvironment nodeEnv) {
|
||||
super(index, indexSettings, indexService);
|
||||
this.location = new File(new File(nodeEnv.nodeFile(), "indices"), index.name());
|
||||
|
||||
if (!location.exists()) {
|
||||
for (int i = 0; i < 5; i++) {
|
||||
if (location.mkdirs()) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (nodeEnv.hasNodeFile()) {
|
||||
this.location = new File(new File(nodeEnv.nodeFile(), "indices"), index.name());
|
||||
} else {
|
||||
this.location = null;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -70,22 +66,31 @@ public abstract class FsIndexStore extends AbstractIndexStore {
|
|||
}
|
||||
|
||||
@Override public ByteSizeValue backingStoreTotalSpace() {
|
||||
if (location == null) {
|
||||
return new ByteSizeValue(0);
|
||||
}
|
||||
long totalSpace = location.getTotalSpace();
|
||||
if (totalSpace == 0) {
|
||||
totalSpace = -1;
|
||||
totalSpace = 0;
|
||||
}
|
||||
return new ByteSizeValue(totalSpace);
|
||||
}
|
||||
|
||||
@Override public ByteSizeValue backingStoreFreeSpace() {
|
||||
if (location == null) {
|
||||
return new ByteSizeValue(0);
|
||||
}
|
||||
long usableSpace = location.getUsableSpace();
|
||||
if (usableSpace == 0) {
|
||||
usableSpace = -1;
|
||||
usableSpace = 0;
|
||||
}
|
||||
return new ByteSizeValue(usableSpace);
|
||||
}
|
||||
|
||||
@Override public void deleteUnallocated(ShardId shardId) throws IOException {
|
||||
if (location == null) {
|
||||
return;
|
||||
}
|
||||
if (indexService.hasShard(shardId.id())) {
|
||||
throw new ElasticSearchIllegalStateException(shardId + " allocated, can't be deleted");
|
||||
}
|
||||
|
@ -93,6 +98,9 @@ public abstract class FsIndexStore extends AbstractIndexStore {
|
|||
}
|
||||
|
||||
@Override public StoreFilesMetaData[] listUnallocatedStores() throws IOException {
|
||||
if (location == null) {
|
||||
return new StoreFilesMetaData[0];
|
||||
}
|
||||
File[] shardLocations = location.listFiles();
|
||||
if (shardLocations == null || shardLocations.length == 0) {
|
||||
return new StoreFilesMetaData[0];
|
||||
|
@ -108,6 +116,9 @@ public abstract class FsIndexStore extends AbstractIndexStore {
|
|||
}
|
||||
|
||||
@Override protected StoreFilesMetaData listUnallocatedStoreMetaData(ShardId shardId) throws IOException {
|
||||
if (location == null) {
|
||||
return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
|
||||
}
|
||||
File shardIndexLocation = shardIndexLocation(shardId);
|
||||
if (!shardIndexLocation.exists()) {
|
||||
return new StoreFilesMetaData(false, shardId, ImmutableMap.<String, StoreFileMetaData>of());
|
||||
|
@ -139,10 +150,6 @@ public abstract class FsIndexStore extends AbstractIndexStore {
|
|||
return cachedUnallocatedMd5s.get(shardId);
|
||||
}
|
||||
|
||||
/**
 * The root location of this index store on the file system, or
 * <tt>null</tt> when the node has no local storage (client / non data node).
 */
public File location() {
    return location;
}
|
||||
|
||||
public File shardLocation(ShardId shardId) {
|
||||
return new File(location, Integer.toString(shardId.id()));
|
||||
}
|
||||
|
|
|
@ -25,7 +25,9 @@ import org.elasticsearch.action.support.nodes.*;
|
|||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.common.collect.Lists;
|
||||
import org.elasticsearch.common.collect.Sets;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
@ -39,6 +41,7 @@ import org.elasticsearch.transport.TransportService;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicReferenceArray;
|
||||
|
||||
/**
|
||||
|
@ -82,6 +85,18 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesOperatio
|
|||
return new NodeStoreFilesMetaData();
|
||||
}
|
||||
|
||||
// only list stores on data node
|
||||
|
||||
@Override protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) {
|
||||
Set<String> onlyDataNodeIds = Sets.newHashSet();
|
||||
for (String nodeId : nodesIds) {
|
||||
if (nodes.nodeExists(nodeId) && nodes.get(nodeId).dataNode()) {
|
||||
onlyDataNodeIds.add(nodeId);
|
||||
}
|
||||
}
|
||||
return onlyDataNodeIds.toArray(new String[onlyDataNodeIds.size()]);
|
||||
}
|
||||
|
||||
@Override protected NodesStoreFilesMetaData newResponse(Request request, AtomicReferenceArray responses) {
|
||||
final List<NodeStoreFilesMetaData> nodeStoreFilesMetaDatas = Lists.newArrayList();
|
||||
for (int i = 0; i < responses.length(); i++) {
|
||||
|
|
|
@ -72,7 +72,6 @@ public class DumpMonitorService extends AbstractComponent {
|
|||
} else {
|
||||
dumpLocationFile = new File(workFile, "dump");
|
||||
}
|
||||
boolean success = dumpLocationFile.mkdirs();
|
||||
|
||||
Map<String, DumpContributor> contributorMap = newHashMap();
|
||||
if (contributors != null) {
|
||||
|
|
Loading…
Reference in New Issue