diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml
index 72e10cb9398..13b983c8a5e 100644
--- a/buildSrc/src/main/resources/checkstyle_suppressions.xml
+++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml
@@ -435,14 +435,12 @@
-
-
@@ -602,7 +600,6 @@
-
@@ -1119,7 +1116,6 @@
-
@@ -1140,7 +1136,6 @@
-
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
index 2a8984e59d4..2cb4fb6450a 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java
@@ -135,6 +135,8 @@ final class Bootstrap {
JNANatives.trySetMaxNumberOfThreads();
+ JNANatives.trySetMaxSizeVirtualMemory();
+
// init lucene random seed. it will use /dev/urandom where available:
StringHelper.randomId();
}
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java
index 433dd4498a4..5f4e980d68e 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/BootstrapCheck.java
@@ -123,6 +123,7 @@ final class BootstrapCheck {
if (Constants.LINUX) {
checks.add(new MaxNumberOfThreadsCheck());
}
+ checks.add(new MaxSizeVirtualMemoryCheck());
return Collections.unmodifiableList(checks);
}
@@ -249,4 +250,32 @@ final class BootstrapCheck {
}
+ static class MaxSizeVirtualMemoryCheck implements Check {
+
+ @Override
+ public boolean check() {
+ return getMaxSizeVirtualMemory() != Long.MIN_VALUE && getMaxSizeVirtualMemory() != getRlimInfinity();
+ }
+
+ @Override
+ public String errorMessage() {
+ return String.format(
+ Locale.ROOT,
+ "max size virtual memory [%d] for user [%s] likely too low, increase to [unlimited]",
+ getMaxSizeVirtualMemory(),
+ BootstrapInfo.getSystemProperties().get("user.name"));
+ }
+
+ // visible for testing
+ long getRlimInfinity() {
+ return JNACLibrary.RLIM_INFINITY;
+ }
+
+ // visible for testing
+ long getMaxSizeVirtualMemory() {
+ return JNANatives.MAX_SIZE_VIRTUAL_MEMORY;
+ }
+
+ }
+
}
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java b/core/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java
index 573f3d5be3e..5d1369b21f7 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/JNACLibrary.java
@@ -39,6 +39,7 @@ final class JNACLibrary {
public static final int MCL_CURRENT = 1;
public static final int ENOMEM = 12;
public static final int RLIMIT_MEMLOCK = Constants.MAC_OS_X ? 6 : 8;
+ public static final int RLIMIT_AS = Constants.MAC_OS_X ? 5 : 9;
public static final long RLIM_INFINITY = Constants.MAC_OS_X ? 9223372036854775807L : -1L;
static {
diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java
index 0ea8da6a9be..e55d38a0f72 100644
--- a/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java
+++ b/core/src/main/java/org/elasticsearch/bootstrap/JNANatives.java
@@ -52,6 +52,8 @@ class JNANatives {
// the user ID that owns the running Elasticsearch process
static long MAX_NUMBER_OF_THREADS = -1;
+ static long MAX_SIZE_VIRTUAL_MEMORY = Long.MIN_VALUE;
+
static void tryMlockall() {
int errno = Integer.MIN_VALUE;
String errMsg = null;
@@ -124,6 +126,17 @@ class JNANatives {
}
}
+ static void trySetMaxSizeVirtualMemory() {
+ if (Constants.LINUX || Constants.MAC_OS_X) {
+ final JNACLibrary.Rlimit rlimit = new JNACLibrary.Rlimit();
+ if (JNACLibrary.getrlimit(JNACLibrary.RLIMIT_AS, rlimit) == 0) {
+ MAX_SIZE_VIRTUAL_MEMORY = rlimit.rlim_cur.longValue();
+ } else {
+ logger.warn("unable to retrieve max size virtual memory [" + JNACLibrary.strerror(Native.getLastError()) + "]");
+ }
+ }
+ }
+
static String rlimitToString(long value) {
assert Constants.LINUX || Constants.MAC_OS_X;
if (value == JNACLibrary.RLIM_INFINITY) {
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
index 8bbd6f09d7e..3101a2c04cc 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java
@@ -20,15 +20,12 @@ package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.misc.IndexMergeTool;
import org.elasticsearch.Version;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.IndexScopedSettings;
-import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.mapper.MapperService;
@@ -36,10 +33,7 @@ import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.mapper.MapperRegistry;
import java.util.Collections;
-import java.util.Map;
-import java.util.Set;
-import static java.util.Collections.unmodifiableSet;
import static org.elasticsearch.common.util.set.Sets.newHashSet;
/**
@@ -53,13 +47,13 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet;
public class MetaDataIndexUpgradeService extends AbstractComponent {
private final MapperRegistry mapperRegistry;
- private final IndexScopedSettings indexScopedSettigns;
+ private final IndexScopedSettings indexScopedSettings;
@Inject
public MetaDataIndexUpgradeService(Settings settings, MapperRegistry mapperRegistry, IndexScopedSettings indexScopedSettings) {
super(settings);
this.mapperRegistry = mapperRegistry;
- this.indexScopedSettigns = indexScopedSettings;
+ this.indexScopedSettings = indexScopedSettings;
}
/**
@@ -182,39 +176,13 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
}
}
- private static final String ARCHIVED_SETTINGS_PREFIX = "archived.";
-
IndexMetaData archiveBrokenIndexSettings(IndexMetaData indexMetaData) {
- Settings settings = indexMetaData.getSettings();
- Settings.Builder builder = Settings.builder();
- boolean changed = false;
- for (Map.Entry entry : settings.getAsMap().entrySet()) {
- try {
- Setting> setting = indexScopedSettigns.get(entry.getKey());
- if (setting != null) {
- setting.get(settings);
- builder.put(entry.getKey(), entry.getValue());
- } else {
- if (indexScopedSettigns.isPrivateSetting(entry.getKey()) || entry.getKey().startsWith(ARCHIVED_SETTINGS_PREFIX)) {
- builder.put(entry.getKey(), entry.getValue());
- } else {
- changed = true;
- logger.warn("[{}] found unknown index setting: {} value: {} - archiving", indexMetaData.getIndex(), entry.getKey(), entry.getValue());
- // we put them back in here such that tools can check from the outside if there are any indices with broken settings. The setting can remain there
- // but we want users to be aware that some of their setting are broken and they can research why and what they need to do to replace them.
- builder.put(ARCHIVED_SETTINGS_PREFIX + entry.getKey(), entry.getValue());
- }
- }
- } catch (IllegalArgumentException ex) {
- changed = true;
- logger.warn("[{}] found invalid index setting: {} value: {} - archiving",ex, indexMetaData.getIndex(), entry.getKey(), entry.getValue());
- // we put them back in here such that tools can check from the outside if there are any indices with broken settings. The setting can remain there
- // but we want users to be aware that some of their setting sare broken and they can research why and what they need to do to replace them.
- builder.put(ARCHIVED_SETTINGS_PREFIX + entry.getKey(), entry.getValue());
- }
+ final Settings settings = indexMetaData.getSettings();
+ final Settings upgrade = indexScopedSettings.archiveUnknownOrBrokenSettings(settings);
+ if (upgrade != settings) {
+ return IndexMetaData.builder(indexMetaData).settings(upgrade).build();
+ } else {
+ return indexMetaData;
}
-
- return changed ? IndexMetaData.builder(indexMetaData).settings(builder.build()).build() : indexMetaData;
}
-
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
index fa9b3492685..54e8535b57e 100644
--- a/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/service/ClusterService.java
@@ -1000,4 +1000,8 @@ public class ClusterService extends AbstractLifecycleComponent {
}
}
}
+
+ public ClusterSettings getClusterSettings() {
+ return clusterSettings;
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
index 410adc82da1..358706c9d3f 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java
@@ -48,6 +48,7 @@ import java.util.stream.Collectors;
* This service offers transactional application of updates settings.
*/
public abstract class AbstractScopedSettings extends AbstractComponent {
+ public static final String ARCHIVED_SETTINGS_PREFIX = "archived.";
private Settings lastSettingsApplied = Settings.EMPTY;
private final List<SettingUpdater<?>> settingUpdaters = new CopyOnWriteArrayList<>();
private final Map<String, Setting<?>> complexMatchers;
@@ -478,4 +479,53 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
}
return null;
}
+
+ /**
+ * Archives broken or unknown settings. Any setting that is not recognized or fails
+ * validation will be archived. This means the setting is prefixed with {@value ARCHIVED_SETTINGS_PREFIX}
+ * and remains in the settings object. This can be used to detect broken settings via APIs.
+ */
+ public Settings archiveUnknownOrBrokenSettings(Settings settings) {
+ Settings.Builder builder = Settings.builder();
+ boolean changed = false;
+        for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
+ try {
+                Setting<?> setting = get(entry.getKey());
+ if (setting != null) {
+ setting.get(settings);
+ builder.put(entry.getKey(), entry.getValue());
+ } else {
+ if (entry.getKey().startsWith(ARCHIVED_SETTINGS_PREFIX) || isPrivateSetting(entry.getKey())) {
+ builder.put(entry.getKey(), entry.getValue());
+ } else {
+ changed = true;
+ logger.warn("found unknown setting: {} value: {} - archiving", entry.getKey(), entry.getValue());
+ // we put them back in here such that tools can check from the outside if there are any indices with broken settings. The setting can remain there
+                    // but we want users to be aware that some of their settings are broken and they can research why and what they need to do to replace them.
+ builder.put(ARCHIVED_SETTINGS_PREFIX + entry.getKey(), entry.getValue());
+ }
+ }
+ } catch (IllegalArgumentException ex) {
+ changed = true;
+ logger.warn("found invalid setting: {} value: {} - archiving",ex , entry.getKey(), entry.getValue());
+ // we put them back in here such that tools can check from the outside if there are any indices with broken settings. The setting can remain there
+                // but we want users to be aware that some of their settings are broken and they can research why and what they need to do to replace them.
+ builder.put(ARCHIVED_SETTINGS_PREFIX + entry.getKey(), entry.getValue());
+ }
+ }
+ if (changed) {
+ return builder.build();
+ } else {
+ return settings;
+ }
+ }
+
+ /**
+     * Returns <code>true</code> iff the setting is a private setting ie. it should be treated as valid even though it has no internal
+     * representation. Otherwise <code>false</code>
+ */
+ // TODO this should be replaced by Setting.Property.HIDDEN or something like this.
+ protected boolean isPrivateSetting(String key) {
+ return false;
+ }
}
diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
index 322ac4de799..fb498283d7b 100644
--- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
+++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java
@@ -135,7 +135,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS_SETTING,
FsDirectoryService.INDEX_LOCK_FACTOR_SETTING,
EngineConfig.INDEX_CODEC_SETTING,
- IndexWarmer.INDEX_NORMS_LOADING_SETTING,
// validate that built-in similarities don't get redefined
Setting.groupSetting("index.similarity.", (s) -> {
Map groups = s.getAsGroups();
@@ -171,7 +170,8 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
super.validateSettingKey(setting);
}
- public boolean isPrivateSetting(String key) {
+ @Override
+ protected final boolean isPrivateSetting(String key) {
switch (key) {
case IndexMetaData.SETTING_CREATION_DATE:
case IndexMetaData.SETTING_INDEX_UUID:
diff --git a/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java b/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java
index 3640d3e4bec..221dc234511 100644
--- a/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java
+++ b/core/src/main/java/org/elasticsearch/common/util/IndexFolderUpgrader.java
@@ -24,12 +24,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.NodeEnvironment;
-import org.elasticsearch.gateway.MetaDataStateFormat;
-import org.elasticsearch.gateway.MetaStateService;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java
index 8ca53af186c..2229d45840b 100644
--- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java
+++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java
@@ -961,6 +961,10 @@ public final class XContentBuilder implements BytesStream, Releasable {
return this;
}
+ public XContentBuilder timeValueField(String rawFieldName, String readableFieldName, TimeValue timeValue) throws IOException {
+ return timeValueField(rawFieldName, readableFieldName, timeValue.millis(), TimeUnit.MILLISECONDS);
+ }
+
public XContentBuilder timeValueField(String rawFieldName, String readableFieldName, long rawTime, TimeUnit timeUnit) throws
IOException {
if (humanReadable) {
diff --git a/core/src/main/java/org/elasticsearch/gateway/Gateway.java b/core/src/main/java/org/elasticsearch/gateway/Gateway.java
index c879e6ab710..b2cb2d11079 100644
--- a/core/src/main/java/org/elasticsearch/gateway/Gateway.java
+++ b/core/src/main/java/org/elasticsearch/gateway/Gateway.java
@@ -19,6 +19,8 @@
package org.elasticsearch.gateway;
+import com.carrotsearch.hppc.ObjectFloatHashMap;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.cluster.ClusterChangedEvent;
@@ -28,9 +30,11 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.env.NodeEnvironment;
+import org.elasticsearch.index.Index;
import org.elasticsearch.index.NodeServicesProvider;
import org.elasticsearch.indices.IndicesService;
@@ -84,6 +88,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
}
}
+        ObjectFloatHashMap<Index> indices = new ObjectFloatHashMap<>();
MetaData electedGlobalState = null;
int found = 0;
for (TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState) {
@@ -96,34 +101,68 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
} else if (nodeState.metaData().version() > electedGlobalState.version()) {
electedGlobalState = nodeState.metaData();
}
+            for (ObjectCursor<IndexMetaData> cursor : nodeState.metaData().indices().values()) {
+ indices.addTo(cursor.value.getIndex(), 1);
+ }
}
if (found < requiredAllocation) {
listener.onFailure("found [" + found + "] metadata states, required [" + requiredAllocation + "]");
return;
}
- // verify index metadata
- MetaData.Builder metaDataBuilder = MetaData.builder(electedGlobalState);
- for (IndexMetaData indexMetaData : electedGlobalState) {
- try {
- if (indexMetaData.getState() == IndexMetaData.State.OPEN) {
- // verify that we can actually create this index - if not we recover it as closed with lots of warn logs
- indicesService.verifyIndexMetadata(nodeServicesProvider, indexMetaData);
+ // update the global state, and clean the indices, we elect them in the next phase
+ MetaData.Builder metaDataBuilder = MetaData.builder(electedGlobalState).removeAllIndices();
+
+ assert !indices.containsKey(null);
+ final Object[] keys = indices.keys;
+ for (int i = 0; i < keys.length; i++) {
+ if (keys[i] != null) {
+ Index index = (Index) keys[i];
+ IndexMetaData electedIndexMetaData = null;
+ int indexMetaDataCount = 0;
+ for (TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState) {
+ if (nodeState.metaData() == null) {
+ continue;
+ }
+ IndexMetaData indexMetaData = nodeState.metaData().index(index);
+ if (indexMetaData == null) {
+ continue;
+ }
+ if (electedIndexMetaData == null) {
+ electedIndexMetaData = indexMetaData;
+ } else if (indexMetaData.getVersion() > electedIndexMetaData.getVersion()) {
+ electedIndexMetaData = indexMetaData;
+ }
+ indexMetaDataCount++;
+ }
+ if (electedIndexMetaData != null) {
+ if (indexMetaDataCount < requiredAllocation) {
+ logger.debug("[{}] found [{}], required [{}], not adding", index, indexMetaDataCount, requiredAllocation);
+ } // TODO if this logging statement is correct then we are missing an else here
+ try {
+ if (electedIndexMetaData.getState() == IndexMetaData.State.OPEN) {
+ // verify that we can actually create this index - if not we recover it as closed with lots of warn logs
+ indicesService.verifyIndexMetadata(nodeServicesProvider, electedIndexMetaData);
+ }
+ } catch (Exception e) {
+ logger.warn("recovering index {} failed - recovering as closed", e, electedIndexMetaData.getIndex());
+ electedIndexMetaData = IndexMetaData.builder(electedIndexMetaData).state(IndexMetaData.State.CLOSE).build();
+ }
+
+ metaDataBuilder.put(electedIndexMetaData, false);
}
- } catch (Exception e) {
- logger.warn("recovering index {} failed - recovering as closed", e, indexMetaData.getIndex());
- indexMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE).build();
- metaDataBuilder.put(indexMetaData, true);
}
}
+ final ClusterSettings clusterSettings = clusterService.getClusterSettings();
+ metaDataBuilder.persistentSettings(clusterSettings.archiveUnknownOrBrokenSettings(metaDataBuilder.persistentSettings()));
+ metaDataBuilder.transientSettings(clusterSettings.archiveUnknownOrBrokenSettings(metaDataBuilder.transientSettings()));
ClusterState.Builder builder = ClusterState.builder(clusterService.state().getClusterName());
builder.metaData(metaDataBuilder);
listener.onSuccess(builder.build());
}
-
public void reset() throws Exception {
try {
Path[] dataPaths = nodeEnv.nodeDataPaths();
- logger.trace("removing node data paths: [{}]", (Object) dataPaths);
+ logger.trace("removing node data paths: [{}]", (Object)dataPaths);
IOUtils.rm(dataPaths);
} catch (Exception ex) {
logger.debug("failed to delete shard locations", ex);
diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
index 1da82468997..867b7420107 100644
--- a/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
+++ b/core/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
@@ -234,8 +234,8 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
// We successfully checked all indices for backward compatibility and found no non-upgradable indices, which
// means the upgrade can continue. Now it's safe to overwrite index metadata with the new version.
for (IndexMetaData indexMetaData : updateIndexMetaData) {
- // since we still haven't upgraded the index folders, we write index state in the old folder
- metaStateService.writeIndex("upgrade", indexMetaData, nodeEnv.resolveIndexFolder(indexMetaData.getIndex().getUUID()));
+ // since we upgraded the index folders already, write index state in the upgraded index folder
+ metaStateService.writeIndex("upgrade", indexMetaData);
}
}
diff --git a/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java
index 0edfb563174..89192c47d09 100644
--- a/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java
+++ b/core/src/main/java/org/elasticsearch/gateway/MetaStateService.java
@@ -121,18 +121,11 @@ public class MetaStateService extends AbstractComponent {
* Writes the index state.
*/
void writeIndex(String reason, IndexMetaData indexMetaData) throws IOException {
- writeIndex(reason, indexMetaData, nodeEnv.indexPaths(indexMetaData.getIndex()));
- }
-
- /**
-     * Writes the index state in <code>locations</code>, use {@link #writeGlobalState(String, MetaData)}
- * to write index state in index paths
- */
- void writeIndex(String reason, IndexMetaData indexMetaData, Path[] locations) throws IOException {
final Index index = indexMetaData.getIndex();
logger.trace("[{}] writing state, reason [{}]", index, reason);
try {
- IndexMetaData.FORMAT.write(indexMetaData, indexMetaData.getVersion(), locations);
+ IndexMetaData.FORMAT.write(indexMetaData, indexMetaData.getVersion(),
+ nodeEnv.indexPaths(indexMetaData.getIndex()));
} catch (Throwable ex) {
logger.warn("[{}]: failed to write index state", ex, index);
throw new IOException("failed to write state for [" + index + "]", ex);
diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java
index bb73e212a77..b2c218ae10d 100644
--- a/core/src/main/java/org/elasticsearch/index/IndexService.java
+++ b/core/src/main/java/org/elasticsearch/index/IndexService.java
@@ -44,6 +44,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -57,7 +58,6 @@ import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineClosedException;
import org.elasticsearch.index.engine.EngineFactory;
-import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
@@ -114,6 +114,8 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
private volatile AsyncRefreshTask refreshTask;
private volatile AsyncTranslogFSync fsyncTask;
private final SearchSlowLog searchSlowLog;
+ private final ThreadPool threadPool;
+ private final BigArrays bigArrays;
public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv,
SimilarityService similarityService,
@@ -132,9 +134,13 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
this.indexSettings = indexSettings;
this.analysisService = registry.build(indexSettings);
this.similarityService = similarityService;
- this.mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, IndexService.this::newQueryShardContext);
- this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, nodeServicesProvider.getCircuitBreakerService(), mapperService);
+ this.mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry,
+ IndexService.this::newQueryShardContext);
+ this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache,
+ nodeServicesProvider.getCircuitBreakerService(), mapperService);
this.shardStoreDeleter = shardStoreDeleter;
+ this.bigArrays = nodeServicesProvider.getBigArrays();
+ this.threadPool = nodeServicesProvider.getThreadPool();
this.eventListener = eventListener;
this.nodeEnv = nodeEnv;
this.nodeServicesProvider = nodeServicesProvider;
@@ -142,7 +148,9 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
indexFieldData.setListener(new FieldDataCacheListener(this));
this.bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetCacheListener(this));
PercolatorQueryCache percolatorQueryCache = new PercolatorQueryCache(indexSettings, IndexService.this::newQueryShardContext);
- this.warmer = new IndexWarmer(indexSettings.getSettings(), nodeServicesProvider.getThreadPool(), bitsetFilterCache.createListener(nodeServicesProvider.getThreadPool()), percolatorQueryCache.createListener(nodeServicesProvider.getThreadPool()));
+ this.warmer = new IndexWarmer(indexSettings.getSettings(), threadPool,
+ bitsetFilterCache.createListener(threadPool),
+ percolatorQueryCache.createListener(threadPool));
this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache, percolatorQueryCache);
this.engineFactory = engineFactory;
// initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE
@@ -232,7 +240,8 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
}
}
} finally {
- IOUtils.close(bitsetFilterCache, indexCache, mapperService, indexFieldData, analysisService, refreshTask, fsyncTask, cache().getPercolatorQueryCache());
+ IOUtils.close(bitsetFilterCache, indexCache, mapperService, indexFieldData, analysisService, refreshTask, fsyncTask,
+ cache().getPercolatorQueryCache());
}
}
}
@@ -302,7 +311,9 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
}
dataPathToShardCount.put(dataPath, curCount + 1);
}
- path = ShardPath.selectNewPathForShard(nodeEnv, shardId, this.indexSettings, routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE ? getAvgShardSizeInBytes() : routing.getExpectedShardSize(),
+ path = ShardPath.selectNewPathForShard(nodeEnv, shardId, this.indexSettings,
+ routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE
+ ? getAvgShardSizeInBytes() : routing.getExpectedShardSize(),
dataPathToShardCount);
logger.debug("{} creating using a new path [{}]", shardId, path);
} else {
@@ -317,17 +328,22 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
// if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary.
final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false ||
(primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
- final Engine.Warmer engineWarmer = (searcher, toLevel) -> {
+ final Engine.Warmer engineWarmer = (searcher) -> {
IndexShard shard = getShardOrNull(shardId.getId());
if (shard != null) {
- warmer.warm(searcher, shard, IndexService.this.indexSettings, toLevel);
+ warmer.warm(searcher, shard, IndexService.this.indexSettings);
}
};
- store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> eventListener.onStoreClosed(shardId)));
+ store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock,
+ new StoreCloseListener(shardId, canDeleteShardContent, () -> eventListener.onStoreClosed(shardId)));
if (useShadowEngine(primary, indexSettings)) {
- indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider, searchSlowLog, engineWarmer); // no indexing listeners - shadow engines don't index
+ indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService,
+ indexFieldData, engineFactory, eventListener, searcherWrapper, threadPool, bigArrays, searchSlowLog, engineWarmer);
+ // no indexing listeners - shadow engines don't index
} else {
- indexShard = new IndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider, searchSlowLog, engineWarmer, listeners);
+ indexShard = new IndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService,
+ indexFieldData, engineFactory, eventListener, searcherWrapper, threadPool, bigArrays, searchSlowLog, engineWarmer,
+ listeners);
}
eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created");
eventListener.afterIndexShardCreated(indexShard);
@@ -372,7 +388,8 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
// and close the shard so no operations are allowed to it
if (indexShard != null) {
try {
- final boolean flushEngine = deleted.get() == false && closed.get(); // only flush we are we closed (closed index or shutdown) and if we are not deleted
+                    // only flush if we are closed (closed index or shutdown) and if we are not deleted
+ final boolean flushEngine = deleted.get() == false && closed.get();
indexShard.close(reason, flushEngine);
} catch (Throwable e) {
logger.debug("[{}] failed to close index shard", e, shardId);
@@ -419,7 +436,8 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
}
/**
- * Creates a new QueryShardContext. The context has not types set yet, if types are required set them via {@link QueryShardContext#setTypes(String...)}
+ * Creates a new QueryShardContext. The context has not types set yet, if types are required set them via
+ * {@link QueryShardContext#setTypes(String...)}
*/
public QueryShardContext newQueryShardContext() {
return new QueryShardContext(
@@ -429,8 +447,12 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
);
}
- ThreadPool getThreadPool() {
- return nodeServicesProvider.getThreadPool();
+ public ThreadPool getThreadPool() {
+ return threadPool;
+ }
+
+ public BigArrays getBigArrays() {
+ return bigArrays;
}
public SearchSlowLog getSearchSlowLog() {
@@ -502,21 +524,21 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
}
@Override
- public void onCache(ShardId shardId, String fieldName, FieldDataType fieldDataType, Accountable ramUsage) {
+ public void onCache(ShardId shardId, String fieldName, Accountable ramUsage) {
if (shardId != null) {
final IndexShard shard = indexService.getShardOrNull(shardId.id());
if (shard != null) {
- shard.fieldData().onCache(shardId, fieldName, fieldDataType, ramUsage);
+ shard.fieldData().onCache(shardId, fieldName, ramUsage);
}
}
}
@Override
- public void onRemoval(ShardId shardId, String fieldName, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes) {
+ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) {
if (shardId != null) {
final IndexShard shard = indexService.getShardOrNull(shardId.id());
if (shard != null) {
- shard.fieldData().onRemoval(shardId, fieldName, fieldDataType, wasEvicted, sizeInBytes);
+ shard.fieldData().onRemoval(shardId, fieldName, wasEvicted, sizeInBytes);
}
}
}
@@ -547,7 +569,8 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
AliasMetaData alias = aliases.get(aliasName);
if (alias == null) {
// This shouldn't happen unless alias disappeared after filteringAliases was called.
- throw new InvalidAliasNameException(indexSettings.getIndex(), aliasNames[0], "Unknown alias name was passed to alias Filter");
+ throw new InvalidAliasNameException(indexSettings.getIndex(), aliasNames[0],
+ "Unknown alias name was passed to alias Filter");
}
Query parsedFilter = parse(alias, context);
if (parsedFilter != null) {
@@ -723,7 +746,8 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
} catch (Exception ex) {
if (lastThrownException == null || sameException(lastThrownException, ex) == false) {
// prevent the annoying fact of logging the same stuff all the time with an interval of 1 sec will spam all your logs
- indexService.logger.warn("failed to run task {} - suppressing re-occurring exceptions unless the exception changes", ex, toString());
+ indexService.logger.warn("failed to run task {} - suppressing re-occurring exceptions unless the exception changes",
+ ex, toString());
lastThrownException = ex;
}
} finally {
diff --git a/core/src/main/java/org/elasticsearch/index/IndexWarmer.java b/core/src/main/java/org/elasticsearch/index/IndexWarmer.java
index 332fcdd380e..499031af970 100644
--- a/core/src/main/java/org/elasticsearch/index/IndexWarmer.java
+++ b/core/src/main/java/org/elasticsearch/index/IndexWarmer.java
@@ -19,14 +19,11 @@
package org.elasticsearch.index;
-import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.DirectoryReader;
import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.settings.Setting;
-import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.engine.Engine;
-import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.DocumentMapper;
@@ -50,9 +47,6 @@ import java.util.concurrent.TimeUnit;
*/
public final class IndexWarmer extends AbstractComponent {
- public static final Setting INDEX_NORMS_LOADING_SETTING = new Setting<>("index.norms.loading",
- MappedFieldType.Loading.LAZY.toString(), (s) -> MappedFieldType.Loading.parse(s, MappedFieldType.Loading.LAZY),
- Property.IndexScope);
private final List listeners;
IndexWarmer(Settings settings, ThreadPool threadPool, Listener... listeners) {
@@ -66,7 +60,7 @@ public final class IndexWarmer extends AbstractComponent {
this.listeners = Collections.unmodifiableList(list);
}
- void warm(Engine.Searcher searcher, IndexShard shard, IndexSettings settings, boolean isTopReader) {
+ void warm(Engine.Searcher searcher, IndexShard shard, IndexSettings settings) {
if (shard.state() == IndexShardState.CLOSED) {
return;
}
@@ -74,22 +68,14 @@ public final class IndexWarmer extends AbstractComponent {
return;
}
if (logger.isTraceEnabled()) {
- if (isTopReader) {
- logger.trace("{} top warming [{}]", shard.shardId(), searcher.reader());
- } else {
- logger.trace("{} warming [{}]", shard.shardId(), searcher.reader());
- }
+ logger.trace("{} top warming [{}]", shard.shardId(), searcher.reader());
}
shard.warmerService().onPreWarm();
long time = System.nanoTime();
final List terminationHandles = new ArrayList<>();
// get a handle on pending tasks
for (final Listener listener : listeners) {
- if (isTopReader) {
- terminationHandles.add(listener.warmTopReader(shard, searcher));
- } else {
- terminationHandles.add(listener.warmNewReaders(shard, searcher));
- }
+ terminationHandles.add(listener.warmReader(shard, searcher));
}
// wait for termination
for (TerminationHandle terminationHandle : terminationHandles) {
@@ -97,22 +83,14 @@ public final class IndexWarmer extends AbstractComponent {
terminationHandle.awaitTermination();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
- if (isTopReader) {
- logger.warn("top warming has been interrupted", e);
- } else {
- logger.warn("warming has been interrupted", e);
- }
+ logger.warn("top warming has been interrupted", e);
break;
}
}
long took = System.nanoTime() - time;
shard.warmerService().onPostWarm(took);
if (shard.warmerService().logger().isTraceEnabled()) {
- if (isTopReader) {
- shard.warmerService().logger().trace("top warming took [{}]", new TimeValue(took, TimeUnit.NANOSECONDS));
- } else {
- shard.warmerService().logger().trace("warming took [{}]", new TimeValue(took, TimeUnit.NANOSECONDS));
- }
+ shard.warmerService().logger().trace("top warming took [{}]", new TimeValue(took, TimeUnit.NANOSECONDS));
}
}
@@ -127,9 +105,7 @@ public final class IndexWarmer extends AbstractComponent {
public interface Listener {
/** Queue tasks to warm-up the given segments and return handles that allow to wait for termination of the
* execution of those tasks. */
- TerminationHandle warmNewReaders(IndexShard indexShard, Engine.Searcher searcher);
-
- TerminationHandle warmTopReader(IndexShard indexShard, Engine.Searcher searcher);
+ TerminationHandle warmReader(IndexShard indexShard, Engine.Searcher searcher);
}
private static class FieldDataWarmer implements IndexWarmer.Listener {
@@ -140,67 +116,17 @@ public final class IndexWarmer extends AbstractComponent {
}
@Override
- public TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) {
- final MapperService mapperService = indexShard.mapperService();
- final Map warmUp = new HashMap<>();
- for (DocumentMapper docMapper : mapperService.docMappers(false)) {
- for (FieldMapper fieldMapper : docMapper.mappers()) {
- final FieldDataType fieldDataType = fieldMapper.fieldType().fieldDataType();
- final String indexName = fieldMapper.fieldType().name();
- if (fieldDataType == null) {
- continue;
- }
- if (fieldDataType.getLoading() == MappedFieldType.Loading.LAZY) {
- continue;
- }
-
- if (warmUp.containsKey(indexName)) {
- continue;
- }
- warmUp.put(indexName, fieldMapper.fieldType());
- }
- }
- final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService();
- final CountDownLatch latch = new CountDownLatch(searcher.reader().leaves().size() * warmUp.size());
- for (final LeafReaderContext ctx : searcher.reader().leaves()) {
- for (final MappedFieldType fieldType : warmUp.values()) {
- executor.execute(() -> {
- try {
- final long start = System.nanoTime();
- indexFieldDataService.getForField(fieldType).load(ctx);
- if (indexShard.warmerService().logger().isTraceEnabled()) {
- indexShard.warmerService().logger().trace("warmed fielddata for [{}], took [{}]", fieldType.name(),
- TimeValue.timeValueNanos(System.nanoTime() - start));
- }
- } catch (Throwable t) {
- indexShard.warmerService().logger().warn("failed to warm-up fielddata for [{}]", t, fieldType.name());
- } finally {
- latch.countDown();
- }
- });
- }
- }
- return () -> latch.await();
- }
-
- @Override
- public TerminationHandle warmTopReader(final IndexShard indexShard, final Engine.Searcher searcher) {
+ public TerminationHandle warmReader(final IndexShard indexShard, final Engine.Searcher searcher) {
final MapperService mapperService = indexShard.mapperService();
final Map warmUpGlobalOrdinals = new HashMap<>();
for (DocumentMapper docMapper : mapperService.docMappers(false)) {
for (FieldMapper fieldMapper : docMapper.mappers()) {
- final FieldDataType fieldDataType = fieldMapper.fieldType().fieldDataType();
- final String indexName = fieldMapper.fieldType().name();
- if (fieldDataType == null) {
+ final MappedFieldType fieldType = fieldMapper.fieldType();
+ final String indexName = fieldType.name();
+ if (fieldType.eagerGlobalOrdinals() == false) {
continue;
}
- if (fieldDataType.getLoading() != MappedFieldType.Loading.EAGER_GLOBAL_ORDINALS) {
- continue;
- }
- if (warmUpGlobalOrdinals.containsKey(indexName)) {
- continue;
- }
- warmUpGlobalOrdinals.put(indexName, fieldMapper.fieldType());
+ warmUpGlobalOrdinals.put(indexName, fieldType);
}
}
final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService();
@@ -210,7 +136,12 @@ public final class IndexWarmer extends AbstractComponent {
try {
final long start = System.nanoTime();
IndexFieldData.Global ifd = indexFieldDataService.getForField(fieldType);
- ifd.loadGlobal(searcher.getDirectoryReader());
+ DirectoryReader reader = searcher.getDirectoryReader();
+ IndexFieldData> global = ifd.loadGlobal(reader);
+ if (reader.leaves().isEmpty() == false) {
+ global.load(reader.leaves().get(0));
+ }
+
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace("warmed global ordinals for [{}], took [{}]", fieldType.name(),
TimeValue.timeValueNanos(System.nanoTime() - start));
diff --git a/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java
index b7481e78496..1054721535e 100644
--- a/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java
+++ b/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java
@@ -67,8 +67,10 @@ import org.elasticsearch.env.Environment;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.Reader;
+import java.nio.charset.CharacterCodingException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
+import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -163,7 +165,8 @@ public class Analysis {
NAMED_STOP_WORDS = unmodifiableMap(namedStopWords);
}
- public static CharArraySet parseWords(Environment env, Settings settings, String name, CharArraySet defaultWords, Map> namedWords, boolean ignoreCase) {
+ public static CharArraySet parseWords(Environment env, Settings settings, String name, CharArraySet defaultWords,
+ Map> namedWords, boolean ignoreCase) {
String value = settings.get(name);
if (value != null) {
if ("_none_".equals(value)) {
@@ -237,12 +240,17 @@ public class Analysis {
}
}
- final Path wordListFile = env.configFile().resolve(wordListPath);
+ final Path path = env.configFile().resolve(wordListPath);
- try (BufferedReader reader = FileSystemUtils.newBufferedReader(wordListFile.toUri().toURL(), StandardCharsets.UTF_8)) {
+ try (BufferedReader reader = FileSystemUtils.newBufferedReader(path.toUri().toURL(), StandardCharsets.UTF_8)) {
return loadWordList(reader, "#");
+ } catch (CharacterCodingException ex) {
+ String message = String.format(Locale.ROOT,
+ "Unsupported character encoding detected while reading %s_path: %s - files must be UTF-8 encoded",
+ settingPrefix, path.toString());
+ throw new IllegalArgumentException(message, ex);
} catch (IOException ioe) {
- String message = String.format(Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix);
+ String message = String.format(Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix, path.toString());
throw new IllegalArgumentException(message, ioe);
}
}
@@ -256,7 +264,7 @@ public class Analysis {
} else {
br = new BufferedReader(reader);
}
- String word = null;
+ String word;
while ((word = br.readLine()) != null) {
if (!Strings.hasText(word)) {
continue;
@@ -283,13 +291,16 @@ public class Analysis {
if (filePath == null) {
return null;
}
-
final Path path = env.configFile().resolve(filePath);
-
try {
return FileSystemUtils.newBufferedReader(path.toUri().toURL(), StandardCharsets.UTF_8);
+ } catch (CharacterCodingException ex) {
+ String message = String.format(Locale.ROOT,
+ "Unsupported character encoding detected while reading %s_path: %s files must be UTF-8 encoded",
+ settingPrefix, path.toString());
+ throw new IllegalArgumentException(message, ex);
} catch (IOException ioe) {
- String message = String.format(Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix);
+ String message = String.format(Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix, path.toString());
throw new IllegalArgumentException(message, ioe);
}
}
diff --git a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java
index 19ec3c8402e..2452e8147c2 100644
--- a/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java
+++ b/core/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java
@@ -216,7 +216,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
}
@Override
- public IndexWarmer.TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) {
+ public IndexWarmer.TerminationHandle warmReader(final IndexShard indexShard, final Engine.Searcher searcher) {
if (indexSettings.getIndex().equals(indexShard.indexSettings().getIndex()) == false) {
// this is from a different index
return TerminationHandle.NO_WAIT;
@@ -268,11 +268,6 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
return () -> latch.await();
}
- @Override
- public TerminationHandle warmTopReader(IndexShard indexShard, Engine.Searcher searcher) {
- return TerminationHandle.NO_WAIT;
- }
-
}
Cache