Merge branch 'master' into feature/aggs-refactoring
commit 02ecfd6279
@@ -91,11 +91,12 @@ class PrecommitTasks {
         // on them. But we want `precommit` to depend on `checkstyle` which depends on them so
         // we have to swap them.
         project.pluginManager.apply('checkstyle')
+        URL checkstyleSuppressions = PrecommitTasks.getResource('/checkstyle_suppressions.xml')
         project.checkstyle {
             config = project.resources.text.fromFile(
                 PrecommitTasks.getResource('/checkstyle.xml'), 'UTF-8')
             configProperties = [
-                suppressions: PrecommitTasks.getResource('/checkstyle_suppressions.xml')
+                suppressions: checkstyleSuppressions
             ]
         }
         for (String taskName : ['checkstyleMain', 'checkstyleTest']) {
@@ -103,6 +104,7 @@ class PrecommitTasks {
             if (task != null) {
                 project.tasks['check'].dependsOn.remove(task)
                 checkstyleTask.dependsOn(task)
+                task.inputs.file(checkstyleSuppressions)
             }
         }
         return checkstyleTask
@@ -340,7 +340,7 @@ class ClusterFormationTasks {
         }
         // delay reading the file location until execution time by wrapping in a closure within a GString
         String file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
-        Object[] args = [new File(node.homeDir, 'bin/plugin'), 'install', file]
+        Object[] args = [new File(node.homeDir, 'bin/elasticsearch-plugin'), 'install', file]
         return configureExecTask(name, project, setup, node, args)
     }
File diff suppressed because it is too large
@@ -32,10 +32,12 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.monitor.jvm.JvmInfo;
 
+import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Locale;
+import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
 
 import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
 import static org.elasticsearch.common.cli.CliToolConfig.Builder.optionBuilder;
@@ -131,6 +133,7 @@ final class BootstrapCLIParser extends CliTool {
 
             // hacky way to extract all the fancy extra args, there is no CLI tool helper for this
             Iterator<String> iterator = cli.getArgList().iterator();
+            final Map<String, String> properties = new HashMap<>();
             while (iterator.hasNext()) {
                 String arg = iterator.next();
                 if (!arg.startsWith("--")) {
@@ -148,20 +151,22 @@ final class BootstrapCLIParser extends CliTool {
                     String[] splitArg = arg.split("=", 2);
                     String key = splitArg[0];
                     String value = splitArg[1];
-                    System.setProperty("es." + key, value);
+                    properties.put("es." + key, value);
                 } else {
                     if (iterator.hasNext()) {
                         String value = iterator.next();
                         if (value.startsWith("--")) {
                             throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
                         }
-                        System.setProperty("es." + arg, value);
+                        properties.put("es." + arg, value);
                     } else {
                         throw new UserError(ExitStatus.USAGE, "Parameter [" + arg + "] needs value");
                     }
                 }
             }
+
+            for (Map.Entry<String, String> entry : properties.entrySet()) {
+                System.setProperty(entry.getKey(), entry.getValue());
+            }
             return new Start(terminal);
         }
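A note on the BootstrapCLIParser change above: the parsed arguments are now collected into a local map, and System.setProperty is only called after the whole argument list has validated, so a malformed argument can no longer leave the JVM with half the properties applied. A minimal, self-contained sketch of the same collect-then-apply idea (class and method names here are illustrative, not from this codebase):

    import java.util.HashMap;
    import java.util.Map;

    final class ArgsToProperties {
        /** Parses "--key value" pairs; throws before any property is set if an argument is malformed. */
        static void apply(String[] args) {
            Map<String, String> pending = new HashMap<>();
            for (int i = 0; i < args.length; i++) {
                String arg = args[i];
                if (!arg.startsWith("--")) {
                    throw new IllegalArgumentException("Unexpected argument [" + arg + "]");
                }
                if (i + 1 >= args.length || args[i + 1].startsWith("--")) {
                    throw new IllegalArgumentException("Parameter [" + arg + "] needs value");
                }
                pending.put("es." + arg.substring(2), args[++i]);
            }
            // only reached once every argument validated; now apply as a unit
            for (Map.Entry<String, String> e : pending.entrySet()) {
                System.setProperty(e.getKey(), e.getValue());
            }
        }
    }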
@@ -451,7 +451,7 @@ public final class ShardRouting implements Streamable, ToXContent {
     }
 
     /**
-     * Moves the shard from started to initializing and bumps the version
+     * Moves the shard from started to initializing
     */
    void reinitializeShard() {
        ensureNotFrozen();
@@ -23,48 +23,46 @@ import org.elasticsearch.ElasticsearchException;
 
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
+import java.util.Objects;
 
 public class MessageDigests {
 
-    private static final MessageDigest MD5_DIGEST;
-    private static final MessageDigest SHA_1_DIGEST;
-    private static final MessageDigest SHA_256_DIGEST;
-
-    static {
-        try {
-            MD5_DIGEST = MessageDigest.getInstance("MD5");
-            SHA_1_DIGEST = MessageDigest.getInstance("SHA-1");
-            SHA_256_DIGEST = MessageDigest.getInstance("SHA-256");
-        } catch (NoSuchAlgorithmException e) {
-            throw new ElasticsearchException("Unexpected exception creating MessageDigest instance", e);
-        }
+    private static ThreadLocal<MessageDigest> createThreadLocalMessageDigest(String digest) {
+        return ThreadLocal.withInitial(() -> {
+            try {
+                return MessageDigest.getInstance(digest);
+            } catch (NoSuchAlgorithmException e) {
+                throw new ElasticsearchException("unexpected exception creating MessageDigest instance for [" + digest + "]", e);
+            }
+        });
    }
 
+    private static final ThreadLocal<MessageDigest> MD5_DIGEST = createThreadLocalMessageDigest("MD5");
+    private static final ThreadLocal<MessageDigest> SHA_1_DIGEST = createThreadLocalMessageDigest("SHA-1");
+    private static final ThreadLocal<MessageDigest> SHA_256_DIGEST = createThreadLocalMessageDigest("SHA-256");
+
     public static MessageDigest md5() {
-        return clone(MD5_DIGEST);
+        return get(MD5_DIGEST);
     }
 
     public static MessageDigest sha1() {
-        return clone(SHA_1_DIGEST);
+        return get(SHA_1_DIGEST);
     }
 
     public static MessageDigest sha256() {
-        return clone(SHA_256_DIGEST);
+        return get(SHA_256_DIGEST);
     }
 
-    private static MessageDigest clone(MessageDigest messageDigest) {
-        try {
-            return (MessageDigest) messageDigest.clone();
-        } catch (CloneNotSupportedException e) {
-            throw new ElasticsearchException("Unexpected exception cloning MessageDigest instance", e);
-        }
+    private static MessageDigest get(ThreadLocal<MessageDigest> messageDigest) {
+        MessageDigest instance = messageDigest.get();
+        instance.reset();
+        return instance;
    }
 
     private static final char[] HEX_DIGITS = "0123456789abcdef".toCharArray();
 
     public static String toHexString(byte[] bytes) {
-        if (bytes == null) {
-            throw new NullPointerException("bytes");
-        }
+        Objects.requireNonNull(bytes);
         StringBuilder sb = new StringBuilder(2 * bytes.length);
 
         for (int i = 0; i < bytes.length; i++) {
@@ -74,4 +72,5 @@ public class MessageDigests {
 
         return sb.toString();
     }
+
 }
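A note on the MessageDigests change above: MessageDigest instances are stateful and not safe to share across threads, so the class now hands out one instance per thread via ThreadLocal.withInitial and resets it before reuse, instead of cloning a shared static instance on every call. A minimal, runnable sketch of the pattern (hypothetical class, not the actual one):

    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;

    final class Digests {
        // one MessageDigest per thread; instances are not thread-safe to share
        private static final ThreadLocal<MessageDigest> SHA_256 = ThreadLocal.withInitial(() -> {
            try {
                return MessageDigest.getInstance("SHA-256");
            } catch (NoSuchAlgorithmException e) {
                throw new IllegalStateException(e); // SHA-256 is guaranteed on every JVM
            }
        });

        static byte[] sha256(byte[] data) {
            MessageDigest md = SHA_256.get();
            md.reset(); // clear any state left by a previous use on this thread
            return md.digest(data);
        }
    }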
@@ -60,6 +60,7 @@ import org.elasticsearch.gateway.PrimaryShardAllocator;
 import org.elasticsearch.http.HttpTransportSettings;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.store.IndexStoreConfig;
+import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.analysis.HunspellService;
 import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
 import org.elasticsearch.indices.cache.query.IndicesQueryCache;
@@ -290,7 +291,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
         ScriptService.SCRIPT_CACHE_SIZE_SETTING,
         ScriptService.SCRIPT_CACHE_EXPIRE_SETTING,
         ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING,
-        IndicesFieldDataCache.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
+        IndicesService.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
         IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
         IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
         IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
@@ -38,8 +38,8 @@ import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
 import org.elasticsearch.index.store.FsDirectoryService;
 import org.elasticsearch.index.store.IndexStore;
 import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.IndexWarmer;
 import org.elasticsearch.indices.cache.request.IndicesRequestCache;
-import org.elasticsearch.search.SearchService;
 
 import java.util.Arrays;
 import java.util.Collections;
@@ -132,7 +132,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
         PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS_SETTING,
         FsDirectoryService.INDEX_LOCK_FACTOR_SETTING,
         EngineConfig.INDEX_CODEC_SETTING,
-        SearchService.INDEX_NORMS_LOADING_SETTING,
+        IndexWarmer.INDEX_NORMS_LOADING_SETTING,
         // this sucks but we can't really validate all the analyzers/similarity in here
         Setting.groupSetting("index.similarity.", false, Setting.Scope.INDEX), // this allows similarity settings to be passed
         Setting.groupSetting("index.analysis.", false, Setting.Scope.INDEX) // this allows analysis settings to be passed
@@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.smile.SmileXContent;
 import org.elasticsearch.common.xcontent.yaml.YamlXContent;
 
 import java.io.IOException;
+import java.util.Locale;
 
 /**
  * The content type of {@link org.elasticsearch.common.xcontent.XContent}.
@@ -38,7 +39,12 @@ public enum XContentType {
      */
     JSON(0) {
         @Override
-        public String restContentType() {
+        protected String mediaTypeWithoutParameters() {
             return "application/json";
         }
+
+        @Override
+        public String mediaType() {
+            return "application/json; charset=UTF-8";
+        }
 
@@ -57,7 +63,7 @@ public enum XContentType {
      */
     SMILE(1) {
         @Override
-        public String restContentType() {
+        protected String mediaTypeWithoutParameters() {
             return "application/smile";
         }
 
@@ -76,7 +82,7 @@ public enum XContentType {
      */
     YAML(2) {
         @Override
-        public String restContentType() {
+        protected String mediaTypeWithoutParameters() {
             return "application/yaml";
         }
 
@@ -95,7 +101,7 @@ public enum XContentType {
      */
     CBOR(3) {
         @Override
-        public String restContentType() {
+        protected String mediaTypeWithoutParameters() {
             return "application/cbor";
         }
 
@@ -108,31 +114,30 @@ public enum XContentType {
         public XContent xContent() {
             return CborXContent.cborXContent;
         }
-    },;
+    };
 
-    public static XContentType fromRestContentType(String contentType) {
-        if (contentType == null) {
+    public static XContentType fromMediaTypeOrFormat(String mediaType) {
+        if (mediaType == null) {
             return null;
         }
-        if ("application/json".equals(contentType) || "json".equalsIgnoreCase(contentType)) {
+        for (XContentType type : values()) {
+            if (isSameMediaTypeAs(mediaType, type)) {
+                return type;
+            }
+        }
+        if(mediaType.toLowerCase(Locale.ROOT).startsWith("application/*")) {
             return JSON;
         }
-
-        if ("application/smile".equals(contentType) || "smile".equalsIgnoreCase(contentType)) {
-            return SMILE;
-        }
-
-        if ("application/yaml".equals(contentType) || "yaml".equalsIgnoreCase(contentType)) {
-            return YAML;
-        }
-
-        if ("application/cbor".equals(contentType) || "cbor".equalsIgnoreCase(contentType)) {
-            return CBOR;
-        }
-
         return null;
     }
 
+    private static boolean isSameMediaTypeAs(String stringType, XContentType type) {
+        return type.mediaTypeWithoutParameters().equalsIgnoreCase(stringType) ||
+                stringType.toLowerCase(Locale.ROOT).startsWith(type.mediaTypeWithoutParameters().toLowerCase(Locale.ROOT) + ";") ||
+                type.shortName().equalsIgnoreCase(stringType);
+    }
+
     private int index;
 
     XContentType(int index) {
@@ -143,12 +148,16 @@ public enum XContentType {
         return index;
     }
 
-    public abstract String restContentType();
+    public String mediaType() {
+        return mediaTypeWithoutParameters();
+    }
 
     public abstract String shortName();
 
     public abstract XContent xContent();
 
+    protected abstract String mediaTypeWithoutParameters();
+
     public static XContentType readFrom(StreamInput in) throws IOException {
         int index = in.readVInt();
         for (XContentType contentType : values()) {
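A note on the XContentType change above: the per-constant string comparisons are replaced by one generic matching rule driven by mediaTypeWithoutParameters() and shortName(), so each constant only declares its media type and the lookup uniformly handles bare types, types followed by parameters, and short format names. A simplified, self-contained sketch of that matching rule (hypothetical enum, not the actual class):

    import java.util.Locale;

    enum MediaType {
        JSON("application/json", "json"),
        YAML("application/yaml", "yaml");

        private final String mediaType;
        private final String shortName;

        MediaType(String mediaType, String shortName) {
            this.mediaType = mediaType;
            this.shortName = shortName;
        }

        /** Accepts "application/json", "application/json; charset=UTF-8", or "json". */
        static MediaType fromString(String s) {
            if (s == null) {
                return null;
            }
            String lower = s.toLowerCase(Locale.ROOT);
            for (MediaType t : values()) {
                if (lower.equals(t.mediaType) || lower.startsWith(t.mediaType + ";") || lower.equals(t.shortName)) {
                    return t;
                }
            }
            return null;
        }
    }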
@@ -910,7 +910,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
             activeNodes.add(localNode);
             long joinsCounter = clusterJoinsCounter.get();
             if (joinsCounter > 0) {
-                logger.trace("adding local node to the list of active nodes who has previously joined the cluster (joins counter is [{}})", joinsCounter);
+                logger.trace("adding local node to the list of active nodes that have previously joined the cluster (joins counter is [{}])", joinsCounter);
                 joinedOnceActiveNodes.add(localNode);
             }
         }
@@ -59,7 +59,7 @@ public class MetaStateService extends AbstractComponent {
     public MetaStateService(Settings settings, NodeEnvironment nodeEnv) {
         super(settings);
         this.nodeEnv = nodeEnv;
-        this.format = XContentType.fromRestContentType(settings.get(FORMAT_SETTING, "smile"));
+        this.format = XContentType.fromMediaTypeOrFormat(settings.get(FORMAT_SETTING, "smile"));
         if (this.format == XContentType.SMILE) {
             Map<String, String> params = new HashMap<>();
             params.put("binary", "true");
@@ -38,6 +38,7 @@ import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.index.store.IndexStore;
 import org.elasticsearch.index.store.IndexStoreConfig;
 import org.elasticsearch.indices.cache.query.IndicesQueryCache;
+import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
 import org.elasticsearch.indices.mapper.MapperRegistry;
 
 import java.io.IOException;
@@ -240,7 +241,7 @@ public final class IndexModule {
         IndexSearcherWrapper newWrapper(final IndexService indexService);
     }
 
-    public IndexService newIndexService(NodeEnvironment environment, IndexService.ShardStoreDeleter shardStoreDeleter, NodeServicesProvider servicesProvider, MapperRegistry mapperRegistry,
+    public IndexService newIndexService(NodeEnvironment environment, IndexService.ShardStoreDeleter shardStoreDeleter, NodeServicesProvider servicesProvider, MapperRegistry mapperRegistry, IndicesFieldDataCache indicesFieldDataCache,
                                         IndexingOperationListener... listeners) throws IOException {
         IndexSearcherWrapperFactory searcherWrapperFactory = indexSearcherWrapper.get() == null ? (shard) -> null : indexSearcherWrapper.get();
         IndexEventListener eventListener = freeze();
@@ -264,7 +265,7 @@ public final class IndexModule {
         final BiFunction<IndexSettings, IndicesQueryCache, QueryCache> queryCacheProvider = queryCaches.get(queryCacheType);
         final QueryCache queryCache = queryCacheProvider.apply(indexSettings, servicesProvider.getIndicesQueryCache());
         return new IndexService(indexSettings, environment, new SimilarityService(indexSettings, similarities), shardStoreDeleter, analysisRegistry, engineFactory.get(),
-            servicesProvider, queryCache, store, eventListener, searcherWrapperFactory, mapperRegistry, listeners);
+            servicesProvider, queryCache, store, eventListener, searcherWrapperFactory, mapperRegistry, indicesFieldDataCache, listeners);
     }
 
 }
@@ -54,6 +54,7 @@ import org.elasticsearch.index.analysis.AnalysisService;
 import org.elasticsearch.index.cache.IndexCache;
 import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
 import org.elasticsearch.index.cache.query.QueryCache;
+import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.EngineClosedException;
 import org.elasticsearch.index.engine.EngineFactory;
 import org.elasticsearch.index.fielddata.FieldDataType;
@@ -76,6 +77,7 @@ import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.indices.AliasFilterParsingException;
 import org.elasticsearch.indices.InvalidAliasNameException;
+import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
 import org.elasticsearch.indices.mapper.MapperRegistry;
 import org.elasticsearch.threadpool.ThreadPool;
 
@@ -101,6 +103,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
     private final MapperService mapperService;
     private final SimilarityService similarityService;
     private final EngineFactory engineFactory;
+    private final IndexWarmer warmer;
     private volatile Map<Integer, IndexShard> shards = emptyMap();
     private final AtomicBoolean closed = new AtomicBoolean(false);
     private final AtomicBoolean deleted = new AtomicBoolean(false);
@@ -122,20 +125,22 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
                         IndexEventListener eventListener,
                         IndexModule.IndexSearcherWrapperFactory wrapperFactory,
                         MapperRegistry mapperRegistry,
+                        IndicesFieldDataCache indicesFieldDataCache,
                         IndexingOperationListener... listenersIn) throws IOException {
         super(indexSettings);
         this.indexSettings = indexSettings;
         this.analysisService = registry.build(indexSettings);
         this.similarityService = similarityService;
         this.mapperService = new MapperService(indexSettings, analysisService, similarityService, mapperRegistry, IndexService.this::getQueryShardContext);
-        this.indexFieldData = new IndexFieldDataService(indexSettings, nodeServicesProvider.getIndicesFieldDataCache(), nodeServicesProvider.getCircuitBreakerService(), mapperService);
+        this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, nodeServicesProvider.getCircuitBreakerService(), mapperService);
         this.shardStoreDeleter = shardStoreDeleter;
         this.eventListener = eventListener;
         this.nodeEnv = nodeEnv;
         this.nodeServicesProvider = nodeServicesProvider;
         this.indexStore = indexStore;
         indexFieldData.setListener(new FieldDataCacheListener(this));
-        this.bitsetFilterCache = new BitsetFilterCache(indexSettings, nodeServicesProvider.getWarmer(), new BitsetCacheListener(this));
+        this.bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetCacheListener(this));
+        this.warmer = new IndexWarmer(indexSettings.getSettings(), nodeServicesProvider.getThreadPool(), bitsetFilterCache.createListener(nodeServicesProvider.getThreadPool()));
         this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache);
         this.engineFactory = engineFactory;
         // initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE
@@ -310,11 +315,18 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
         // if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary.
         final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false ||
             (primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
+        final Engine.Warmer engineWarmer = (searcher, toLevel) -> {
+            IndexShard shard = getShardOrNull(shardId.getId());
+            if (shard != null) {
+                warmer.warm(searcher, shard, IndexService.this.indexSettings, toLevel);
+            }
+        };
+
         store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> nodeServicesProvider.getIndicesQueryCache().onClose(shardId)));
         if (useShadowEngine(primary, indexSettings)) {
-            indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider, searchSlowLog); // no indexing listeners - shadow engines don't index
+            indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider, searchSlowLog, engineWarmer); // no indexing listeners - shadow engines don't index
         } else {
-            indexShard = new IndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider, searchSlowLog, listeners);
+            indexShard = new IndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider, searchSlowLog, engineWarmer, listeners);
         }
         eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created");
         eventListener.afterIndexShardCreated(indexShard);
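A note on the engineWarmer lambda introduced above: instead of the shard pulling a node-wide warmer out of NodeServicesProvider, the index service now hands the shard a narrow Engine.Warmer function that closes over the index-level IndexWarmer. A minimal sketch of that inversion, with hypothetical names rather than the real types:

    @FunctionalInterface
    interface Warmer {
        void warm(String searcherName, boolean topLevel);
    }

    final class Shard {
        private final Warmer warmer; // injected behavior, not a service looked up at runtime

        Shard(Warmer warmer) {
            this.warmer = warmer;
        }

        void onNewSearcher(String searcherName) {
            warmer.warm(searcherName, false);
        }
    }

    // wiring site: the owning service closes over its own warmer instance
    // Shard shard = new Shard((name, top) -> indexLevelWarmer.warm(name, top));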
@@ -0,0 +1,292 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index;
+
+import com.carrotsearch.hppc.ObjectHashSet;
+import com.carrotsearch.hppc.ObjectSet;
+import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.NumericDocValues;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldData;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.FieldMapper;
+import org.elasticsearch.index.mapper.MappedFieldType;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executor;
+import java.util.concurrent.TimeUnit;
+
+/**
+ */
+public final class IndexWarmer extends AbstractComponent {
+
+    public static final Setting<MappedFieldType.Loading> INDEX_NORMS_LOADING_SETTING = new Setting<>("index.norms.loading",
+        MappedFieldType.Loading.LAZY.toString(), (s) -> MappedFieldType.Loading.parse(s, MappedFieldType.Loading.LAZY),
+        false, Setting.Scope.INDEX);
+    private final List<Listener> listeners;
+
+    IndexWarmer(Settings settings, ThreadPool threadPool, Listener... listeners) {
+        super(settings);
+        ArrayList<Listener> list = new ArrayList<>();
+        final Executor executor = threadPool.executor(ThreadPool.Names.WARMER);
+        list.add(new NormsWarmer(executor));
+        list.add(new FieldDataWarmer(executor));
+        for (Listener listener : listeners) {
+            list.add(listener);
+        }
+        this.listeners = Collections.unmodifiableList(list);
+    }
+
+    void warm(Engine.Searcher searcher, IndexShard shard, IndexSettings settings, boolean isTopReader) {
+        if (shard.state() == IndexShardState.CLOSED) {
+            return;
+        }
+        if (settings.isWarmerEnabled() == false) {
+            return;
+        }
+        if (logger.isTraceEnabled()) {
+            if (isTopReader) {
+                logger.trace("{} top warming [{}]", shard.shardId(), searcher.reader());
+            } else {
+                logger.trace("{} warming [{}]", shard.shardId(), searcher.reader());
+            }
+        }
+        shard.warmerService().onPreWarm();
+        long time = System.nanoTime();
+        final List<TerminationHandle> terminationHandles = new ArrayList<>();
+        // get a handle on pending tasks
+        for (final Listener listener : listeners) {
+            if (isTopReader) {
+                terminationHandles.add(listener.warmTopReader(shard, searcher));
+            } else {
+                terminationHandles.add(listener.warmNewReaders(shard, searcher));
+            }
+        }
+        // wait for termination
+        for (TerminationHandle terminationHandle : terminationHandles) {
+            try {
+                terminationHandle.awaitTermination();
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+                if (isTopReader) {
+                    logger.warn("top warming has been interrupted", e);
+                } else {
+                    logger.warn("warming has been interrupted", e);
+                }
+                break;
+            }
+        }
+        long took = System.nanoTime() - time;
+        shard.warmerService().onPostWarm(took);
+        if (shard.warmerService().logger().isTraceEnabled()) {
+            if (isTopReader) {
+                shard.warmerService().logger().trace("top warming took [{}]", new TimeValue(took, TimeUnit.NANOSECONDS));
+            } else {
+                shard.warmerService().logger().trace("warming took [{}]", new TimeValue(took, TimeUnit.NANOSECONDS));
+            }
+        }
+    }
+
+    /** A handle on the execution of warm-up action. */
+    public interface TerminationHandle {
+
+        TerminationHandle NO_WAIT = () -> {};
+
+        /** Wait until execution of the warm-up action completes. */
+        void awaitTermination() throws InterruptedException;
+    }
+
+    public interface Listener {
+        /** Queue tasks to warm-up the given segments and return handles that allow to wait for termination of the
+         *  execution of those tasks. */
+        TerminationHandle warmNewReaders(IndexShard indexShard, Engine.Searcher searcher);
+
+        TerminationHandle warmTopReader(IndexShard indexShard, Engine.Searcher searcher);
+    }
+
+    private static class NormsWarmer implements IndexWarmer.Listener {
+        private final Executor executor;
+
+        public NormsWarmer(Executor executor) {
+            this.executor = executor;
+        }
+
+        @Override
+        public TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) {
+            final MappedFieldType.Loading defaultLoading = indexShard.indexSettings().getValue(INDEX_NORMS_LOADING_SETTING);
+            final MapperService mapperService = indexShard.mapperService();
+            final ObjectSet<String> warmUp = new ObjectHashSet<>();
+            for (DocumentMapper docMapper : mapperService.docMappers(false)) {
+                for (FieldMapper fieldMapper : docMapper.mappers()) {
+                    final String indexName = fieldMapper.fieldType().name();
+                    MappedFieldType.Loading normsLoading = fieldMapper.fieldType().normsLoading();
+                    if (normsLoading == null) {
+                        normsLoading = defaultLoading;
+                    }
+                    if (fieldMapper.fieldType().indexOptions() != IndexOptions.NONE && !fieldMapper.fieldType().omitNorms()
+                            && normsLoading == MappedFieldType.Loading.EAGER) {
+                        warmUp.add(indexName);
+                    }
+                }
+            }
+
+            final CountDownLatch latch = new CountDownLatch(1);
+            // Norms loading may be I/O intensive but is not CPU intensive, so we execute it in a single task
+            executor.execute(() -> {
+                try {
+                    for (ObjectCursor<String> stringObjectCursor : warmUp) {
+                        final String indexName = stringObjectCursor.value;
+                        final long start = System.nanoTime();
+                        for (final LeafReaderContext ctx : searcher.reader().leaves()) {
+                            final NumericDocValues values = ctx.reader().getNormValues(indexName);
+                            if (values != null) {
+                                values.get(0);
+                            }
+                        }
+                        if (indexShard.warmerService().logger().isTraceEnabled()) {
+                            indexShard.warmerService().logger().trace("warmed norms for [{}], took [{}]", indexName,
+                                TimeValue.timeValueNanos(System.nanoTime() - start));
+                        }
+                    }
+                } catch (Throwable t) {
+                    indexShard.warmerService().logger().warn("failed to warm-up norms", t);
+                } finally {
+                    latch.countDown();
+                }
+            });
+
+            return () -> latch.await();
+        }
+
+        @Override
+        public TerminationHandle warmTopReader(IndexShard indexShard, final Engine.Searcher searcher) {
+            return TerminationHandle.NO_WAIT;
+        }
+    }
+
+    private static class FieldDataWarmer implements IndexWarmer.Listener {
+
+        private final Executor executor;
+
+        public FieldDataWarmer(Executor executor) {
+            this.executor = executor;
+        }
+
+        @Override
+        public TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) {
+            final MapperService mapperService = indexShard.mapperService();
+            final Map<String, MappedFieldType> warmUp = new HashMap<>();
+            for (DocumentMapper docMapper : mapperService.docMappers(false)) {
+                for (FieldMapper fieldMapper : docMapper.mappers()) {
+                    final FieldDataType fieldDataType = fieldMapper.fieldType().fieldDataType();
+                    final String indexName = fieldMapper.fieldType().name();
+                    if (fieldDataType == null) {
+                        continue;
+                    }
+                    if (fieldDataType.getLoading() == MappedFieldType.Loading.LAZY) {
+                        continue;
+                    }
+
+                    if (warmUp.containsKey(indexName)) {
+                        continue;
+                    }
+                    warmUp.put(indexName, fieldMapper.fieldType());
+                }
+            }
+            final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService();
+            final CountDownLatch latch = new CountDownLatch(searcher.reader().leaves().size() * warmUp.size());
+            for (final LeafReaderContext ctx : searcher.reader().leaves()) {
+                for (final MappedFieldType fieldType : warmUp.values()) {
+                    executor.execute(() -> {
+                        try {
+                            final long start = System.nanoTime();
+                            indexFieldDataService.getForField(fieldType).load(ctx);
+                            if (indexShard.warmerService().logger().isTraceEnabled()) {
+                                indexShard.warmerService().logger().trace("warmed fielddata for [{}], took [{}]", fieldType.name(),
+                                    TimeValue.timeValueNanos(System.nanoTime() - start));
+                            }
+                        } catch (Throwable t) {
+                            indexShard.warmerService().logger().warn("failed to warm-up fielddata for [{}]", t, fieldType.name());
+                        } finally {
+                            latch.countDown();
+                        }
+                    });
+                }
+            }
+            return () -> latch.await();
+        }
+
+        @Override
+        public TerminationHandle warmTopReader(final IndexShard indexShard, final Engine.Searcher searcher) {
+            final MapperService mapperService = indexShard.mapperService();
+            final Map<String, MappedFieldType> warmUpGlobalOrdinals = new HashMap<>();
+            for (DocumentMapper docMapper : mapperService.docMappers(false)) {
+                for (FieldMapper fieldMapper : docMapper.mappers()) {
+                    final FieldDataType fieldDataType = fieldMapper.fieldType().fieldDataType();
+                    final String indexName = fieldMapper.fieldType().name();
+                    if (fieldDataType == null) {
+                        continue;
+                    }
+                    if (fieldDataType.getLoading() != MappedFieldType.Loading.EAGER_GLOBAL_ORDINALS) {
+                        continue;
+                    }
+                    if (warmUpGlobalOrdinals.containsKey(indexName)) {
+                        continue;
+                    }
+                    warmUpGlobalOrdinals.put(indexName, fieldMapper.fieldType());
+                }
+            }
+            final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService();
+            final CountDownLatch latch = new CountDownLatch(warmUpGlobalOrdinals.size());
+            for (final MappedFieldType fieldType : warmUpGlobalOrdinals.values()) {
+                executor.execute(() -> {
+                    try {
+                        final long start = System.nanoTime();
+                        IndexFieldData.Global ifd = indexFieldDataService.getForField(fieldType);
+                        ifd.loadGlobal(searcher.getDirectoryReader());
+                        if (indexShard.warmerService().logger().isTraceEnabled()) {
+                            indexShard.warmerService().logger().trace("warmed global ordinals for [{}], took [{}]", fieldType.name(),
+                                TimeValue.timeValueNanos(System.nanoTime() - start));
+                        }
+                    } catch (Throwable t) {
+                        indexShard.warmerService().logger().warn("failed to warm-up global ordinals for [{}]", t, fieldType.name());
+                    } finally {
+                        latch.countDown();
+                    }
+                });
+            }
+            return () -> latch.await();
+        }
+    }
+
+}
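A note on the TerminationHandle pattern in the new IndexWarmer above: each listener queues its warm-up work on an executor and returns a handle backed by a CountDownLatch; the caller first collects all handles and only then blocks, so independent warm-ups run concurrently. A compact, runnable sketch of the same idea (hypothetical names, not the real classes):

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.ExecutorService;

    interface TerminationHandle {
        TerminationHandle NO_WAIT = () -> {};
        void awaitTermination() throws InterruptedException;
    }

    final class WarmerDemo {
        static TerminationHandle warm(ExecutorService executor, List<Runnable> tasks) {
            CountDownLatch latch = new CountDownLatch(tasks.size());
            for (Runnable task : tasks) {
                executor.execute(() -> {
                    try {
                        task.run();
                    } finally {
                        latch.countDown(); // count down even on failure so the caller never hangs
                    }
                });
            }
            return latch::await; // caller collects handles first, then awaits each one
        }
    }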
@@ -20,13 +20,10 @@
 package org.elasticsearch.index;
 
 import org.elasticsearch.client.Client;
-import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.util.BigArrays;
-import org.elasticsearch.indices.IndicesWarmer;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.indices.cache.query.IndicesQueryCache;
-import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
 import org.elasticsearch.indices.query.IndicesQueriesRegistry;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -40,24 +37,20 @@ public final class NodeServicesProvider {
 
     private final ThreadPool threadPool;
     private final IndicesQueryCache indicesQueryCache;
-    private final IndicesWarmer warmer;
     private final BigArrays bigArrays;
     private final Client client;
     private final IndicesQueriesRegistry indicesQueriesRegistry;
     private final ScriptService scriptService;
-    private final IndicesFieldDataCache indicesFieldDataCache;
     private final CircuitBreakerService circuitBreakerService;
 
     @Inject
-    public NodeServicesProvider(ThreadPool threadPool, IndicesQueryCache indicesQueryCache, @Nullable IndicesWarmer warmer, BigArrays bigArrays, Client client, ScriptService scriptService, IndicesQueriesRegistry indicesQueriesRegistry, IndicesFieldDataCache indicesFieldDataCache, CircuitBreakerService circuitBreakerService) {
+    public NodeServicesProvider(ThreadPool threadPool, IndicesQueryCache indicesQueryCache, BigArrays bigArrays, Client client, ScriptService scriptService, IndicesQueriesRegistry indicesQueriesRegistry, CircuitBreakerService circuitBreakerService) {
         this.threadPool = threadPool;
         this.indicesQueryCache = indicesQueryCache;
-        this.warmer = warmer;
         this.bigArrays = bigArrays;
         this.client = client;
         this.indicesQueriesRegistry = indicesQueriesRegistry;
         this.scriptService = scriptService;
-        this.indicesFieldDataCache = indicesFieldDataCache;
         this.circuitBreakerService = circuitBreakerService;
     }
 
@@ -69,10 +62,6 @@ public final class NodeServicesProvider {
         return indicesQueryCache;
     }
 
-    public IndicesWarmer getWarmer() {
-        return warmer;
-    }
-
     public BigArrays getBigArrays() { return bigArrays; }
 
     public Client getClient() {
@@ -87,10 +76,6 @@ public final class NodeServicesProvider {
         return scriptService;
     }
 
-    public IndicesFieldDataCache getIndicesFieldDataCache() {
-        return indicesFieldDataCache;
-    }
-
     public CircuitBreakerService getCircuitBreakerService() {
         return circuitBreakerService;
     }
@@ -48,8 +48,9 @@ import org.elasticsearch.index.mapper.object.ObjectMapper;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.shard.ShardUtils;
-import org.elasticsearch.indices.IndicesWarmer;
-import org.elasticsearch.indices.IndicesWarmer.TerminationHandle;
+import org.elasticsearch.index.IndexWarmer;
+import org.elasticsearch.index.IndexWarmer.TerminationHandle;
+import org.elasticsearch.threadpool.ThreadPool;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -74,22 +75,20 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
     private final boolean loadRandomAccessFiltersEagerly;
     private final Cache<Object, Cache<Query, Value>> loadedFilters;
     private final Listener listener;
-    private final BitSetProducerWarmer warmer;
-    private final IndicesWarmer indicesWarmer;
 
-    public BitsetFilterCache(IndexSettings indexSettings, IndicesWarmer indicesWarmer, Listener listener) {
+    public BitsetFilterCache(IndexSettings indexSettings, Listener listener) {
         super(indexSettings);
         if (listener == null) {
             throw new IllegalArgumentException("listener must not be null");
         }
         this.loadRandomAccessFiltersEagerly = this.indexSettings.getValue(INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING);
         this.loadedFilters = CacheBuilder.<Object, Cache<Query, Value>>builder().removalListener(this).build();
-        this.warmer = new BitSetProducerWarmer();
-        this.indicesWarmer = indicesWarmer;
-        indicesWarmer.addListener(warmer);
         this.listener = listener;
     }
 
+    public IndexWarmer.Listener createListener(ThreadPool threadPool) {
+        return new BitSetProducerWarmer(threadPool);
+    }
+
 
     public BitSetProducer getBitSetProducer(Query query) {
@@ -103,11 +102,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
 
     @Override
     public void close() {
-        try {
-            indicesWarmer.removeListener(warmer);
-        } finally {
-            clear("close");
-        }
+        clear("close");
     }
 
     public void clear(String reason) {
@@ -210,10 +205,16 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
         }
     }
 
-    final class BitSetProducerWarmer implements IndicesWarmer.Listener {
+    final class BitSetProducerWarmer implements IndexWarmer.Listener {
+
+        private final Executor executor;
+
+        BitSetProducerWarmer(ThreadPool threadPool) {
+            this.executor = threadPool.executor(ThreadPool.Names.WARMER);
+        }
 
         @Override
-        public IndicesWarmer.TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) {
+        public IndexWarmer.TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) {
             if (indexSettings.getIndex().equals(indexShard.getIndexSettings().getIndex()) == false) {
                 // this is from a different index
                 return TerminationHandle.NO_WAIT;
@@ -244,7 +245,6 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
                 warmUp.add(Queries.newNonNestedFilter());
             }
 
-            final Executor executor = indicesWarmer.getExecutor();
             final CountDownLatch latch = new CountDownLatch(searcher.reader().leaves().size() * warmUp.size());
             for (final LeafReaderContext ctx : searcher.reader().leaves()) {
                 for (final Query filterToWarm : warmUp) {
@@ -277,7 +277,6 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
         return loadedFilters;
     }
 
-
     /**
      * A listener interface that is executed for each onCache / onRemoval event
      */
@@ -295,6 +294,4 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
          */
         void onRemoval(ShardId shardId, Accountable accountable);
     }
-
-
 }
@@ -43,7 +43,6 @@ import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.InfoStream;
-import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.cluster.routing.Murmur3HashFunction;
 import org.elasticsearch.common.Nullable;
@@ -68,7 +67,6 @@ import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.index.translog.TranslogConfig;
 import org.elasticsearch.index.translog.TranslogCorruptedException;
-import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.threadpool.ThreadPool;
 
 import java.io.IOException;
@@ -233,20 +231,7 @@ public class InternalEngine extends Engine {
         final TranslogRecoveryPerformer handler = engineConfig.getTranslogRecoveryPerformer();
         try {
             Translog.Snapshot snapshot = translog.newSnapshot();
-            Translog.Operation operation;
-            while ((operation = snapshot.next()) != null) {
-                try {
-                    handler.performRecoveryOperation(this, operation, true);
-                    opsRecovered++;
-                } catch (ElasticsearchException e) {
-                    if (e.status() == RestStatus.BAD_REQUEST) {
-                        // mainly for MapperParsingException and Failure to detect xcontent
-                        logger.info("ignoring recovery of a corrupt translog entry", e);
-                    } else {
-                        throw e;
-                    }
-                }
-            }
+            opsRecovered = handler.recoveryFromSnapshot(this, snapshot);
         } catch (Throwable e) {
             throw new EngineException(shardId, "failed to recover from translog", e);
         }
@@ -48,12 +48,12 @@ public interface IndexFieldDataCache {
         /**
          * Called after the fielddata is loaded during the cache phase
         */
-        void onCache(ShardId shardId, String fieldName, FieldDataType fieldDataType, Accountable ramUsage);
+        default void onCache(ShardId shardId, String fieldName, FieldDataType fieldDataType, Accountable ramUsage){}
 
        /**
         * Called after the fielddata is unloaded
        */
-        void onRemoval(ShardId shardId, String fieldName, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes);
+        default void onRemoval(ShardId shardId, String fieldName, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes){}
    }
 
     class None implements IndexFieldDataCache {
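A note on the IndexFieldDataCache change above: turning the listener callbacks into default no-ops means implementations only override the events they care about. The same idiom in miniature (hypothetical names):

    interface CacheListener {
        default void onCache(String field, long bytes) {}
        default void onRemoval(String field, long bytes) {}
    }

    // an implementor interested only in removals overrides a single method
    class EvictionLogger implements CacheListener {
        @Override
        public void onRemoval(String field, long bytes) {
            System.out.println("evicted [" + field + "] freeing " + bytes + " bytes");
        }
    }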
@@ -55,6 +55,7 @@ import org.elasticsearch.gateway.MetaDataStateFormat;
 import org.elasticsearch.index.IndexModule;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.NodeServicesProvider;
+import org.elasticsearch.index.SearchSlowLog;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.cache.IndexCache;
 import org.elasticsearch.index.cache.bitset.ShardBitsetFilterCache;
@@ -89,13 +90,12 @@ import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.recovery.RecoveryStats;
 import org.elasticsearch.index.refresh.RefreshStats;
-import org.elasticsearch.index.SearchSlowLog;
 import org.elasticsearch.index.search.stats.SearchStats;
 import org.elasticsearch.index.search.stats.ShardSearchStats;
 import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.index.snapshots.IndexShardRepository;
-import org.elasticsearch.index.store.Store.MetadataSnapshot;
 import org.elasticsearch.index.store.Store;
+import org.elasticsearch.index.store.Store.MetadataSnapshot;
 import org.elasticsearch.index.store.StoreFileMetaData;
 import org.elasticsearch.index.store.StoreStats;
 import org.elasticsearch.index.suggest.stats.ShardSuggestMetric;
@@ -105,9 +105,8 @@ import org.elasticsearch.index.translog.TranslogConfig;
 import org.elasticsearch.index.translog.TranslogStats;
 import org.elasticsearch.index.warmer.ShardIndexWarmerService;
 import org.elasticsearch.index.warmer.WarmerStats;
-import org.elasticsearch.indices.IndicesWarmer;
-import org.elasticsearch.indices.cache.query.IndicesQueryCache;
 import org.elasticsearch.indices.IndexingMemoryController;
+import org.elasticsearch.indices.cache.query.IndicesQueryCache;
 import org.elasticsearch.indices.recovery.RecoveryFailedException;
 import org.elasticsearch.indices.recovery.RecoveryState;
 import org.elasticsearch.percolator.PercolatorService;
@@ -151,7 +150,7 @@ public class IndexShard extends AbstractIndexShardComponent {
     private final Object mutex = new Object();
     private final String checkIndexOnStartup;
     private final CodecService codecService;
-    private final IndicesWarmer warmer;
+    private final Engine.Warmer warmer;
     private final SnapshotDeletionPolicy deletionPolicy;
     private final SimilarityService similarityService;
     private final EngineConfig engineConfig;
@@ -208,12 +207,12 @@ public class IndexShard extends AbstractIndexShardComponent {
     public IndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache,
                       MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService,
                       @Nullable EngineFactory engineFactory,
-                      IndexEventListener indexEventListener, IndexSearcherWrapper indexSearcherWrapper, NodeServicesProvider provider, SearchSlowLog slowLog, IndexingOperationListener... listeners) {
+                      IndexEventListener indexEventListener, IndexSearcherWrapper indexSearcherWrapper, NodeServicesProvider provider, SearchSlowLog slowLog, Engine.Warmer warmer, IndexingOperationListener... listeners) {
         super(shardId, indexSettings);
         final Settings settings = indexSettings.getSettings();
         this.idxSettings = indexSettings;
         this.codecService = new CodecService(mapperService, logger);
-        this.warmer = provider.getWarmer();
+        this.warmer = warmer;
         this.deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
         this.similarityService = similarityService;
         Objects.requireNonNull(store, "Store must be provided to the index shard");
@@ -875,6 +874,12 @@ public class IndexShard extends AbstractIndexShardComponent {
      * After the store has been recovered, we need to start the engine in order to apply operations
      */
     public void performTranslogRecovery(boolean indexExists) {
+        if (indexExists == false) {
+            // note: these are set when recovering from the translog
+            final RecoveryState.Translog translogStats = recoveryState().getTranslog();
+            translogStats.totalOperations(0);
+            translogStats.totalOperationsOnStart(0);
+        }
         internalPerformTranslogRecovery(false, indexExists);
         assert recoveryState.getStage() == RecoveryState.Stage.TRANSLOG : "TRANSLOG stage expected but was: " + recoveryState.getStage();
     }
@@ -1388,10 +1393,18 @@ public class IndexShard extends AbstractIndexShardComponent {
                 assert recoveryState != null;
                 recoveryState.getTranslog().incrementRecoveredOperations();
             }
+
+            @Override
+            public int recoveryFromSnapshot(Engine engine, Translog.Snapshot snapshot) throws IOException {
+                assert recoveryState != null;
+                RecoveryState.Translog translogStats = recoveryState.getTranslog();
+                translogStats.totalOperations(snapshot.totalOperations());
+                translogStats.totalOperationsOnStart(snapshot.totalOperations());
+                return super.recoveryFromSnapshot(engine, snapshot);
+            }
         };
-        final Engine.Warmer engineWarmer = (searcher, toLevel) -> warmer.warm(searcher, this, idxSettings, toLevel);
         return new EngineConfig(shardId,
-            threadPool, indexSettings, engineWarmer, store, deletionPolicy, indexSettings.getMergePolicy(),
+            threadPool, indexSettings, warmer, store, deletionPolicy, indexSettings.getMergePolicy(),
             mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig,
             idxSettings.getSettings().getAsTime(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING, IndexingMemoryController.SHARD_DEFAULT_INACTIVE_TIME));
     }
@@ -45,8 +45,8 @@ import java.io.IOException;
 public final class ShadowIndexShard extends IndexShard {
 
     public ShadowIndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService, @Nullable EngineFactory engineFactory,
-                            IndexEventListener indexEventListener, IndexSearcherWrapper wrapper, NodeServicesProvider provider, SearchSlowLog searchSlowLog) throws IOException {
-        super(shardId, indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldDataService, engineFactory, indexEventListener, wrapper, provider, searchSlowLog);
+                            IndexEventListener indexEventListener, IndexSearcherWrapper wrapper, NodeServicesProvider provider, SearchSlowLog searchSlowLog, Engine.Warmer engineWarmer) throws IOException {
+        super(shardId, indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldDataService, engineFactory, indexEventListener, wrapper, provider, searchSlowLog, engineWarmer);
     }
 
     /**
@@ -203,7 +203,6 @@ final class StoreRecovery {
                     logger.trace("cleaning existing shard, shouldn't exists");
                     IndexWriter writer = new IndexWriter(store.directory(), new IndexWriterConfig(Lucene.STANDARD_ANALYZER).setOpenMode(IndexWriterConfig.OpenMode.CREATE));
                     writer.close();
-                    recoveryState.getTranslog().totalOperations(0);
                 }
             }
         } catch (Throwable e) {
@@ -224,10 +223,6 @@ final class StoreRecovery {
             } catch (IOException e) {
                 logger.debug("failed to list file details", e);
             }
-            if (indexShouldExists == false) {
-                recoveryState.getTranslog().totalOperations(0);
-                recoveryState.getTranslog().totalOperationsOnStart(0);
-            }
             indexShard.performTranslogRecovery(indexShouldExists);
             indexShard.finalizeRecovery();
             indexShard.postRecovery("post recovery from shard_store");
@@ -30,6 +30,7 @@ import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.Mapping;
 import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
 import java.util.HashMap;
@@ -77,6 +78,25 @@ public class TranslogRecoveryPerformer {
         return numOps;
     }
 
+    public int recoveryFromSnapshot(Engine engine, Translog.Snapshot snapshot) throws IOException {
+        Translog.Operation operation;
+        int opsRecovered = 0;
+        while ((operation = snapshot.next()) != null) {
+            try {
+                performRecoveryOperation(engine, operation, true);
+                opsRecovered++;
+            } catch (ElasticsearchException e) {
+                if (e.status() == RestStatus.BAD_REQUEST) {
+                    // mainly for MapperParsingException and Failure to detect xcontent
+                    logger.info("ignoring recovery of a corrupt translog entry", e);
+                } else {
+                    throw e;
+                }
+            }
+        }
+        return opsRecovered;
+    }
+
     public static class BatchOperationException extends ElasticsearchException {
 
         private final int completedOperations;
@@ -182,6 +202,7 @@ public class TranslogRecoveryPerformer {
         // noop
     }
 
+
     /**
      * Returns the recovered types modifying the mapping during the recovery
      */
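A note on the recoveryFromSnapshot hook added above: moving the replay loop from InternalEngine into TranslogRecoveryPerformer lets a subclass (such as IndexShard's anonymous performer) record snapshot-wide stats and then delegate to the shared loop via super. The override-and-delegate shape in miniature (hypothetical types, not from this codebase):

    class Replayer {
        int replay(Iterable<String> ops) {
            int n = 0;
            for (String op : ops) {
                n++; // each operation would be applied here
            }
            return n;
        }
    }

    class CountingReplayer extends Replayer {
        int totalReplayed;

        @Override
        int replay(Iterable<String> ops) {
            int n = super.replay(ops); // delegate to the shared replay loop
            totalReplayed += n;        // subclass-specific bookkeeping
            return n;
        }
    }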
@@ -55,12 +55,9 @@ import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
 import org.elasticsearch.index.mapper.ip.IpFieldMapper;
 import org.elasticsearch.index.mapper.object.ObjectMapper;
 import org.elasticsearch.index.percolator.PercolatorFieldMapper;
 import org.elasticsearch.index.termvectors.TermVectorsService;
 import org.elasticsearch.indices.cache.query.IndicesQueryCache;
 import org.elasticsearch.indices.cache.request.IndicesRequestCache;
 import org.elasticsearch.indices.cluster.IndicesClusterStateService;
-import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
-import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCacheListener;
 import org.elasticsearch.indices.flush.SyncedFlushService;
 import org.elasticsearch.indices.mapper.MapperRegistry;
 import org.elasticsearch.indices.recovery.RecoverySettings;
@@ -165,13 +162,10 @@ public class IndicesModule extends AbstractModule {
         bind(SyncedFlushService.class).asEagerSingleton();
         bind(IndicesQueryCache.class).asEagerSingleton();
         bind(IndicesRequestCache.class).asEagerSingleton();
-        bind(IndicesFieldDataCache.class).asEagerSingleton();
         bind(TransportNodesListShardStoreMetaData.class).asEagerSingleton();
         bind(IndicesTTLService.class).asEagerSingleton();
-        bind(IndicesWarmer.class).asEagerSingleton();
         bind(UpdateHelper.class).asEagerSingleton();
         bind(MetaDataIndexUpgradeService.class).asEagerSingleton();
-        bind(IndicesFieldDataCacheListener.class).asEagerSingleton();
         bind(NodeServicesProvider.class).asEagerSingleton();
     }
 
@@ -20,6 +20,7 @@
 package org.elasticsearch.indices;
 
 import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.CollectionUtil;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ElasticsearchException;
@@ -33,11 +34,15 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.breaker.CircuitBreaker;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.lease.Releasable;
+import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.IndexScopedSettings;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
@@ -51,6 +56,8 @@ import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.NodeServicesProvider;
 import org.elasticsearch.index.analysis.AnalysisRegistry;
+import org.elasticsearch.index.fielddata.FieldDataType;
+import org.elasticsearch.index.fielddata.IndexFieldDataCache;
 import org.elasticsearch.index.flush.FlushStats;
 import org.elasticsearch.index.get.GetStats;
 import org.elasticsearch.index.merge.MergeStats;
@ -63,6 +70,8 @@ import org.elasticsearch.index.shard.IndexShard;
|
|||
import org.elasticsearch.index.shard.IndexingStats;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.store.IndexStoreConfig;
|
||||
import org.elasticsearch.indices.breaker.CircuitBreakerService;
|
||||
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
|
||||
import org.elasticsearch.indices.mapper.MapperRegistry;
|
||||
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
|
||||
import org.elasticsearch.plugins.PluginsService;
|
||||
|
@ -81,6 +90,7 @@ import java.util.concurrent.CountDownLatch;
|
|||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
|
@ -94,6 +104,7 @@ import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList;
|
|||
public class IndicesService extends AbstractLifecycleComponent<IndicesService> implements Iterable<IndexService>, IndexService.ShardStoreDeleter {
|
||||
|
||||
public static final String INDICES_SHARDS_CLOSED_TIMEOUT = "indices.shards_closed_timeout";
|
||||
public static final Setting<TimeValue> INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.fielddata.cache.cleanup_interval", TimeValue.timeValueMinutes(1), false, Setting.Scope.CLUSTER);
|
||||
private final PluginsService pluginsService;
|
||||
private final NodeEnvironment nodeEnv;
|
||||
private final TimeValue shardsClosedTimeout;
|
||||
|
@ -102,23 +113,31 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
private final ClusterService clusterService;
|
||||
private final IndexNameExpressionResolver indexNameExpressionResolver;
|
||||
private final IndexScopedSettings indexScopeSetting;
|
||||
private final IndicesFieldDataCache indicesFieldDataCache;
|
||||
private final FieldDataCacheCleaner fieldDataCacheCleaner;
|
||||
private final ThreadPool threadPool;
|
||||
private final CircuitBreakerService circuitBreakerService;
|
||||
private volatile Map<String, IndexService> indices = emptyMap();
|
||||
private final Map<Index, List<PendingDelete>> pendingDeletes = new HashMap<>();
|
||||
private final OldShardsStats oldShardsStats = new OldShardsStats();
|
||||
private final IndexStoreConfig indexStoreConfig;
|
||||
private final MapperRegistry mapperRegistry;
|
||||
private final IndexingMemoryController indexingMemoryController;
|
||||
private final TimeValue cleanInterval;
|
||||
|
||||
@Override
|
||||
protected void doStart() {
|
||||
// Start thread that will manage cleaning the field data cache periodically
|
||||
threadPool.schedule(this.cleanInterval, ThreadPool.Names.SAME, this.fieldDataCacheCleaner);
|
||||
}
|
||||
|
||||
@Inject
|
||||
public IndicesService(Settings settings, PluginsService pluginsService, NodeEnvironment nodeEnv,
|
||||
ClusterSettings clusterSettings, AnalysisRegistry analysisRegistry,
|
||||
IndicesQueriesRegistry indicesQueriesRegistry, IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
ClusterService clusterService, MapperRegistry mapperRegistry, ThreadPool threadPool, IndexScopedSettings indexScopedSettings) {
|
||||
ClusterService clusterService, MapperRegistry mapperRegistry, ThreadPool threadPool, IndexScopedSettings indexScopedSettings, CircuitBreakerService circuitBreakerService) {
|
||||
super(settings);
|
||||
this.threadPool = threadPool;
|
||||
this.pluginsService = pluginsService;
|
||||
this.nodeEnv = nodeEnv;
|
||||
this.shardsClosedTimeout = settings.getAsTime(INDICES_SHARDS_CLOSED_TIMEOUT, new TimeValue(1, TimeUnit.DAYS));
|
||||
|
@ -132,6 +151,18 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, indexStoreConfig::setRateLimitingThrottle);
|
||||
indexingMemoryController = new IndexingMemoryController(settings, threadPool, this);
|
||||
this.indexScopeSetting = indexScopedSettings;
|
||||
this.circuitBreakerService = circuitBreakerService;
|
||||
this.indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() {
|
||||
@Override
|
||||
public void onRemoval(ShardId shardId, String fieldName, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes) {
|
||||
assert sizeInBytes >= 0 : "When reducing circuit breaker, it should be adjusted with a number higher or equal to 0 and not [" + sizeInBytes + "]";
|
||||
circuitBreakerService.getBreaker(CircuitBreaker.FIELDDATA).addWithoutBreaking(-sizeInBytes);
|
||||
}
|
||||
});
|
||||
this.cleanInterval = INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING.get(settings);
|
||||
this.fieldDataCacheCleaner = new FieldDataCacheCleaner(indicesFieldDataCache, logger, threadPool, this.cleanInterval);
|
||||
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -165,7 +196,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
|
||||
@Override
|
||||
protected void doClose() {
|
||||
IOUtils.closeWhileHandlingException(analysisRegistry, indexingMemoryController);
|
||||
IOUtils.closeWhileHandlingException(analysisRegistry, indexingMemoryController, indicesFieldDataCache, fieldDataCacheCleaner);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -320,7 +351,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
indexModule.addIndexEventListener(oldShardsStats);
|
||||
final IndexEventListener listener = indexModule.freeze();
|
||||
listener.beforeIndexCreated(index, idxSettings.getSettings());
|
||||
final IndexService indexService = indexModule.newIndexService(nodeEnv, this, nodeServicesProvider, mapperRegistry, indexingMemoryController);
|
||||
final IndexService indexService = indexModule.newIndexService(nodeEnv, this, nodeServicesProvider, mapperRegistry, indicesFieldDataCache, indexingMemoryController);
|
||||
boolean success = false;
|
||||
try {
|
||||
assert indexService.getIndexEventListener() == listener;
|
||||
|
@ -381,6 +412,14 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
}
|
||||
}
|
||||
|
||||
public IndicesFieldDataCache getIndicesFieldDataCache() {
|
||||
return indicesFieldDataCache;
|
||||
}
|
||||
|
||||
public CircuitBreakerService getCircuitBreakerService() {
|
||||
return circuitBreakerService;
|
||||
}
|
||||
|
||||
static class OldShardsStats implements IndexEventListener {
|
||||
|
||||
final SearchStats searchStats = new SearchStats();
|
||||
|
@ -760,4 +799,51 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
public AnalysisRegistry getAnalysis() {
|
||||
return analysisRegistry;
|
||||
}
|
||||
|
||||
/**
|
||||
* FieldDataCacheCleaner is a scheduled Runnable used to clean a Guava cache
|
||||
* periodically. In this case it is the field data cache, because a cache that
|
||||
* has an entry invalidated may not clean up the entry if it is not read from
|
||||
* or written to after invalidation.
|
||||
*/
|
||||
private final static class FieldDataCacheCleaner implements Runnable, Releasable {
|
||||
|
||||
private final IndicesFieldDataCache cache;
|
||||
private final ESLogger logger;
|
||||
private final ThreadPool threadPool;
|
||||
private final TimeValue interval;
|
||||
private final AtomicBoolean closed = new AtomicBoolean(false);
|
||||
|
||||
public FieldDataCacheCleaner(IndicesFieldDataCache cache, ESLogger logger, ThreadPool threadPool, TimeValue interval) {
|
||||
this.cache = cache;
|
||||
this.logger = logger;
|
||||
this.threadPool = threadPool;
|
||||
this.interval = interval;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
long startTimeNS = System.nanoTime();
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("running periodic field data cache cleanup");
|
||||
}
|
||||
try {
|
||||
this.cache.getCache().refresh();
|
||||
} catch (Exception e) {
|
||||
logger.warn("Exception during periodic field data cache cleanup:", e);
|
||||
}
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("periodic field data cache cleanup finished in {} milliseconds", TimeValue.nsecToMSec(System.nanoTime() - startTimeNS));
|
||||
}
|
||||
// Reschedule itself to run again if not closed
|
||||
if (closed.get() == false) {
|
||||
threadPool.schedule(interval, ThreadPool.Names.SAME, this);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() {
|
||||
closed.compareAndSet(false, true);
|
||||
}
|
||||
}
|
||||
}
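
Note on the change above: IndicesService now constructs the fielddata cache itself and registers a removal listener that hands evicted bytes back to the fielddata circuit breaker, instead of leaving cache and listener as separately injected singletons. The following plain-Java sketch models only that accounting pattern; every name in it is a hypothetical stand-in, not the Elasticsearch API.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

// Standalone model of the listener wiring above: every eviction hands its
// byte size back to an accounting counter, mirroring how the removal
// listener calls addWithoutBreaking(-sizeInBytes) on the fielddata breaker.
public class BreakerAccountingSketch {

    interface RemovalListener {
        void onRemoval(String key, long sizeInBytes);
    }

    static class AccountingBreaker {
        private final AtomicLong used = new AtomicLong();
        void addWithoutBreaking(long bytes) { used.addAndGet(bytes); }
        long used() { return used.get(); }
    }

    public static void main(String[] args) {
        AccountingBreaker breaker = new AccountingBreaker();
        Map<String, Long> cache = new ConcurrentHashMap<>();
        // the owning service, not the cache, decides what happens on removal
        RemovalListener listener = (key, sizeInBytes) -> {
            assert sizeInBytes >= 0;
            breaker.addWithoutBreaking(-sizeInBytes);
        };
        cache.put("field", 128L);
        breaker.addWithoutBreaking(128L);
        long removed = cache.remove("field");
        listener.onRemoval("field", removed);
        System.out.println("bytes accounted after eviction: " + breaker.used()); // 0
    }
}

Keeping the listener outside the cache is what lets the test changes further down swap in a no-op IndexFieldDataCache.Listener without touching any breaker.
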
@@ -1,131 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.indices;

import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;

/**
 */
public final class IndicesWarmer extends AbstractComponent {

    private final ThreadPool threadPool;

    private final CopyOnWriteArrayList<Listener> listeners = new CopyOnWriteArrayList<>();

    @Inject
    public IndicesWarmer(Settings settings, ThreadPool threadPool) {
        super(settings);
        this.threadPool = threadPool;
    }

    public void addListener(Listener listener) {
        listeners.add(listener);
    }
    public void removeListener(Listener listener) {
        listeners.remove(listener);
    }

    public void warm(Engine.Searcher searcher, IndexShard shard, IndexSettings settings, boolean isTopReader) {
        if (shard.state() == IndexShardState.CLOSED) {
            return;
        }
        if (settings.isWarmerEnabled() == false) {
            return;
        }
        if (logger.isTraceEnabled()) {
            if (isTopReader) {
                logger.trace("{} top warming [{}]", shard.shardId(), searcher.reader());
            } else {
                logger.trace("{} warming [{}]", shard.shardId(), searcher.reader());
            }
        }
        shard.warmerService().onPreWarm();
        long time = System.nanoTime();
        final List<TerminationHandle> terminationHandles = new ArrayList<>();
        // get a handle on pending tasks
        for (final Listener listener : listeners) {
            if (isTopReader) {
                terminationHandles.add(listener.warmTopReader(shard, searcher));
            } else {
                terminationHandles.add(listener.warmNewReaders(shard, searcher));
            }
        }
        // wait for termination
        for (TerminationHandle terminationHandle : terminationHandles) {
            try {
                terminationHandle.awaitTermination();
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                if (isTopReader) {
                    logger.warn("top warming has been interrupted", e);
                } else {
                    logger.warn("warming has been interrupted", e);
                }
                break;
            }
        }
        long took = System.nanoTime() - time;
        shard.warmerService().onPostWarm(took);
        if (shard.warmerService().logger().isTraceEnabled()) {
            if (isTopReader) {
                shard.warmerService().logger().trace("top warming took [{}]", new TimeValue(took, TimeUnit.NANOSECONDS));
            } else {
                shard.warmerService().logger().trace("warming took [{}]", new TimeValue(took, TimeUnit.NANOSECONDS));
            }
        }
    }

    /**
     * Returns an executor for async warmer tasks
     */
    public Executor getExecutor() {
        return threadPool.executor(ThreadPool.Names.WARMER);
    }

    /** A handle on the execution of warm-up action. */
    public interface TerminationHandle {

        TerminationHandle NO_WAIT = () -> {};

        /** Wait until execution of the warm-up action completes. */
        void awaitTermination() throws InterruptedException;
    }
    public interface Listener {
        /** Queue tasks to warm-up the given segments and return handles that allow to wait for termination of the execution of those tasks. */
        TerminationHandle warmNewReaders(IndexShard indexShard, Engine.Searcher searcher);

        TerminationHandle warmTopReader(IndexShard indexShard, Engine.Searcher searcher);
    }

}
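
The file deleted above defined the Listener/TerminationHandle contract: listeners queue asynchronous warm-up work and return handles, and the caller blocks on every handle before the searcher is used. A self-contained model of that fan-out-and-await pattern (plain Java; the helper names are illustrative, not Elasticsearch's):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Standalone model of the TerminationHandle pattern from the deleted warmer:
// each warm-up task is submitted asynchronously and hands back a handle; the
// caller then awaits every handle so warming finishes before proceeding.
public class TerminationHandleSketch {

    interface TerminationHandle {
        TerminationHandle NO_WAIT = () -> {};
        void awaitTermination() throws InterruptedException;
    }

    static TerminationHandle submitWarmTask(ExecutorService executor, Runnable task) {
        CountDownLatch latch = new CountDownLatch(1);
        executor.execute(() -> {
            try {
                task.run();
            } finally {
                latch.countDown(); // always release the waiter, even on failure
            }
        });
        return latch::await;
    }

    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        List<TerminationHandle> handles = new ArrayList<>();
        handles.add(submitWarmTask(executor, () -> System.out.println("warm norms")));
        handles.add(submitWarmTask(executor, () -> System.out.println("warm fielddata")));
        handles.add(TerminationHandle.NO_WAIT); // a listener with nothing to warm
        for (TerminationHandle handle : handles) {
            handle.awaitTermination(); // wait for all warm-up tasks
        }
        executor.shutdown();
    }
}
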
@@ -30,14 +30,12 @@ import org.elasticsearch.common.cache.CacheBuilder;
import org.elasticsearch.common.cache.RemovalListener;
import org.elasticsearch.common.cache.RemovalNotification;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.fielddata.AtomicFieldData;
import org.elasticsearch.index.fielddata.FieldDataType;
@@ -45,7 +43,6 @@ import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardUtils;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.ArrayList;
import java.util.List;
@@ -55,20 +52,12 @@ import java.util.function.ToLongBiFunction;
 */
public class IndicesFieldDataCache extends AbstractComponent implements RemovalListener<IndicesFieldDataCache.Key, Accountable>, Releasable{

    public static final Setting<TimeValue> INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.fielddata.cache.cleanup_interval", TimeValue.timeValueMinutes(1), false, Setting.Scope.CLUSTER);
    public static final Setting<ByteSizeValue> INDICES_FIELDDATA_CACHE_SIZE_KEY = Setting.byteSizeSetting("indices.fielddata.cache.size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);


    private final IndicesFieldDataCacheListener indicesFieldDataCacheListener;
    private final IndexFieldDataCache.Listener indicesFieldDataCacheListener;
    private final Cache<Key, Accountable> cache;
    private final TimeValue cleanInterval;
    private final ThreadPool threadPool;
    private volatile boolean closed = false;

    @Inject
    public IndicesFieldDataCache(Settings settings, IndicesFieldDataCacheListener indicesFieldDataCacheListener, ThreadPool threadPool) {
    public IndicesFieldDataCache(Settings settings, IndexFieldDataCache.Listener indicesFieldDataCacheListener) {
        super(settings);
        this.threadPool = threadPool;
        this.indicesFieldDataCacheListener = indicesFieldDataCacheListener;
        final long sizeInBytes = INDICES_FIELDDATA_CACHE_SIZE_KEY.get(settings).bytes();
        CacheBuilder<Key, Accountable> cacheBuilder = CacheBuilder.<Key, Accountable>builder()
@@ -76,19 +65,12 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL
        if (sizeInBytes > 0) {
            cacheBuilder.setMaximumWeight(sizeInBytes).weigher(new FieldDataWeigher());
        }

        cache = cacheBuilder.build();

        this.cleanInterval = INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING.get(settings);
        // Start thread that will manage cleaning the field data cache periodically
        threadPool.schedule(this.cleanInterval, ThreadPool.Names.SAME,
                new FieldDataCacheCleaner(this.cache, this.logger, this.threadPool, this.cleanInterval));
    }

    @Override
    public void close() {
        cache.invalidateAll();
        this.closed = true;
    }

    public IndexFieldDataCache buildIndexFieldDataCache(IndexFieldDataCache.Listener listener, Index index, String fieldName, FieldDataType fieldDataType) {
@@ -260,44 +242,5 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL
        }
    }

    /**
     * FieldDataCacheCleaner is a scheduled Runnable used to clean a Guava cache
     * periodically. In this case it is the field data cache, because a cache that
     * has an entry invalidated may not clean up the entry if it is not read from
     * or written to after invalidation.
     */
    public class FieldDataCacheCleaner implements Runnable {

        private final Cache<Key, Accountable> cache;
        private final ESLogger logger;
        private final ThreadPool threadPool;
        private final TimeValue interval;

        public FieldDataCacheCleaner(Cache cache, ESLogger logger, ThreadPool threadPool, TimeValue interval) {
            this.cache = cache;
            this.logger = logger;
            this.threadPool = threadPool;
            this.interval = interval;
        }

        @Override
        public void run() {
            long startTimeNS = System.nanoTime();
            if (logger.isTraceEnabled()) {
                logger.trace("running periodic field data cache cleanup");
            }
            try {
                this.cache.refresh();
            } catch (Exception e) {
                logger.warn("Exception during periodic field data cache cleanup:", e);
            }
            if (logger.isTraceEnabled()) {
                logger.trace("periodic field data cache cleanup finished in {} milliseconds", TimeValue.nsecToMSec(System.nanoTime() - startTimeNS));
            }
            // Reschedule itself to run again if not closed
            if (closed == false) {
                threadPool.schedule(interval, ThreadPool.Names.SAME, this);
            }
        }
    }
}
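
The cache itself no longer owns a cleaning thread: its constructor loses the ThreadPool parameter, takes an IndexFieldDataCache.Listener instead of the concrete IndicesFieldDataCacheListener, and the self-rescheduling cleaner moves into IndicesService (shown earlier). A standalone model of that reschedule-until-closed pattern, using a plain ScheduledExecutorService rather than the Elasticsearch ThreadPool; names are illustrative only:

import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

// Standalone model of the cleaner this commit moves into IndicesService:
// one sweep per run, then the task reschedules itself until close() stops it.
public class SelfReschedulingCleaner implements Runnable, AutoCloseable {

    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private final AtomicBoolean closed = new AtomicBoolean(false);
    private final long intervalMillis;
    private final Runnable sweep;

    public SelfReschedulingCleaner(long intervalMillis, Runnable sweep) {
        this.intervalMillis = intervalMillis;
        this.sweep = sweep;
    }

    public void start() {
        scheduler.schedule(this, intervalMillis, TimeUnit.MILLISECONDS);
    }

    @Override
    public void run() {
        try {
            sweep.run(); // stands in for cache.refresh() in the diff
        } finally {
            if (closed.get() == false) { // reschedule only while open
                try {
                    scheduler.schedule(this, intervalMillis, TimeUnit.MILLISECONDS);
                } catch (RejectedExecutionException e) {
                    // closed concurrently; stop the chain
                }
            }
        }
    }

    @Override
    public void close() {
        closed.compareAndSet(false, true);
        scheduler.shutdown();
    }

    public static void main(String[] args) throws InterruptedException {
        SelfReschedulingCleaner cleaner = new SelfReschedulingCleaner(100, () -> System.out.println("sweep"));
        cleaner.start();
        Thread.sleep(350); // let a few sweeps run
        cleaner.close();
    }
}
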
@@ -392,7 +392,6 @@ public class Node implements Closeable {
        toClose.add(injector.getInstance(IndicesService.class));
        // close filter/fielddata caches after indices
        toClose.add(injector.getInstance(IndicesQueryCache.class));
        toClose.add(injector.getInstance(IndicesFieldDataCache.class));
        toClose.add(injector.getInstance(IndicesStore.class));
        toClose.add(() ->stopWatch.stop().start("routing"));
        toClose.add(injector.getInstance(RoutingService.class));

@@ -48,7 +48,7 @@ public class BytesRestResponse extends RestResponse {
     * Creates a new response based on {@link XContentBuilder}.
     */
    public BytesRestResponse(RestStatus status, XContentBuilder builder) {
        this(status, builder.contentType().restContentType(), builder.bytes());
        this(status, builder.contentType().mediaType(), builder.bytes());
    }

    /**
@@ -93,7 +93,7 @@ public class BytesRestResponse extends RestResponse {
        } else {
            XContentBuilder builder = convert(channel, status, t);
            this.content = builder.bytes();
            this.contentType = builder.contentType().restContentType();
            this.contentType = builder.contentType().mediaType();
        }
        if (t instanceof ElasticsearchException) {
            copyHeaders(((ElasticsearchException) t));

@@ -53,7 +53,7 @@ public abstract class RestChannel {
    }

    public XContentBuilder newBuilder(@Nullable BytesReference autoDetectSource, boolean useFiltering) throws IOException {
        XContentType contentType = XContentType.fromRestContentType(request.param("format", request.header("Content-Type")));
        XContentType contentType = XContentType.fromMediaTypeOrFormat(request.param("format", request.header("Accept")));
        if (contentType == null) {
            // try and guess it from the auto detect source
            if (autoDetectSource != null) {

@@ -48,7 +48,7 @@ public class RestTable {

    public static RestResponse buildResponse(Table table, RestChannel channel) throws Exception {
        RestRequest request = channel.request();
        XContentType xContentType = XContentType.fromRestContentType(request.param("format", request.header("Content-Type")));
        XContentType xContentType = XContentType.fromMediaTypeOrFormat(request.param("format", request.header("Accept")));
        if (xContentType != null) {
            return buildXContentBuilder(table, channel);
        }
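
Both REST changes above switch response-format detection from the Content-Type request header to the Accept header, via the renamed XContentType.fromMediaTypeOrFormat. A standalone sketch of what such a resolver accepts, consistent with the XContentTypeTests added later in this commit; this is a hypothetical helper, not the real implementation:

import java.util.Locale;

// Standalone model of the negotiation change above: resolve a response format
// from the "format" parameter or the Accept header. The resolver takes either
// a media type (with optional parameters) or a short format name.
public class MediaTypeSketch {

    static String fromMediaTypeOrFormat(String value) {
        if (value == null || value.isEmpty()) {
            return null;
        }
        // strip media type parameters such as "; charset=UTF-8"
        String type = value.split(";", 2)[0].trim().toLowerCase(Locale.ROOT);
        switch (type) {
            case "application/json":
            case "application/*": // wildcard defaults to JSON, as in the tests
            case "json":
                return "JSON";
            case "application/yaml":
            case "yaml":
                return "YAML";
            default:
                return null; // unknown media types stay unresolved
        }
    }

    public static void main(String[] args) {
        System.out.println(fromMediaTypeOrFormat("application/json; charset=UTF-8")); // JSON
        System.out.println(fromMediaTypeOrFormat("APPLICATION/*")); // JSON (case-insensitive)
        System.out.println(fromMediaTypeOrFormat("text/plain")); // null
    }
}
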
@@ -20,13 +20,6 @@
package org.elasticsearch.search;

import com.carrotsearch.hppc.ObjectFloatHashMap;
import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.hppc.ObjectSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;

import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.ExceptionsHelper;
@@ -54,14 +47,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MappedFieldType.Loading;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.search.stats.ShardSearchStats;
@@ -69,8 +54,6 @@ import org.elasticsearch.index.search.stats.StatsGroupsParseElement;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.IndicesWarmer.TerminationHandle;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.ScriptContext;
@@ -109,9 +92,7 @@ import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicLong;

@@ -124,7 +105,6 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
 */
public class SearchService extends AbstractLifecycleComponent<SearchService> implements IndexEventListener {

    public static final Setting<Loading> INDEX_NORMS_LOADING_SETTING = new Setting<>("index.norms.loading", Loading.LAZY.toString(), (s) -> Loading.parse(s, Loading.LAZY), false, Setting.Scope.INDEX);
    // we can have 5 minutes here, since we make sure to clean with search requests and when shard/index closes
    public static final Setting<TimeValue> DEFAULT_KEEPALIVE_SETTING = Setting.positiveTimeSetting("search.default_keep_alive", timeValueMinutes(5), false, Setting.Scope.CLUSTER);
    public static final Setting<TimeValue> KEEPALIVE_INTERVAL_SETTING = Setting.positiveTimeSetting("search.keep_alive_interval", timeValueMinutes(1), false, Setting.Scope.CLUSTER);
@@ -139,8 +119,6 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp

    private final IndicesService indicesService;

    private final IndicesWarmer indicesWarmer;

    private final ScriptService scriptService;

    private final PageCacheRecycler pageCacheRecycler;
@@ -170,7 +148,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
    private final ParseFieldMatcher parseFieldMatcher;

    @Inject
    public SearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer, ThreadPool threadPool,
    public SearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool,
            ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase,
            IndicesRequestCache indicesQueryCache) {
        super(settings);
@@ -178,7 +156,6 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
        this.threadPool = threadPool;
        this.clusterService = clusterService;
        this.indicesService = indicesService;
        this.indicesWarmer = indicesWarmer;
        this.scriptService = scriptService;
        this.pageCacheRecycler = pageCacheRecycler;
        this.bigArrays = bigArrays;
@@ -199,9 +176,6 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp

        this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval);

        this.indicesWarmer.addListener(new NormsWarmer(indicesWarmer));
        this.indicesWarmer.addListener(new FieldDataWarmer(indicesWarmer));

        defaultSearchTimeout = DEFAULT_SEARCH_TIMEOUT_SETTING.get(settings);
        clusterSettings.addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout);
    }
@@ -949,184 +923,6 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
        return this.activeContexts.size();
    }

    static class NormsWarmer implements IndicesWarmer.Listener {
        private final IndicesWarmer indicesWarmer;

        public NormsWarmer(IndicesWarmer indicesWarmer) {
            this.indicesWarmer = indicesWarmer;
        }
        @Override
        public TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) {
            final Loading defaultLoading = indexShard.indexSettings().getValue(INDEX_NORMS_LOADING_SETTING);
            final MapperService mapperService = indexShard.mapperService();
            final ObjectSet<String> warmUp = new ObjectHashSet<>();
            for (DocumentMapper docMapper : mapperService.docMappers(false)) {
                for (FieldMapper fieldMapper : docMapper.mappers()) {
                    final String indexName = fieldMapper.fieldType().name();
                    Loading normsLoading = fieldMapper.fieldType().normsLoading();
                    if (normsLoading == null) {
                        normsLoading = defaultLoading;
                    }
                    if (fieldMapper.fieldType().indexOptions() != IndexOptions.NONE && !fieldMapper.fieldType().omitNorms() && normsLoading == Loading.EAGER) {
                        warmUp.add(indexName);
                    }
                }
            }

            final CountDownLatch latch = new CountDownLatch(1);
            // Norms loading may be I/O intensive but is not CPU intensive, so we execute it in a single task
            indicesWarmer.getExecutor().execute(new Runnable() {
                @Override
                public void run() {
                    try {
                        for (ObjectCursor<String> stringObjectCursor : warmUp) {
                            final String indexName = stringObjectCursor.value;
                            final long start = System.nanoTime();
                            for (final LeafReaderContext ctx : searcher.reader().leaves()) {
                                final NumericDocValues values = ctx.reader().getNormValues(indexName);
                                if (values != null) {
                                    values.get(0);
                                }
                            }
                            if (indexShard.warmerService().logger().isTraceEnabled()) {
                                indexShard.warmerService().logger().trace("warmed norms for [{}], took [{}]", indexName, TimeValue.timeValueNanos(System.nanoTime() - start));
                            }
                        }
                    } catch (Throwable t) {
                        indexShard.warmerService().logger().warn("failed to warm-up norms", t);
                    } finally {
                        latch.countDown();
                    }
                }
            });

            return new TerminationHandle() {
                @Override
                public void awaitTermination() throws InterruptedException {
                    latch.await();
                }
            };
        }

        @Override
        public TerminationHandle warmTopReader(IndexShard indexShard, final Engine.Searcher searcher) {
            return TerminationHandle.NO_WAIT;
        }
    }

    static class FieldDataWarmer implements IndicesWarmer.Listener {

        private final IndicesWarmer indicesWarmer;

        public FieldDataWarmer(IndicesWarmer indicesWarmer) {
            this.indicesWarmer = indicesWarmer;
        }

        @Override
        public TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) {
            final MapperService mapperService = indexShard.mapperService();
            final Map<String, MappedFieldType> warmUp = new HashMap<>();
            for (DocumentMapper docMapper : mapperService.docMappers(false)) {
                for (FieldMapper fieldMapper : docMapper.mappers()) {
                    final FieldDataType fieldDataType = fieldMapper.fieldType().fieldDataType();
                    final String indexName = fieldMapper.fieldType().name();
                    if (fieldDataType == null) {
                        continue;
                    }
                    if (fieldDataType.getLoading() == Loading.LAZY) {
                        continue;
                    }

                    if (warmUp.containsKey(indexName)) {
                        continue;
                    }
                    warmUp.put(indexName, fieldMapper.fieldType());
                }
            }
            final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService();
            final Executor executor = indicesWarmer.getExecutor();
            final CountDownLatch latch = new CountDownLatch(searcher.reader().leaves().size() * warmUp.size());
            for (final LeafReaderContext ctx : searcher.reader().leaves()) {
                for (final MappedFieldType fieldType : warmUp.values()) {
                    executor.execute(new Runnable() {

                        @Override
                        public void run() {
                            try {
                                final long start = System.nanoTime();
                                indexFieldDataService.getForField(fieldType).load(ctx);
                                if (indexShard.warmerService().logger().isTraceEnabled()) {
                                    indexShard.warmerService().logger().trace("warmed fielddata for [{}], took [{}]", fieldType.name(), TimeValue.timeValueNanos(System.nanoTime() - start));
                                }
                            } catch (Throwable t) {
                                indexShard.warmerService().logger().warn("failed to warm-up fielddata for [{}]", t, fieldType.name());
                            } finally {
                                latch.countDown();
                            }
                        }

                    });
                }
            }
            return new TerminationHandle() {
                @Override
                public void awaitTermination() throws InterruptedException {
                    latch.await();
                }
            };
        }

        @Override
        public TerminationHandle warmTopReader(final IndexShard indexShard, final Engine.Searcher searcher) {
            final MapperService mapperService = indexShard.mapperService();
            final Map<String, MappedFieldType> warmUpGlobalOrdinals = new HashMap<>();
            for (DocumentMapper docMapper : mapperService.docMappers(false)) {
                for (FieldMapper fieldMapper : docMapper.mappers()) {
                    final FieldDataType fieldDataType = fieldMapper.fieldType().fieldDataType();
                    final String indexName = fieldMapper.fieldType().name();
                    if (fieldDataType == null) {
                        continue;
                    }
                    if (fieldDataType.getLoading() != Loading.EAGER_GLOBAL_ORDINALS) {
                        continue;
                    }
                    if (warmUpGlobalOrdinals.containsKey(indexName)) {
                        continue;
                    }
                    warmUpGlobalOrdinals.put(indexName, fieldMapper.fieldType());
                }
            }
            final IndexFieldDataService indexFieldDataService = indexShard.indexFieldDataService();
            final Executor executor = indicesWarmer.getExecutor();
            final CountDownLatch latch = new CountDownLatch(warmUpGlobalOrdinals.size());
            for (final MappedFieldType fieldType : warmUpGlobalOrdinals.values()) {
                executor.execute(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            final long start = System.nanoTime();
                            IndexFieldData.Global ifd = indexFieldDataService.getForField(fieldType);
                            ifd.loadGlobal(searcher.getDirectoryReader());
                            if (indexShard.warmerService().logger().isTraceEnabled()) {
                                indexShard.warmerService().logger().trace("warmed global ordinals for [{}], took [{}]", fieldType.name(), TimeValue.timeValueNanos(System.nanoTime() - start));
                            }
                        } catch (Throwable t) {
                            indexShard.warmerService().logger().warn("failed to warm-up global ordinals for [{}]", t, fieldType.name());
                        } finally {
                            latch.countDown();
                        }
                    }
                });
            }
            return new TerminationHandle() {
                @Override
                public void awaitTermination() throws InterruptedException {
                    latch.await();
                }
            };
        }
    }

    class Reaper implements Runnable {
        @Override
        public void run() {

@@ -48,6 +48,11 @@ public class ShardRoutingHelper {
        routing.reinitializeShard();
    }

    public static void reinit(ShardRouting routing, UnassignedInfo.Reason reason) {
        routing.reinitializeShard();
        routing.updateUnassignedInfo(new UnassignedInfo(reason, "test_reinit"));
    }

    public static void moveToUnassigned(ShardRouting routing, UnassignedInfo info) {
        routing.moveToUnassigned(info);
    }

@@ -0,0 +1,87 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.common.xcontent;

import org.elasticsearch.test.ESTestCase;

import java.util.Locale;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;

public class XContentTypeTests extends ESTestCase {
    public void testFromJson() throws Exception {
        String mediaType = "application/json";
        XContentType expectedXContentType = XContentType.JSON;
        assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType));
        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType));
        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
    }

    public void testFromJsonUppercase() throws Exception {
        String mediaType = "application/json".toUpperCase(Locale.ROOT);
        XContentType expectedXContentType = XContentType.JSON;
        assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType));
        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType));
        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
    }

    public void testFromYaml() throws Exception {
        String mediaType = "application/yaml";
        XContentType expectedXContentType = XContentType.YAML;
        assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType));
        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType));
        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
    }

    public void testFromSmile() throws Exception {
        String mediaType = "application/smile";
        XContentType expectedXContentType = XContentType.SMILE;
        assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType));
        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType));
    }

    public void testFromCbor() throws Exception {
        String mediaType = "application/cbor";
        XContentType expectedXContentType = XContentType.CBOR;
        assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType));
        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType));
    }

    public void testFromWildcard() throws Exception {
        String mediaType = "application/*";
        XContentType expectedXContentType = XContentType.JSON;
        assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType));
        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType));
    }

    public void testFromWildcardUppercase() throws Exception {
        String mediaType = "APPLICATION/*";
        XContentType expectedXContentType = XContentType.JSON;
        assertThat(XContentType.fromMediaTypeOrFormat(mediaType), equalTo(expectedXContentType));
        assertThat(XContentType.fromMediaTypeOrFormat(mediaType + ";"), equalTo(expectedXContentType));
    }

    public void testFromRubbish() throws Exception {
        assertThat(XContentType.fromMediaTypeOrFormat(null), nullValue());
        assertThat(XContentType.fromMediaTypeOrFormat(""), nullValue());
        assertThat(XContentType.fromMediaTypeOrFormat("text/plain"), nullValue());
        assertThat(XContentType.fromMediaTypeOrFormat("gobbly;goop"), nullValue());
    }
}

@@ -44,6 +44,7 @@ import org.elasticsearch.index.cache.query.QueryCache;
import org.elasticsearch.index.cache.query.index.IndexQueryCache;
import org.elasticsearch.index.cache.query.none.NoneQueryCache;
import org.elasticsearch.index.engine.EngineException;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexSearcherWrapper;
import org.elasticsearch.index.shard.ShardId;
@@ -52,7 +53,6 @@ import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
@@ -96,17 +96,17 @@ public class IndexModuleTests extends ESTestCase {
        public void addPendingDelete(ShardId shardId, IndexSettings indexSettings) {
        }
    };

    private final IndexFieldDataCache.Listener listener = new IndexFieldDataCache.Listener() {};
    private MapperRegistry mapperRegistry;

    static NodeServicesProvider newNodeServiceProvider(Settings settings, Environment environment, Client client, ScriptEngineService... scriptEngineServices) throws IOException {
        // TODO this can be used in other place too - lets first refactor the IndicesQueriesRegistry
        ThreadPool threadPool = new ThreadPool("test");
        IndicesWarmer warmer = new IndicesWarmer(settings, threadPool);
        IndicesQueryCache indicesQueryCache = new IndicesQueryCache(settings);
        CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService();
        PageCacheRecycler recycler = new PageCacheRecycler(settings, threadPool);
        BigArrays bigArrays = new BigArrays(recycler, circuitBreakerService);
        IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndicesFieldDataCacheListener(circuitBreakerService), threadPool);
        Set<ScriptEngineService> scriptEngines = Collections.emptySet();
        scriptEngines.addAll(Arrays.asList(scriptEngineServices));
        ScriptEngineRegistry scriptEngineRegistry = new ScriptEngineRegistry(Collections.emptyList());
@@ -114,7 +114,7 @@ public class IndexModuleTests extends ESTestCase {
        ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry);
        ScriptService scriptService = new ScriptService(settings, environment, scriptEngines, new ResourceWatcherService(settings, threadPool), scriptEngineRegistry, scriptContextRegistry, scriptSettings);
        IndicesQueriesRegistry indicesQueriesRegistry = new IndicesQueriesRegistry(settings, emptyMap());
        return new NodeServicesProvider(threadPool, indicesQueryCache, warmer, bigArrays, client, scriptService, indicesQueriesRegistry, indicesFieldDataCache, circuitBreakerService);
        return new NodeServicesProvider(threadPool, indicesQueryCache, bigArrays, client, scriptService, indicesQueriesRegistry, circuitBreakerService);
    }

    @Override
@@ -143,7 +143,7 @@ public class IndexModuleTests extends ESTestCase {
        IndexModule module = new IndexModule(indexSettings, null, new AnalysisRegistry(null, environment));
        module.setSearcherWrapper((s) -> new Wrapper());
        module.engineFactory.set(new MockEngineFactory(AssertingDirectoryReader.class));
        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry);
        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry, new IndicesFieldDataCache(settings, listener));
        assertTrue(indexService.getSearcherWrapper() instanceof Wrapper);
        assertSame(indexService.getEngineFactory(), module.engineFactory.get());
        indexService.close("simon says", false);
@@ -161,7 +161,7 @@ public class IndexModuleTests extends ESTestCase {
        final Index index = indexSettings.getIndex();
        IndexModule module = new IndexModule(indexSettings, null, new AnalysisRegistry(null, environment));
        module.addIndexStore("foo_store", FooStore::new);
        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry);
        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry, new IndicesFieldDataCache(settings, listener));
        assertTrue(indexService.getIndexStore() instanceof FooStore);
        try {
            module.addIndexStore("foo_store", FooStore::new);
@@ -184,7 +184,8 @@ public class IndexModuleTests extends ESTestCase {
        IndexModule module = new IndexModule(indexSettings, null, new AnalysisRegistry(null, environment));
        Consumer<Settings> listener = (s) -> {};
        module.addIndexEventListener(eventListener);
        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry);
        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry,
                new IndicesFieldDataCache(settings, this.listener));
        IndexSettings x = indexService.getIndexSettings();
        assertEquals(x.getSettings().getAsMap(), indexSettings.getSettings().getAsMap());
        assertEquals(x.getIndex(), index);
@@ -208,7 +209,8 @@

        }

        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry);
        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry,
                new IndicesFieldDataCache(settings, listener));
        assertSame(booleanSetting, indexService.getIndexSettings().getScopedSettings().get(booleanSetting.getKey()));

        indexService.close("simon says", false);
@@ -234,7 +236,8 @@
            }
        });

        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry);
        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry,
                new IndicesFieldDataCache(settings, listener));
        SimilarityService similarityService = indexService.similarityService();
        assertNotNull(similarityService.getSimilarity("my_similarity"));
        assertTrue(similarityService.getSimilarity("my_similarity").get() instanceof TestSimilarity);
@@ -251,7 +254,8 @@
                .build();
        IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), null, new AnalysisRegistry(null, environment));
        try {
            module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry);
            module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry,
                    new IndicesFieldDataCache(settings, listener));
        } catch (IllegalArgumentException ex) {
            assertEquals("Unknown Similarity type [test_similarity] for [my_similarity]", ex.getMessage());
        }
@@ -265,7 +269,8 @@
                .build();
        IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), null, new AnalysisRegistry(null, environment));
        try {
            module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry);
            module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry,
                    new IndicesFieldDataCache(settings, listener));
        } catch (IllegalArgumentException ex) {
            assertEquals("Similarity [my_similarity] must have an associated type", ex.getMessage());
        }
@@ -312,7 +317,8 @@
            assertEquals(e.getMessage(), "Can't register the same [query_cache] more than once for [custom]");
        }

        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry);
        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry,
                new IndicesFieldDataCache(settings, listener));
        assertTrue(indexService.cache().query() instanceof CustomQueryCache);
        indexService.close("simon says", false);
    }
@@ -322,7 +328,8 @@
                .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
                .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
        IndexModule module = new IndexModule(IndexSettingsModule.newIndexSettings("foo", indexSettings), null, new AnalysisRegistry(null, environment));
        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry);
        IndexService indexService = module.newIndexService(nodeEnvironment, deleter, nodeServicesProvider, mapperRegistry,
                new IndicesFieldDataCache(settings, listener));
        assertTrue(indexService.cache().query() instanceof IndexQueryCache);
        indexService.close("simon says", false);
    }

@@ -41,10 +41,8 @@ import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.IndexSettingsModule;

@@ -57,8 +55,6 @@ import static org.hamcrest.Matchers.equalTo;
public class BitSetFilterCacheTests extends ESTestCase {

    private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings("test", Settings.EMPTY);
    private final IndicesWarmer warmer = new IndicesWarmer(Settings.EMPTY, null);


    private static int matchCount(BitSetProducer producer, IndexReader reader) throws IOException {
        int count = 0;
@@ -95,7 +91,7 @@ public class BitSetFilterCacheTests extends ESTestCase {
        reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId("test", "_na_", 0));
        IndexSearcher searcher = new IndexSearcher(reader);

        BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, warmer, new BitsetFilterCache.Listener() {
        BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, new BitsetFilterCache.Listener() {
            @Override
            public void onCache(ShardId shardId, Accountable accountable) {

@@ -149,7 +145,7 @@ public class BitSetFilterCacheTests extends ESTestCase {
        final AtomicInteger onCacheCalls = new AtomicInteger();
        final AtomicInteger onRemoveCalls = new AtomicInteger();

        final BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, warmer, new BitsetFilterCache.Listener() {
        final BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, new BitsetFilterCache.Listener() {
            @Override
            public void onCache(ShardId shardId, Accountable accountable) {
                onCacheCalls.incrementAndGet();
@@ -188,7 +184,7 @@ public class BitSetFilterCacheTests extends ESTestCase {

    public void testSetNullListener() {
        try {
            new BitsetFilterCache(INDEX_SETTINGS, warmer, null);
            new BitsetFilterCache(INDEX_SETTINGS, null);
            fail("listener can't be null");
        } catch (IllegalArgumentException ex) {
            assertEquals("listener must not be null", ex.getMessage());
@@ -197,7 +193,7 @@ public class BitSetFilterCacheTests extends ESTestCase {
    }

    public void testRejectOtherIndex() throws IOException {
        BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, warmer, new BitsetFilterCache.Listener() {
        BitsetFilterCache cache = new BitsetFilterCache(INDEX_SETTINGS, new BitsetFilterCache.Listener() {
            @Override
            public void onCache(ShardId shardId, Accountable accountable) {

@@ -208,7 +204,7 @@ public class BitSetFilterCacheTests extends ESTestCase {

            }
        });


        Directory dir = newDirectory();
        IndexWriter writer = new IndexWriter(
            dir,
@@ -218,9 +214,9 @@ public class BitSetFilterCacheTests extends ESTestCase {
        DirectoryReader reader = DirectoryReader.open(writer, true);
        writer.close();
        reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId("test2", "_na_", 0));


        BitSetProducer producer = cache.getBitSetProducer(new MatchAllDocsQuery());


        try {
            producer.getBitSet(reader.leaves().get(0));
            fail();

@@ -48,6 +48,7 @@ import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper;
import org.elasticsearch.index.mapper.geo.GeoPointFieldMapperLegacy;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
@@ -132,7 +133,7 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase {
                .put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
        indexService = createIndex("test", settings);
        mapperService = indexService.mapperService();
        indicesFieldDataCache = getInstanceFromNode(IndicesFieldDataCache.class);
        indicesFieldDataCache = getInstanceFromNode(IndicesService.class).getIndicesFieldDataCache();
        ifdService = indexService.fieldData();
        // LogByteSizeMP to preserve doc ID order
        writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(new LogByteSizeMergePolicy()));

@ -51,6 +51,7 @@ import org.elasticsearch.index.mapper.core.LongFieldMapper;
|
|||
import org.elasticsearch.index.mapper.core.ShortFieldMapper;
|
||||
import org.elasticsearch.index.mapper.core.StringFieldMapper;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.breaker.CircuitBreakerService;
|
||||
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
|
@ -131,10 +132,10 @@ public class IndexFieldDataServiceTests extends ESSingleNodeTestCase {
|
|||
|
||||
public void testFieldDataCacheListener() throws Exception {
|
||||
final IndexService indexService = createIndex("test");
|
||||
IndexFieldDataService shardPrivateService = indexService.fieldData();
|
||||
final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
|
||||
// copy the ifdService since we can set the listener only once.
|
||||
final IndexFieldDataService ifdService = new IndexFieldDataService(indexService.getIndexSettings(),
|
||||
getInstanceFromNode(IndicesFieldDataCache.class), getInstanceFromNode(CircuitBreakerService.class), indexService.mapperService());
|
||||
indicesService.getIndicesFieldDataCache(), indicesService.getCircuitBreakerService(), indexService.mapperService());
|
||||
|
||||
final BuilderContext ctx = new BuilderContext(indexService.getIndexSettings().getSettings(), new ContentPath(1));
|
||||
final MappedFieldType mapper1 = MapperBuilders.stringField("s").tokenized(false).docValues(true).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "paged_bytes").build()).build(ctx).fieldType();
|
||||
|
@ -205,7 +206,7 @@ public class IndexFieldDataServiceTests extends ESSingleNodeTestCase {
private void doTestRequireDocValues(MappedFieldType ft) {
ThreadPool threadPool = new ThreadPool("random_threadpool_name");
try {
IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null, threadPool);
IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null);
IndexFieldDataService ifds = new IndexFieldDataService(IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), cache, null, null);
ft.setName("some_long");
ft.setHasDocValues(true);
@ -238,7 +239,7 @@ public class IndexFieldDataServiceTests extends ESSingleNodeTestCase {
ThreadPool threadPool = new ThreadPool("random_threadpool_name");
StringFieldMapper.StringFieldType ft = new StringFieldMapper.StringFieldType();
try {
IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null, threadPool);
IndicesFieldDataCache cache = new IndicesFieldDataCache(Settings.EMPTY, null);
IndexFieldDataService ifds = new IndexFieldDataService(IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), cache, null, null);
ft.setName("some_str");
ft.setFieldDataType(new FieldDataType("string", Settings.builder().put(FieldDataType.FORMAT_KEY, "disabled").build()));
@ -22,6 +22,7 @@ package org.elasticsearch.index.query;
import com.carrotsearch.randomizedtesting.generators.CodepointSetGenerator;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.io.JsonStringEncoder;

import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
@ -56,7 +57,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.ToXContent;
@ -71,13 +71,13 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.support.QueryParsers;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
@ -89,9 +89,9 @@ import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptContextRegistry;
import org.elasticsearch.script.ScriptEngineRegistry;
import org.elasticsearch.script.ScriptEngineService;
import org.elasticsearch.script.ScriptSettings;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptSettings;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.ESTestCase;
@ -271,8 +271,10 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
SimilarityService similarityService = new SimilarityService(idxSettings, Collections.emptyMap());
MapperRegistry mapperRegistry = injector.getInstance(MapperRegistry.class);
MapperService mapperService = new MapperService(idxSettings, analysisService, similarityService, mapperRegistry, () -> queryShardContext);
indexFieldDataService = new IndexFieldDataService(idxSettings, injector.getInstance(IndicesFieldDataCache.class), injector.getInstance(CircuitBreakerService.class), mapperService);
BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(idxSettings, new IndicesWarmer(idxSettings.getNodeSettings(), null), new BitsetFilterCache.Listener() {
IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() {
});
indexFieldDataService = new IndexFieldDataService(idxSettings, indicesFieldDataCache, injector.getInstance(CircuitBreakerService.class), mapperService);
BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(idxSettings, new BitsetFilterCache.Listener() {
@Override
public void onCache(ShardId shardId, Accountable accountable) {
@ -297,7 +297,8 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase<SimpleQ
} else if (queryBuilder.fields().size() == 0) {
assertTermQuery(query, MetaData.ALL, queryBuilder.value());
} else {
fail("Encountered lucene query type we do not have a validation implementation for in our " + SimpleQueryStringBuilderTests.class.getSimpleName());
fail("Encountered lucene query type we do not have a validation implementation for in our "
+ SimpleQueryStringBuilderTests.class.getSimpleName());
}
}
@ -368,4 +369,37 @@ public class SimpleQueryStringBuilderTests extends AbstractQueryTestCase<SimpleQ
assertEquals(json, 2, parsed.fields().size());
assertEquals(json, "snowball", parsed.analyzer());
}

public void testMinimumShouldMatch() throws IOException {
QueryShardContext shardContext = createShardContext();
int numberOfTerms = randomIntBetween(1, 4);
StringBuilder queryString = new StringBuilder();
for (int i = 0; i < numberOfTerms; i++) {
queryString.append("t" + i + " ");
}
SimpleQueryStringBuilder simpleQueryStringBuilder = new SimpleQueryStringBuilder(queryString.toString().trim());
if (randomBoolean()) {
simpleQueryStringBuilder.defaultOperator(Operator.AND);
}
int numberOfFields = randomIntBetween(1, 4);
for (int i = 0; i < numberOfFields; i++) {
simpleQueryStringBuilder.field("f" + i);
}
int percent = randomIntBetween(1, 100);
simpleQueryStringBuilder.minimumShouldMatch(percent + "%");
Query query = simpleQueryStringBuilder.toQuery(shardContext);

// check special case: one term & one field should get simplified to a TermQuery
if (numberOfFields * numberOfTerms == 1) {
assertThat(query, instanceOf(TermQuery.class));
} else {
assertThat(query, instanceOf(BooleanQuery.class));
BooleanQuery boolQuery = (BooleanQuery) query;
int expectedMinimumShouldMatch = numberOfTerms * percent / 100;
if (simpleQueryStringBuilder.defaultOperator().equals(Operator.AND) && numberOfTerms > 1) {
expectedMinimumShouldMatch = 0;
}
assertEquals(expectedMinimumShouldMatch, boolQuery.getMinimumNumberShouldMatch());
}
}
}
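A side note on the expected value in the test above: it follows Lucene's integer arithmetic for positive minimum_should_match percentages. A minimal sketch of that rule (the helper name is hypothetical, not part of the commit):

    // floor(optionalClauses * percent / 100), via Java integer division
    static int resolveMinimumShouldMatch(int optionalClauses, int percent) {
        return optionalClauses * percent / 100;
    }
    // e.g. 3 terms at 70% -> 2 clauses required; 1 term at 50% -> 0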
@ -70,7 +70,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.NodeServicesProvider;
@ -865,10 +864,11 @@ public class IndexShardTests extends ESSingleNodeTestCase {
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test");
final IndexShard shard = test.getShardOrNull(0);

int translogOps = 1;
client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get();
if (randomBoolean()) {
client().admin().indices().prepareFlush().get();
translogOps = 0;
}
ShardRouting routing = new ShardRouting(shard.routingEntry());
test.removeShard(0, "b/c simon says so");
@ -878,6 +878,10 @@ public class IndexShardTests extends ESSingleNodeTestCase {
DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT);
newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.STORE, localNode, localNode));
assertTrue(newShard.recoverFromStore(localNode));
assertEquals(translogOps, newShard.recoveryState().getTranslog().recoveredOperations());
assertEquals(translogOps, newShard.recoveryState().getTranslog().totalOperations());
assertEquals(translogOps, newShard.recoveryState().getTranslog().totalOperationsOnStart());
assertEquals(100.0f, newShard.recoveryState().getTranslog().recoveredPercent(), 0.01f);
routing = new ShardRouting(routing);
ShardRoutingHelper.moveToStarted(routing);
newShard.updateRoutingEntry(routing, true);
@ -885,6 +889,36 @@ public class IndexShardTests extends ESSingleNodeTestCase {
assertHitCount(response, 1);
}

public void testRecoverFromCleanStore() throws IOException {
createIndex("test");
ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test");
final IndexShard shard = test.getShardOrNull(0);
client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get();
if (randomBoolean()) {
client().admin().indices().prepareFlush().get();
}
ShardRouting routing = new ShardRouting(shard.routingEntry());
test.removeShard(0, "b/c simon says so");
ShardRoutingHelper.reinit(routing, UnassignedInfo.Reason.INDEX_CREATED);
IndexShard newShard = test.createShard(routing);
newShard.updateRoutingEntry(routing, false);
DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT);
newShard.markAsRecovering("store", new RecoveryState(newShard.shardId(), routing.primary(), RecoveryState.Type.STORE, localNode,
localNode));
assertTrue(newShard.recoverFromStore(localNode));
assertEquals(0, newShard.recoveryState().getTranslog().recoveredOperations());
assertEquals(0, newShard.recoveryState().getTranslog().totalOperations());
assertEquals(0, newShard.recoveryState().getTranslog().totalOperationsOnStart());
assertEquals(100.0f, newShard.recoveryState().getTranslog().recoveredPercent(), 0.01f);
routing = new ShardRouting(routing);
ShardRoutingHelper.moveToStarted(routing);
newShard.updateRoutingEntry(routing, true);
SearchResponse response = client().prepareSearch().get();
assertHitCount(response, 0);
}

public void testFailIfIndexNotPresentInRecoverFromStore() throws IOException {
createIndex("test");
ensureGreen();
@ -1154,7 +1188,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ShardRouting routing = new ShardRouting(shard.routingEntry());
shard.close("simon says", true);
NodeServicesProvider indexServices = indexService.getIndexServices();
IndexShard newShard = new IndexShard(shard.shardId(), indexService.getIndexSettings(), shard.shardPath(), shard.store(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), indexService.fieldData(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper, indexServices, indexService.getSearchSlowLog(), listeners);
IndexShard newShard = new IndexShard(shard.shardId(), indexService.getIndexSettings(), shard.shardPath(), shard.store(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), indexService.fieldData(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper, indexServices, indexService.getSearchSlowLog(), null, listeners);
ShardRoutingHelper.reinit(routing);
newShard.updateRoutingEntry(routing, false);
DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT);
@ -1187,7 +1221,8 @@ public class IndexShardTests extends ESSingleNodeTestCase {
List<Translog.Operation> operations = new ArrayList<>();
operations.add(new Translog.Index("testtype", "1", jsonBuilder().startObject().field("foo", "bar").endObject().bytes().toBytes()));
newShard.prepareForIndexRecovery();
newShard.performTranslogRecovery(true);
newShard.recoveryState().getTranslog().totalOperations(operations.size());
newShard.skipTranslogRecovery();
newShard.performBatchRecovery(operations);
assertFalse(newShard.getTranslog().syncNeeded());
}
@ -1460,8 +1460,8 @@ public class TranslogTests extends ESTestCase {
}
boolean atLeastOneFailed = false;
for (Throwable ex : threadExceptions) {
assertTrue(ex.toString(), ex instanceof IOException || ex instanceof AlreadyClosedException);
if (ex != null) {
assertTrue(ex.toString(), ex instanceof IOException || ex instanceof AlreadyClosedException);
atLeastOneFailed = true;
}
}
@ -37,6 +37,7 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.MockEngineFactoryPlugin;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.sort.SortOrder;
@ -176,7 +177,7 @@ public class RandomExceptionCircuitBreakerIT extends ESIntegTestCase {

// Since .cleanUp() is no longer called on cache clear, we need to call it on each node manually
for (String node : internalCluster().getNodeNames()) {
final IndicesFieldDataCache fdCache = internalCluster().getInstance(IndicesFieldDataCache.class, node);
final IndicesFieldDataCache fdCache = internalCluster().getInstance(IndicesService.class, node).getIndicesFieldDataCache();
// Clean up the cache, ensuring that entries' listeners have been called
fdCache.getCache().refresh();
}
@ -20,20 +20,45 @@
package org.elasticsearch.rest.action.support;

import org.elasticsearch.common.Table;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.rest.FakeRestRequest;
import org.junit.Before;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;

import static org.elasticsearch.rest.action.support.RestTable.buildDisplayHeaders;
import static org.elasticsearch.rest.action.support.RestTable.buildResponse;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.not;

public class RestTableTests extends ESTestCase {

private static final String APPLICATION_JSON = XContentType.JSON.mediaType();
private static final String APPLICATION_YAML = XContentType.YAML.mediaType();
private static final String APPLICATION_SMILE = XContentType.SMILE.mediaType();
private static final String APPLICATION_CBOR = XContentType.CBOR.mediaType();
private static final String CONTENT_TYPE = "Content-Type";
private static final String ACCEPT = "Accept";
private static final String TEXT_PLAIN = "text/plain; charset=UTF-8";
private static final String TEXT_TABLE_BODY = "foo foo foo foo foo foo\n";
private static final String JSON_TABLE_BODY = "[{\"bulk.foo\":\"foo\",\"bulk.bar\":\"foo\",\"aliasedBulk\":\"foo\"," +
"\"aliasedSecondBulk\":\"foo\",\"unmatched\":\"foo\"," +
"\"invalidAliasesBulk\":\"foo\"}]";
private static final String YAML_TABLE_BODY = "---\n" +
"- bulk.foo: \"foo\"\n" +
"  bulk.bar: \"foo\"\n" +
"  aliasedBulk: \"foo\"\n" +
"  aliasedSecondBulk: \"foo\"\n" +
"  unmatched: \"foo\"\n" +
"  invalidAliasesBulk: \"foo\"\n";
private Table table = new Table();
private FakeRestRequest restRequest = new FakeRestRequest();
@ -70,6 +95,65 @@ public class RestTableTests extends ESTestCase {
assertThat(headerNames, not(hasItem("unmatched")));
}

public void testThatWeUseTheAcceptHeaderJson() throws Exception {
assertResponse(Collections.singletonMap(ACCEPT, APPLICATION_JSON),
APPLICATION_JSON,
JSON_TABLE_BODY);
}

public void testThatWeUseTheAcceptHeaderYaml() throws Exception {
assertResponse(Collections.singletonMap(ACCEPT, APPLICATION_YAML),
APPLICATION_YAML,
YAML_TABLE_BODY);
}

public void testThatWeUseTheAcceptHeaderSmile() throws Exception {
assertResponseContentType(Collections.singletonMap(ACCEPT, APPLICATION_SMILE),
APPLICATION_SMILE);
}

public void testThatWeUseTheAcceptHeaderCbor() throws Exception {
assertResponseContentType(Collections.singletonMap(ACCEPT, APPLICATION_CBOR),
APPLICATION_CBOR);
}

public void testThatWeUseTheAcceptHeaderText() throws Exception {
assertResponse(Collections.singletonMap(ACCEPT, TEXT_PLAIN),
TEXT_PLAIN,
TEXT_TABLE_BODY);
}

public void testIgnoreContentType() throws Exception {
assertResponse(Collections.singletonMap(CONTENT_TYPE, APPLICATION_JSON),
TEXT_PLAIN,
TEXT_TABLE_BODY);
}

private RestResponse assertResponseContentType(Map<String, String> headers, String mediaType) throws Exception {
FakeRestRequest requestWithAcceptHeader = new FakeRestRequest(headers);
table.startRow();
table.addCell("foo");
table.addCell("foo");
table.addCell("foo");
table.addCell("foo");
table.addCell("foo");
table.addCell("foo");
table.endRow();
RestResponse response = buildResponse(table, new RestChannel(requestWithAcceptHeader, true) {
@Override
public void sendResponse(RestResponse response) {
}
});

assertThat(response.contentType(), equalTo(mediaType));
return response;
}

private void assertResponse(Map<String, String> headers, String mediaType, String body) throws Exception {
RestResponse response = assertResponseContentType(headers, mediaType);
assertThat(response.content().toUtf8(), equalTo(body));
}

private List<String> getHeaderNames(List<RestTable.DisplayHeader> headers) {
List<String> headerNames = new ArrayList<>();
for (RestTable.DisplayHeader header : headers) {
@ -116,12 +116,21 @@ public class SimpleQueryStringIT extends ESIntegTestCase {
assertSearchHits(searchResponse, "3", "4");

logger.info("--> query 2");
searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar").field("body").field("body2").minimumShouldMatch("2")).get();
searchResponse = client().prepareSearch()
.setQuery(simpleQueryStringQuery("foo bar").field("body").field("body2").minimumShouldMatch("2")).get();
assertHitCount(searchResponse, 2L);
assertSearchHits(searchResponse, "3", "4");

// test case from #13884
logger.info("--> query 3");
searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar baz").field("body").field("body2").minimumShouldMatch("70%")).get();
searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo")
.field("body").field("body2").field("body3").minimumShouldMatch("-50%")).get();
assertHitCount(searchResponse, 3L);
assertSearchHits(searchResponse, "1", "3", "4");

logger.info("--> query 4");
searchResponse = client().prepareSearch()
.setQuery(simpleQueryStringQuery("foo bar baz").field("body").field("body2").minimumShouldMatch("70%")).get();
assertHitCount(searchResponse, 2L);
assertSearchHits(searchResponse, "3", "4");
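A side note on the `-50%` form in query 3 above: as I understand Lucene's minimum_should_match rules, a negative percentage names the share of optional clauses that may be absent rather than the share that must match. A minimal sketch of that arithmetic (the helper name is hypothetical, not part of the commit):

    // required = optional - floor(optional * |pct| / 100), via integer division
    static int requiredClauses(int optionalClauses, int missingPercent) {
        return optionalClauses - (optionalClauses * missingPercent / 100);
    }
    // e.g. 3 optional clauses at -50% -> 3 - 1 = 2 must match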
@ -131,18 +140,20 @@ public class SimpleQueryStringIT extends ESIntegTestCase {
client().prepareIndex("test", "type1", "7").setSource("body2", "foo bar", "other", "foo"),
client().prepareIndex("test", "type1", "8").setSource("body2", "foo baz bar", "other", "foo"));

logger.info("--> query 4");
searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar").field("body").field("body2").minimumShouldMatch("2")).get();
logger.info("--> query 5");
searchResponse = client().prepareSearch()
.setQuery(simpleQueryStringQuery("foo bar").field("body").field("body2").minimumShouldMatch("2")).get();
assertHitCount(searchResponse, 4L);
assertSearchHits(searchResponse, "3", "4", "7", "8");

logger.info("--> query 5");
logger.info("--> query 6");
searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar").minimumShouldMatch("2")).get();
assertHitCount(searchResponse, 5L);
assertSearchHits(searchResponse, "3", "4", "6", "7", "8");

logger.info("--> query 6");
searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("foo bar baz").field("body2").field("other").minimumShouldMatch("70%")).get();
logger.info("--> query 7");
searchResponse = client().prepareSearch()
.setQuery(simpleQueryStringQuery("foo bar baz").field("body2").field("other").minimumShouldMatch("70%")).get();
assertHitCount(searchResponse, 3L);
assertSearchHits(searchResponse, "6", "7", "8");
}
@ -330,7 +341,8 @@ public class SimpleQueryStringIT extends ESIntegTestCase {
indexRandom(true, client().prepareIndex("test1", "type1", "1").setSource("location", "Köln"));
refresh();

SearchResponse searchResponse = client().prepareSearch().setQuery(simpleQueryStringQuery("Köln*").analyzeWildcard(true).field("location")).get();
SearchResponse searchResponse = client().prepareSearch()
.setQuery(simpleQueryStringQuery("Köln*").analyzeWildcard(true).field("location")).get();
assertNoFailures(searchResponse);
assertHitCount(searchResponse, 1L);
assertSearchHits(searchResponse, "1");
File diff suppressed because it is too large
@ -156,7 +156,7 @@ def remove_plugin(version, release_dir, plugin_name):
run_plugin(version, release_dir, 'remove', [plugin_name])

def run_plugin(version, release_dir, plugin_cmd, args):
cmd = [os.path.join(release_dir, 'bin/plugin'), plugin_cmd] + args
cmd = [os.path.join(release_dir, 'bin/elasticsearch-plugin'), plugin_cmd] + args
subprocess.check_call(cmd)

def create_client(http_port=DEFAULT_HTTP_TCP_PORT, timeout=30):
@ -56,14 +56,14 @@ The packages may be downloaded from the following URLs:

Plugins can be installed as follows:

bin/plugin -Des.plugins.staging=true install cloud-aws
bin/elasticsearch-plugin -Des.plugins.staging=true install cloud-aws

The same goes for the x-plugins:

bin/plugin -Des.plugins.staging=true install license
bin/plugin -Des.plugins.staging=true install marvel-agent
bin/plugin -Des.plugins.staging=true install shield
bin/plugin -Des.plugins.staging=true install watcher
bin/elasticsearch-plugin -Des.plugins.staging=true install license
bin/elasticsearch-plugin -Des.plugins.staging=true install marvel-agent
bin/elasticsearch-plugin -Des.plugins.staging=true install shield
bin/elasticsearch-plugin -Des.plugins.staging=true install watcher

To install the deb from an APT repo:
@ -178,7 +178,7 @@ def smoke_test_release(release, files, expected_hash, plugins):
continue # nothing to do here
es_run_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/elasticsearch')
print('  Smoke testing package [%s]' % release_file)
es_plugin_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/plugin')
es_plugin_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/elasticsearch-plugin')
plugin_names = {}
for plugin in plugins:
print('  Install plugin [%s]' % (plugin))
@ -372,7 +372,7 @@ task run(type: RunTask) {}

/**
 * Build some variables that are replaced in the packages. This includes both
 * scripts like bin/elasticsearch and bin/plugin that a user might run and also
 * scripts like bin/elasticsearch and bin/elasticsearch-plugin that a user might run and also
 * scripts like postinst which are run as part of the installation.
 *
 * <dl>

@ -384,7 +384,7 @@ task run(type: RunTask) {}
 * <dt>path.conf</dt>
 * <dd>The default directory from which to load configuration. This is used in
 *   the packaging scripts, but in that context it is always
 *   /etc/elasticsearch. Its also used in bin/plugin, where it is
 *   /etc/elasticsearch. Its also used in bin/elasticsearch-plugin, where it is
 *   /etc/elasticsearch for the os packages but $ESHOME/config otherwise.</dd>
 * <dt>path.env</dt>
 * <dd>The env file sourced before bin/elasticsearch to set environment
@ -15,7 +15,7 @@ This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install analysis-icu
sudo bin/elasticsearch-plugin install analysis-icu
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must

@ -29,7 +29,7 @@ The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove analysis-icu
sudo bin/elasticsearch-plugin remove analysis-icu
----------------------------------------------------------------

The node must be stopped before removing the plugin.

@ -12,7 +12,7 @@ This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install analysis-kuromoji
sudo bin/elasticsearch-plugin install analysis-kuromoji
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must

@ -26,7 +26,7 @@ The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove analysis-kuromoji
sudo bin/elasticsearch-plugin remove analysis-kuromoji
----------------------------------------------------------------

The node must be stopped before removing the plugin.

@ -13,7 +13,7 @@ This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install analysis-phonetic
sudo bin/elasticsearch-plugin install analysis-phonetic
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must

@ -27,7 +27,7 @@ The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove analysis-phonetic
sudo bin/elasticsearch-plugin remove analysis-phonetic
----------------------------------------------------------------

The node must be stopped before removing the plugin.

@ -18,7 +18,7 @@ This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install analysis-smartcn
sudo bin/elasticsearch-plugin install analysis-smartcn
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must

@ -32,7 +32,7 @@ The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove analysis-smartcn
sudo bin/elasticsearch-plugin remove analysis-smartcn
----------------------------------------------------------------

The node must be stopped before removing the plugin.

@ -15,7 +15,7 @@ This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install analysis-stempel
sudo bin/elasticsearch-plugin install analysis-stempel
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must

@ -29,7 +29,7 @@ The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove analysis-stempel
sudo bin/elasticsearch-plugin remove analysis-stempel
----------------------------------------------------------------

The node must be stopped before removing the plugin.
@ -68,7 +68,7 @@ in the presence of plugins with the incorrect `elasticsearch.version`.
=== Testing your plugin

When testing a Java plugin, it will only be auto-loaded if it is in the
`plugins/` directory. Use `bin/plugin install file:///path/to/your/plugin`
`plugins/` directory. Use `bin/elasticsearch-plugin install file:///path/to/your/plugin`
to install your plugin for testing.

You may also load your plugin within the test framework for integration tests.
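(A minimal sketch of that test-framework loading, assuming a hypothetical plugin class `MyPlugin`; the override below is the `ESIntegTestCase` hook for registering node plugins.)

[source,java]
----------------------------------------------------------------
import java.util.Collection;
import java.util.Collections;

import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;

public class MyPluginIT extends ESIntegTestCase {
    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        // MyPlugin is a placeholder for your own plugin class
        return Collections.singletonList(MyPlugin.class);
    }
}
----------------------------------------------------------------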
@ -30,7 +30,7 @@ This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install delete-by-query
sudo bin/elasticsearch-plugin install delete-by-query
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must

@ -43,7 +43,7 @@ The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove delete-by-query
sudo bin/elasticsearch-plugin remove delete-by-query
----------------------------------------------------------------

The node must be stopped before removing the plugin.
@ -11,7 +11,7 @@ This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install discovery-azure
sudo bin/elasticsearch-plugin install discovery-azure
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must

@ -25,7 +25,7 @@ The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove discovery-azure
sudo bin/elasticsearch-plugin remove discovery-azure
----------------------------------------------------------------

The node must be stopped before removing the plugin.

@ -403,7 +403,7 @@ This command should give you a JSON result:
sudo service elasticsearch stop

# Install the plugin
sudo /usr/share/elasticsearch/bin/plugin install discovery-azure
sudo /usr/share/elasticsearch/bin/elasticsearch-plugin install discovery-azure

# Configure it
sudo vi /etc/elasticsearch/elasticsearch.yml
@ -11,7 +11,7 @@ This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install discovery-ec2
sudo bin/elasticsearch-plugin install discovery-ec2
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must

@ -25,7 +25,7 @@ The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove discovery-ec2
sudo bin/elasticsearch-plugin remove discovery-ec2
----------------------------------------------------------------

The node must be stopped before removing the plugin.
@ -11,7 +11,7 @@ This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install discovery-gce
sudo bin/elasticsearch-plugin install discovery-gce
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must

@ -25,7 +25,7 @@ The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove discovery-gce
sudo bin/elasticsearch-plugin remove discovery-gce
----------------------------------------------------------------

The node must be stopped before removing the plugin.

@ -216,7 +216,7 @@ Install the plugin:
[source,sh]
--------------------------------------------------
# Use Plugin Manager to install it
sudo bin/plugin install discovery-gce
sudo bin/elasticsearch-plugin install discovery-gce
--------------------------------------------------

Open the `elasticsearch.yml` file:
@ -13,7 +13,7 @@ This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install lang-javascript
sudo bin/elasticsearch-plugin install lang-javascript
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must

@ -27,7 +27,7 @@ The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove lang-javascript
sudo bin/elasticsearch-plugin remove lang-javascript
----------------------------------------------------------------

The node must be stopped before removing the plugin.

@ -12,7 +12,7 @@ This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install lang-python
sudo bin/elasticsearch-plugin install lang-python
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must

@ -26,7 +26,7 @@ The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove lang-python
sudo bin/elasticsearch-plugin remove lang-python
----------------------------------------------------------------

The node must be stopped before removing the plugin.

@ -15,7 +15,7 @@ This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install mapper-attachments
sudo bin/elasticsearch-plugin install mapper-attachments
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must

@ -29,7 +29,7 @@ The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove mapper-attachments
sudo bin/elasticsearch-plugin remove mapper-attachments
----------------------------------------------------------------

The node must be stopped before removing the plugin.

@ -13,7 +13,7 @@ This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install mapper-murmur3
sudo bin/elasticsearch-plugin install mapper-murmur3
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must

@ -27,7 +27,7 @@ The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove mapper-murmur3
sudo bin/elasticsearch-plugin remove mapper-murmur3
----------------------------------------------------------------

The node must be stopped before removing the plugin.

@ -13,7 +13,7 @@ This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install mapper-size
sudo bin/elasticsearch-plugin install mapper-size
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must

@ -27,7 +27,7 @@ The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove mapper-size
sudo bin/elasticsearch-plugin remove mapper-size
----------------------------------------------------------------

The node must be stopped before removing the plugin.
@ -10,15 +10,15 @@ Run the following command to get usage instructions:
|
|||
|
||||
[source,shell]
|
||||
-----------------------------------
|
||||
sudo bin/plugin -h
|
||||
sudo bin/elasticsearch-plugin -h
|
||||
-----------------------------------
|
||||
|
||||
[IMPORTANT]
|
||||
.Running as root
|
||||
=====================
|
||||
If Elasticsearch was installed using the deb or rpm package then run
|
||||
`bin/plugin` as `root` so it can write to the appropriate files on disk.
|
||||
Otherwise run `bin/plugin` as the user that owns all of the Elasticsearch
|
||||
`bin/elasticsearch-plugin` as `root` so it can write to the appropriate files on disk.
|
||||
Otherwise run `bin/elasticsearch-plugin` as the user that owns all of the Elasticsearch
|
||||
files.
|
||||
=====================
|
||||
|
||||
|
@ -36,7 +36,7 @@ Core Elasticsearch plugins can be installed as follows:
|
|||
|
||||
[source,shell]
|
||||
-----------------------------------
|
||||
sudo bin/plugin install [plugin_name]
|
||||
sudo bin/elasticsearch-plugin install [plugin_name]
|
||||
-----------------------------------
|
||||
|
||||
For instance, to install the core <<analysis-icu,ICU plugin>>, just run the
|
||||
|
@ -44,7 +44,7 @@ following command:
|
|||
|
||||
[source,shell]
|
||||
-----------------------------------
|
||||
sudo bin/plugin install analysis-icu
|
||||
sudo bin/elasticsearch-plugin install analysis-icu
|
||||
-----------------------------------
|
||||
|
||||
This command will install the version of the plugin that matches your
|
||||
|
@ -59,7 +59,7 @@ and Sonatype), or from GitHub. In this case, the command is as follows:
|
|||
|
||||
[source,shell]
|
||||
-----------------------------------
|
||||
sudo bin/plugin install [org]/[user|component]/[version]
|
||||
sudo bin/elasticsearch-plugin install [org]/[user|component]/[version]
|
||||
-----------------------------------
|
||||
|
||||
For instance, to install the https://github.com/lmenezes/elasticsearch-kopf[Kopf]
|
||||
|
@ -67,8 +67,8 @@ plugin from GitHub, run one of the following commands:
|
|||
|
||||
[source,shell]
|
||||
-----------------------------------
|
||||
sudo bin/plugin install lmenezes/elasticsearch-kopf <1>
|
||||
sudo bin/plugin install lmenezes/elasticsearch-kopf/2.x <2>
|
||||
sudo bin/elasticsearch-plugin install lmenezes/elasticsearch-kopf <1>
|
||||
sudo bin/elasticsearch-plugin install lmenezes/elasticsearch-kopf/2.x <2>
|
||||
-----------------------------------
|
||||
<1> Installs the latest version from GitHub.
|
||||
<2> Installs the 1.x version from GitHub.
|
||||
|
@ -80,7 +80,7 @@ plugin from Sonatype, run:

[source,shell]
-----------------------------------
sudo bin/plugin install org.elasticsearch.plugin/mapper-attachments/3.0.0 <1>
sudo bin/elasticsearch-plugin install org.elasticsearch.plugin/mapper-attachments/3.0.0 <1>
-----------------------------------
<1> When installing from `download.elastic.co` or from Maven Central/Sonatype, the
version is required.

@ -92,7 +92,7 @@ A plugin can also be downloaded directly from a custom location by specifying th

[source,shell]
-----------------------------------
sudo bin/plugin install [url] <1>
sudo bin/elasticsearch-plugin install [url] <1>
-----------------------------------
<1> must be a valid URL, the plugin name is determined from its descriptor.

@ -100,7 +100,7 @@ For instance, to install a plugin from your local file system, you could run:

[source,shell]
-----------------------------------
sudo bin/plugin install file:///path/to/plugin.zip
sudo bin/elasticsearch-plugin install file:///path/to/plugin.zip
-----------------------------------

The plugin script will refuse to talk to an HTTPS URL with an untrusted

@ -109,7 +109,7 @@ to a local Java truststore and pass the location to the script as follows:

[source,shell]
-----------------------------------
sudo bin/plugin -Djavax.net.ssl.trustStore=/path/to/trustStore.jks install https://....
sudo bin/elasticsearch-plugin -Djavax.net.ssl.trustStore=/path/to/trustStore.jks install https://....
-----------------------------------

[[listing-removing]]

@ -122,7 +122,7 @@ A list of the currently loaded plugins can be retrieved with the `list` option:

[source,shell]
-----------------------------------
sudo bin/plugin list
sudo bin/elasticsearch-plugin list
-----------------------------------

Alternatively, use the {ref}/cluster-nodes-info.html[node-info API] to find
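(A hedged illustration of that node-info lookup — the endpoint follows the linked reference; host and port are placeholders:)

[source,shell]
-----------------------------------
curl -XGET 'localhost:9200/_nodes/plugins?pretty'
-----------------------------------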
@ -136,7 +136,7 @@ Plugins can be removed manually, by deleting the appropriate directory under

[source,shell]
-----------------------------------
sudo bin/plugin remove [pluginname]
sudo bin/elasticsearch-plugin remove [pluginname]
-----------------------------------

After a Java plugin has been removed, you will need to restart the node to complete the removal process.

@ -167,7 +167,7 @@ can do this as follows:

[source,sh]
---------------------
sudo bin/plugin -Des.path.conf=/path/to/custom/config/dir install <plugin name>
sudo bin/elasticsearch-plugin -Des.path.conf=/path/to/custom/config/dir install <plugin name>
---------------------

You can also set the `CONF_DIR` environment variable to the custom config
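(A sketch of the `CONF_DIR` variant, assuming the plugin script honors the variable as described; the directory is a placeholder:)

[source,sh]
---------------------
sudo CONF_DIR=/path/to/custom/config/dir bin/elasticsearch-plugin install <plugin name>
---------------------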
@ -183,13 +183,13 @@ waits. Here are some examples of setting it to different values:
[source,shell]
-----------------------------------
# Wait for 30 seconds before failing
sudo bin/plugin install mobz/elasticsearch-head --timeout 30s
sudo bin/elasticsearch-plugin install mobz/elasticsearch-head --timeout 30s

# Wait for 1 minute before failing
sudo bin/plugin install mobz/elasticsearch-head --timeout 1m
sudo bin/elasticsearch-plugin install mobz/elasticsearch-head --timeout 1m

# Wait forever (default)
sudo bin/plugin install mobz/elasticsearch-head --timeout 0
sudo bin/elasticsearch-plugin install mobz/elasticsearch-head --timeout 0
-----------------------------------

[float]

@ -201,7 +201,7 @@ options can be set on the command line:

[source,shell]
-----------------------------------
sudo bin/plugin install mobz/elasticsearch-head -DproxyHost=host_name -DproxyPort=port_number
sudo bin/elasticsearch-plugin install mobz/elasticsearch-head -DproxyHost=host_name -DproxyPort=port_number
-----------------------------------

On Windows, they need to be added to the `JAVA_OPTS` environment variable:

@ -209,7 +209,7 @@ On Windows, they need to be added to the `JAVA_OPTS` environment variable:
[source,shell]
-----------------------------------
set JAVA_OPTS="-DproxyHost=host_name -DproxyPort=port_number"
bin/plugin install mobz/elasticsearch-head
bin/elasticsearch-plugin install mobz/elasticsearch-head
-----------------------------------

=== Settings related to plugins
@ -12,7 +12,7 @@ This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install repository-azure
sudo bin/elasticsearch-plugin install repository-azure
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must

@ -26,7 +26,7 @@ The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove repository-azure
sudo bin/elasticsearch-plugin remove repository-azure
----------------------------------------------------------------

The node must be stopped before removing the plugin.

@ -12,7 +12,7 @@ This plugin can be installed through the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install repository-hdfs
sudo bin/elasticsearch-plugin install repository-hdfs
----------------------------------------------------------------

The plugin must be installed on _every_ node in the cluster, and each node must

@ -26,7 +26,7 @@ The plugin can be removed by specifying the _installed_ package:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove repository-hdfs
sudo bin/elasticsearch-plugin remove repository-hdfs
----------------------------------------------------------------

The node must be stopped before removing the plugin.

@ -12,7 +12,7 @@ This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install repository-s3
sudo bin/elasticsearch-plugin install repository-s3
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must

@ -26,7 +26,7 @@ The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove repository-s3
sudo bin/elasticsearch-plugin remove repository-s3
----------------------------------------------------------------

The node must be stopped before removing the plugin.

@ -11,7 +11,7 @@ This plugin can be installed using the plugin manager:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin install store-smb
sudo bin/elasticsearch-plugin install store-smb
----------------------------------------------------------------

The plugin must be installed on every node in the cluster, and each node must

@ -25,7 +25,7 @@ The plugin can be removed with the following command:

[source,sh]
----------------------------------------------------------------
sudo bin/plugin remove store-smb
sudo bin/elasticsearch-plugin remove store-smb
----------------------------------------------------------------

The node must be stopped before removing the plugin.
@ -95,6 +95,55 @@ green wiki1 3 0 10000 413 103776272 103776272
green foo   1 0   227 0   2065131   2065131
--------------------------------------------------

[float]
=== Response as text, json, smile, yaml or cbor

[source,sh]
--------------------------------------------------
% curl '192.168.56.10:9200/_cat/indices?format=json' | jq .
[
  {
    "pri.store.size": "650b",
    "health": "yellow",
    "status": "open",
    "index": "twitter",
    "pri": "5",
    "rep": "1",
    "docs.count": "0",
    "docs.deleted": "0",
    "store.size": "650b"
  }
]
--------------------------------------------------

Currently supported formats (for the `?format=` parameter):
- text (default)
- json
- smile
- yaml
- cbor

Alternatively you can set the "Accept" HTTP header to the appropriate media format.
All formats above are supported; the GET parameter takes precedence over the header.
For example:

[source,sh]
--------------------------------------------------
% curl '192.168.56.10:9200/_cat/indices' -H "Accept: application/json" | jq .
[
  {
    "pri.store.size": "650b",
    "health": "yellow",
    "status": "open",
    "index": "twitter",
    "pri": "5",
    "rep": "1",
    "docs.count": "0",
    "docs.deleted": "0",
    "store.size": "650b"
  }
]
--------------------------------------------------
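(A hedged illustration of that precedence — both the parameter and the header are set, and per the statement above the parameter should win, so the response comes back as YAML; host and port are placeholders:)

[source,sh]
--------------------------------------------------
% curl '192.168.56.10:9200/_cat/indices?format=yaml' -H "Accept: application/json"
--------------------------------------------------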

--
@ -32,7 +32,7 @@ the foreground, was deprecated in 1.0 and removed in 2.0.

==== `V` for version

The `-v` parameter now means `--verbose` for both `bin/plugin` and
The `-v` parameter now means `--verbose` for both `bin/elasticsearch-plugin` and
`bin/elasticsearch` (although it has no effect on the latter). To output the
version, use `-V` or `--version` instead.
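(For example — a hedged sketch of the version flags described above:)

[source,sh]
---------------
bin/elasticsearch-plugin -V
bin/elasticsearch --version
---------------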
@ -52,7 +52,7 @@ Official plugins can be installed as follows:

[source,sh]
---------------
sudo bin/plugin install analysis-icu
sudo bin/elasticsearch-plugin install analysis-icu
---------------

Community-provided plugins can be installed as before.
@ -38,7 +38,7 @@ can install the plugin with:

[source,sh]
------------------
./bin/plugin install delete-by-query
./bin/elasticsearch-plugin install delete-by-query
------------------

See {plugins}/plugins-delete-by-query.html for more information.

@ -55,7 +55,7 @@ still need to use multicast discovery, you can install the plugin with:

[source,sh]
------------------
./bin/plugin install discovery-multicast
./bin/elasticsearch-plugin install discovery-multicast
------------------

==== `_shutdown` API

@ -70,7 +70,7 @@ out of core and is available as a plugin. It can be installed as:

[source,sh]
------------------
./bin/plugin install mapper-murmur3
./bin/elasticsearch-plugin install mapper-murmur3
------------------

==== `_size` is now a plugin

@ -81,7 +81,7 @@ can be installed as:

[source,sh]
------------------
./bin/plugin install mapper-size
./bin/elasticsearch-plugin install mapper-size
------------------

==== Thrift and memcached transport

@ -182,7 +182,7 @@ The location of a custom config directory may be specified as follows:
[source,sh]
--------------
./bin/elasticsearch --path.conf=/path/to/conf/dir
./bin/plugin -Des.path.conf=/path/to/conf/dir install analysis-icu
./bin/elasticsearch-plugin -Des.path.conf=/path/to/conf/dir install analysis-icu
--------------

When using the RPM or debian packages, the plugin script and the
@ -6,6 +6,7 @@ your application to Elasticsearch 3.0.

* <<breaking_30_search_changes>>
* <<breaking_30_rest_api_changes>>
* <<breaking_30_cat_api>>
* <<breaking_30_parent_child_changes>>
* <<breaking_30_settings_changes>>
* <<breaking_30_mapping_changes>>
@ -158,6 +159,18 @@ Removed support for the undocumented `query_binary` and `filter_binary` sections

Payloads are now loaded when needed.

[[breaking_30_cat_api]]
=== CAT API changes

==== Use Accept header for specifying response media type

Previous versions of Elasticsearch accepted the Content-type header
field for controlling the media type of the response in the cat API.
This is in opposition to the HTTP spec which specifies the Accept
header field for this purpose. Elasticsearch now uses the Accept header
field and support for using the Content-Type header field for this
purpose has been removed.
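
(A hedged before/after sketch of the change described above; host and port are placeholders:)

[source,sh]
-----------------------------------
# 3.0: the Accept header selects the response media type
curl 'localhost:9200/_cat/indices' -H "Accept: application/json"

# No longer honored for cat responses:
curl 'localhost:9200/_cat/indices' -H "Content-Type: application/json"
-----------------------------------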

[[breaking_30_parent_child_changes]]
=== Parent/Child changes
@ -301,7 +314,7 @@ On all types but `string`, the `index` property now only accepts `true`/`false`
instead of `not_analyzed`/`no`. The `string` field still accepts
`analyzed`/`not_analyzed`/`no`.

==== `_source`'s `format` option
==== ++_source++'s `format` option

The `_source` mapping does not support the `format` option anymore. This option
will still be accepted for indices created before the upgrade to 3.0 for backward
@ -331,6 +344,8 @@ disable doc values is by using the `doc_values` property of mappings.
[[breaking_30_plugins]]
=== Plugin changes

The command `bin/plugin` has been renamed to `bin/elasticsearch-plugin`.

==== Site plugins removed

Site plugins have been removed. It is recommended to migrate site plugins to Kibana plugins.
File diff suppressed because it is too large
@ -43,6 +43,7 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.QueryShardContext;

@ -51,7 +52,6 @@ import org.elasticsearch.index.query.functionscore.ScoreFunctionParser;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;

@ -138,8 +138,9 @@ public class TemplateQueryParserTests extends ESTestCase {
SimilarityService similarityService = new SimilarityService(idxSettings, Collections.emptyMap());
MapperRegistry mapperRegistry = new IndicesModule().getMapperRegistry();
MapperService mapperService = new MapperService(idxSettings, analysisService, similarityService, mapperRegistry, () -> context);
IndexFieldDataService indexFieldDataService = new IndexFieldDataService(idxSettings, injector.getInstance(IndicesFieldDataCache.class), injector.getInstance(CircuitBreakerService.class), mapperService);
BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(idxSettings, new IndicesWarmer(idxSettings.getNodeSettings(), null), new BitsetFilterCache.Listener() {
IndicesFieldDataCache cache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() {});
IndexFieldDataService indexFieldDataService = new IndexFieldDataService(idxSettings, cache, injector.getInstance(CircuitBreakerService.class), mapperService);
BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(idxSettings, new BitsetFilterCache.Listener() {
@Override
public void onCache(ShardId shardId, Accountable accountable) {
@@ -37,6 +37,11 @@ import java.security.cert.Certificate;

  * the {@link Writer}.
  */
 final class Compiler {
+    /**
+     * The maximum number of characters allowed in the script source.
+     */
+    static int MAXIMUM_SOURCE_LENGTH = 16384;
+
     /**
      * The default language API to be used with Painless. The second construction is used
      * to finalize all the variables, so there is no mistake of modification afterwards.

@@ -92,6 +97,12 @@ final class Compiler {

      */
     static Executable compile(final Loader loader, final String name, final String source,
                               final Definition custom, final CompilerSettings settings) {
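+        // Guard clause: reject overlong sources up front, before any parse tree is built.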
+        if (source.length() > MAXIMUM_SOURCE_LENGTH) {
+            throw new IllegalArgumentException("Scripts may be no longer than " + MAXIMUM_SOURCE_LENGTH +
+                " characters. The passed in script is " + source.length() + " characters. Consider using a" +
+                " plugin if a script longer than this length is a requirement.");
+        }
+
         final Definition definition = custom != null ? new Definition(custom) : DEFAULT_DEFINITION;
         final ParserRuleContext root = createParseTree(source, definition);
         final Metadata metadata = new Metadata(definition, source, root, settings);

@@ -628,7 +628,7 @@ public class Utility {

     /**
      * Like {@link Math#toIntExact(long)} but for long range.
      */
-    public static long toLongExactWithoutOverflow(float value) {
+    public static long toLongWithoutOverflow(float value) {
         if (value < Long.MIN_VALUE || value > Long.MAX_VALUE) {
             throw new ArithmeticException("long overflow");
         }

@@ -638,7 +638,7 @@ public class Utility {

     /**
      * Like {@link Math#toIntExact(long)} but for long range.
      */
-    public static float toLongExactWithoutOverflow(double value) {
+    public static float toLongWithoutOverflow(double value) {
         if (value < Long.MIN_VALUE || value > Long.MAX_VALUE) {
             throw new ArithmeticException("long overflow");
         }

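The renamed helper's contract is easy to demonstrate. A minimal standalone sketch (not from this diff; the range check is copied from the hunk above, and the final cast is an assumption since the diff does not show the method's return statement):

[source,java]
----------------------------------
public class ToLongWithoutOverflowDemo {
    // Range check as in the renamed Utility method above; the closing cast is assumed.
    static long toLongWithoutOverflow(float value) {
        if (value < Long.MIN_VALUE || value > Long.MAX_VALUE) {
            throw new ArithmeticException("long overflow");
        }
        return (long) value;
    }

    public static void main(String[] args) {
        System.out.println(toLongWithoutOverflow(42.0f)); // prints 42
        try {
            toLongWithoutOverflow(Float.MAX_VALUE);       // ~3.4e38, far outside the long range
        } catch (ArithmeticException e) {
            System.out.println(e.getMessage());           // prints "long overflow"
        }
    }
}
----------------------------------
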
@@ -28,12 +28,67 @@ import org.elasticsearch.painless.Definition.Method;

 import org.elasticsearch.painless.Definition.Sort;
 import org.elasticsearch.painless.Definition.Transform;
 import org.elasticsearch.painless.Definition.Type;
+import org.elasticsearch.painless.PainlessParser.AfterthoughtContext;
+import org.elasticsearch.painless.PainlessParser.ArgumentsContext;
+import org.elasticsearch.painless.PainlessParser.AssignmentContext;
+import org.elasticsearch.painless.PainlessParser.BinaryContext;
+import org.elasticsearch.painless.PainlessParser.BlockContext;
+import org.elasticsearch.painless.PainlessParser.BoolContext;
+import org.elasticsearch.painless.PainlessParser.BreakContext;
+import org.elasticsearch.painless.PainlessParser.CastContext;
+import org.elasticsearch.painless.PainlessParser.CharContext;
+import org.elasticsearch.painless.PainlessParser.CompContext;
+import org.elasticsearch.painless.PainlessParser.ConditionalContext;
+import org.elasticsearch.painless.PainlessParser.ContinueContext;
+import org.elasticsearch.painless.PainlessParser.DeclContext;
+import org.elasticsearch.painless.PainlessParser.DeclarationContext;
+import org.elasticsearch.painless.PainlessParser.DecltypeContext;
+import org.elasticsearch.painless.PainlessParser.DeclvarContext;
+import org.elasticsearch.painless.PainlessParser.DoContext;
+import org.elasticsearch.painless.PainlessParser.EmptyContext;
+import org.elasticsearch.painless.PainlessParser.EmptyscopeContext;
+import org.elasticsearch.painless.PainlessParser.ExprContext;
+import org.elasticsearch.painless.PainlessParser.ExpressionContext;
+import org.elasticsearch.painless.PainlessParser.ExtbraceContext;
+import org.elasticsearch.painless.PainlessParser.ExtcallContext;
+import org.elasticsearch.painless.PainlessParser.ExtcastContext;
+import org.elasticsearch.painless.PainlessParser.ExtdotContext;
+import org.elasticsearch.painless.PainlessParser.ExternalContext;
+import org.elasticsearch.painless.PainlessParser.ExtfieldContext;
+import org.elasticsearch.painless.PainlessParser.ExtnewContext;
+import org.elasticsearch.painless.PainlessParser.ExtprecContext;
+import org.elasticsearch.painless.PainlessParser.ExtstartContext;
+import org.elasticsearch.painless.PainlessParser.ExtstringContext;
+import org.elasticsearch.painless.PainlessParser.ExttypeContext;
+import org.elasticsearch.painless.PainlessParser.ExtvarContext;
+import org.elasticsearch.painless.PainlessParser.FalseContext;
+import org.elasticsearch.painless.PainlessParser.ForContext;
+import org.elasticsearch.painless.PainlessParser.IfContext;
+import org.elasticsearch.painless.PainlessParser.IncrementContext;
+import org.elasticsearch.painless.PainlessParser.InitializerContext;
+import org.elasticsearch.painless.PainlessParser.MultipleContext;
+import org.elasticsearch.painless.PainlessParser.NullContext;
+import org.elasticsearch.painless.PainlessParser.NumericContext;
+import org.elasticsearch.painless.PainlessParser.PostincContext;
+import org.elasticsearch.painless.PainlessParser.PrecedenceContext;
+import org.elasticsearch.painless.PainlessParser.PreincContext;
+import org.elasticsearch.painless.PainlessParser.ReturnContext;
+import org.elasticsearch.painless.PainlessParser.SingleContext;
+import org.elasticsearch.painless.PainlessParser.SourceContext;
+import org.elasticsearch.painless.PainlessParser.StatementContext;
+import org.elasticsearch.painless.PainlessParser.ThrowContext;
+import org.elasticsearch.painless.PainlessParser.TrapContext;
+import org.elasticsearch.painless.PainlessParser.TrueContext;
+import org.elasticsearch.painless.PainlessParser.TryContext;
+import org.elasticsearch.painless.PainlessParser.UnaryContext;
+import org.elasticsearch.painless.PainlessParser.WhileContext;
 import org.elasticsearch.script.ScoreAccessor;
 import org.objectweb.asm.ClassWriter;
 import org.objectweb.asm.Label;
 import org.objectweb.asm.Opcodes;
 import org.objectweb.asm.commons.GeneratorAdapter;

+import java.lang.invoke.MethodType;
 import java.util.ArrayDeque;
 import java.util.Deque;
 import java.util.HashMap;

@@ -43,70 +98,16 @@ import java.util.Map;

 import java.util.Set;

 import static org.elasticsearch.painless.PainlessParser.ADD;
-import static org.elasticsearch.painless.PainlessParser.AfterthoughtContext;
-import static org.elasticsearch.painless.PainlessParser.ArgumentsContext;
-import static org.elasticsearch.painless.PainlessParser.AssignmentContext;
 import static org.elasticsearch.painless.PainlessParser.BWAND;
 import static org.elasticsearch.painless.PainlessParser.BWOR;
 import static org.elasticsearch.painless.PainlessParser.BWXOR;
-import static org.elasticsearch.painless.PainlessParser.BinaryContext;
-import static org.elasticsearch.painless.PainlessParser.BlockContext;
-import static org.elasticsearch.painless.PainlessParser.BoolContext;
-import static org.elasticsearch.painless.PainlessParser.BreakContext;
-import static org.elasticsearch.painless.PainlessParser.CastContext;
-import static org.elasticsearch.painless.PainlessParser.CharContext;
-import static org.elasticsearch.painless.PainlessParser.CompContext;
-import static org.elasticsearch.painless.PainlessParser.ConditionalContext;
-import static org.elasticsearch.painless.PainlessParser.ContinueContext;
 import static org.elasticsearch.painless.PainlessParser.DIV;
-import static org.elasticsearch.painless.PainlessParser.DeclContext;
-import static org.elasticsearch.painless.PainlessParser.DeclarationContext;
-import static org.elasticsearch.painless.PainlessParser.DecltypeContext;
-import static org.elasticsearch.painless.PainlessParser.DeclvarContext;
-import static org.elasticsearch.painless.PainlessParser.DoContext;
-import static org.elasticsearch.painless.PainlessParser.EmptyContext;
-import static org.elasticsearch.painless.PainlessParser.EmptyscopeContext;
-import static org.elasticsearch.painless.PainlessParser.ExprContext;
-import static org.elasticsearch.painless.PainlessParser.ExpressionContext;
-import static org.elasticsearch.painless.PainlessParser.ExtbraceContext;
-import static org.elasticsearch.painless.PainlessParser.ExtcallContext;
-import static org.elasticsearch.painless.PainlessParser.ExtcastContext;
-import static org.elasticsearch.painless.PainlessParser.ExtdotContext;
-import static org.elasticsearch.painless.PainlessParser.ExternalContext;
-import static org.elasticsearch.painless.PainlessParser.ExtfieldContext;
-import static org.elasticsearch.painless.PainlessParser.ExtnewContext;
-import static org.elasticsearch.painless.PainlessParser.ExtprecContext;
-import static org.elasticsearch.painless.PainlessParser.ExtstartContext;
-import static org.elasticsearch.painless.PainlessParser.ExtstringContext;
-import static org.elasticsearch.painless.PainlessParser.ExttypeContext;
-import static org.elasticsearch.painless.PainlessParser.ExtvarContext;
-import static org.elasticsearch.painless.PainlessParser.FalseContext;
-import static org.elasticsearch.painless.PainlessParser.ForContext;
-import static org.elasticsearch.painless.PainlessParser.IfContext;
-import static org.elasticsearch.painless.PainlessParser.IncrementContext;
-import static org.elasticsearch.painless.PainlessParser.InitializerContext;
 import static org.elasticsearch.painless.PainlessParser.LSH;
 import static org.elasticsearch.painless.PainlessParser.MUL;
-import static org.elasticsearch.painless.PainlessParser.MultipleContext;
-import static org.elasticsearch.painless.PainlessParser.NullContext;
-import static org.elasticsearch.painless.PainlessParser.NumericContext;
-import static org.elasticsearch.painless.PainlessParser.PostincContext;
-import static org.elasticsearch.painless.PainlessParser.PrecedenceContext;
-import static org.elasticsearch.painless.PainlessParser.PreincContext;
 import static org.elasticsearch.painless.PainlessParser.REM;
 import static org.elasticsearch.painless.PainlessParser.RSH;
-import static org.elasticsearch.painless.PainlessParser.ReturnContext;
 import static org.elasticsearch.painless.PainlessParser.SUB;
-import static org.elasticsearch.painless.PainlessParser.SingleContext;
-import static org.elasticsearch.painless.PainlessParser.SourceContext;
-import static org.elasticsearch.painless.PainlessParser.StatementContext;
-import static org.elasticsearch.painless.PainlessParser.ThrowContext;
-import static org.elasticsearch.painless.PainlessParser.TrapContext;
-import static org.elasticsearch.painless.PainlessParser.TrueContext;
-import static org.elasticsearch.painless.PainlessParser.TryContext;
 import static org.elasticsearch.painless.PainlessParser.USH;
-import static org.elasticsearch.painless.PainlessParser.UnaryContext;
-import static org.elasticsearch.painless.PainlessParser.WhileContext;

 class Writer extends PainlessParserBaseVisitor<Void> {
     private static class Branch {

@@ -125,181 +126,142 @@ class Writer extends PainlessParserBaseVisitor<Void> {

     final static String BASE_CLASS_NAME = Executable.class.getName();
     final static String CLASS_NAME = BASE_CLASS_NAME + "$CompiledPainlessExecutable";
     private final static org.objectweb.asm.Type BASE_CLASS_TYPE = org.objectweb.asm.Type.getType(Executable.class);
-    private final static org.objectweb.asm.Type CLASS_TYPE =
-        org.objectweb.asm.Type.getType("L" + CLASS_NAME.replace(".", "/") + ";");
+    private final static org.objectweb.asm.Type CLASS_TYPE = org.objectweb.asm.Type.getType("L" + CLASS_NAME.replace(".", "/") + ";");

+    private final static org.objectweb.asm.commons.Method CONSTRUCTOR =
+        getAsmMethod(void.class, "<init>", Definition.class, String.class, String.class);
+    private final static org.objectweb.asm.commons.Method EXECUTE = getAsmMethod(Object.class, "execute", Map.class);
+    private final static String SIGNATURE = "(Ljava/util/Map<Ljava/lang/String;Ljava/lang/Object;>;)Ljava/lang/Object;";

     private final static org.objectweb.asm.Type PAINLESS_ERROR_TYPE = org.objectweb.asm.Type.getType(PainlessError.class);

-    private final static org.objectweb.asm.commons.Method CONSTRUCTOR = org.objectweb.asm.commons.Method.getMethod(
-        "void <init>(org.elasticsearch.painless.Definition, java.lang.String, java.lang.String)");
-    private final static org.objectweb.asm.commons.Method EXECUTE = org.objectweb.asm.commons.Method.getMethod(
-        "java.lang.Object execute(java.util.Map)");
-    private final static String SIGNATURE = "(Ljava/util/Map<Ljava/lang/String;Ljava/lang/Object;>;)Ljava/lang/Object;";
-
     private final static org.objectweb.asm.Type DEFINITION_TYPE = org.objectweb.asm.Type.getType(Definition.class);

     private final static org.objectweb.asm.Type MAP_TYPE = org.objectweb.asm.Type.getType(Map.class);
-    private final static org.objectweb.asm.commons.Method MAP_GET =
-        org.objectweb.asm.commons.Method.getMethod("Object get(Object)");
+    private final static org.objectweb.asm.commons.Method MAP_GET = getAsmMethod(Object.class, "get", Object.class);

     private final static org.objectweb.asm.Type SCORE_ACCESSOR_TYPE = org.objectweb.asm.Type.getType(ScoreAccessor.class);
-    private final static org.objectweb.asm.commons.Method SCORE_ACCESSOR_FLOAT =
-        org.objectweb.asm.commons.Method.getMethod("float floatValue()");
+    private final static org.objectweb.asm.commons.Method SCORE_ACCESSOR_FLOAT = getAsmMethod(float.class, "floatValue");

-    private final static org.objectweb.asm.commons.Method DEF_METHOD_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "java.lang.Object methodCall(java.lang.Object, java.lang.String, " +
-        "org.elasticsearch.painless.Definition, java.lang.Object[], boolean[])");
-    private final static org.objectweb.asm.commons.Method DEF_ARRAY_STORE = org.objectweb.asm.commons.Method.getMethod(
-        "void arrayStore(java.lang.Object, java.lang.Object, java.lang.Object, " +
-        "org.elasticsearch.painless.Definition, boolean, boolean)");
-    private final static org.objectweb.asm.commons.Method DEF_ARRAY_LOAD = org.objectweb.asm.commons.Method.getMethod(
-        "java.lang.Object arrayLoad(java.lang.Object, java.lang.Object, " +
-        "org.elasticsearch.painless.Definition, boolean)");
-    private final static org.objectweb.asm.commons.Method DEF_FIELD_STORE = org.objectweb.asm.commons.Method.getMethod(
-        "void fieldStore(java.lang.Object, java.lang.Object, java.lang.String, " +
-        "org.elasticsearch.painless.Definition, boolean)");
-    private final static org.objectweb.asm.commons.Method DEF_FIELD_LOAD = org.objectweb.asm.commons.Method.getMethod(
-        "java.lang.Object fieldLoad(java.lang.Object, java.lang.String, org.elasticsearch.painless.Definition)");
+    private final static org.objectweb.asm.commons.Method DEF_METHOD_CALL = getAsmMethod(
+        Object.class, "methodCall", Object.class, String.class, Definition.class, Object[].class, boolean[].class);
+    private final static org.objectweb.asm.commons.Method DEF_ARRAY_STORE = getAsmMethod(
+        void.class, "arrayStore", Object.class, Object.class, Object.class, Definition.class, boolean.class, boolean.class);
+    private final static org.objectweb.asm.commons.Method DEF_ARRAY_LOAD = getAsmMethod(
+        Object.class, "arrayLoad", Object.class, Object.class, Definition.class, boolean.class);
+    private final static org.objectweb.asm.commons.Method DEF_FIELD_STORE = getAsmMethod(
+        void.class, "fieldStore", Object.class, Object.class, String.class, Definition.class, boolean.class);
+    private final static org.objectweb.asm.commons.Method DEF_FIELD_LOAD = getAsmMethod(
+        Object.class, "fieldLoad", Object.class, String.class, Definition.class);

-    private final static org.objectweb.asm.commons.Method DEF_NOT_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "java.lang.Object not(java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method DEF_NEG_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "java.lang.Object neg(java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method DEF_MUL_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "java.lang.Object mul(java.lang.Object, java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method DEF_DIV_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "java.lang.Object div(java.lang.Object, java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method DEF_REM_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "java.lang.Object rem(java.lang.Object, java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method DEF_ADD_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "java.lang.Object add(java.lang.Object, java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method DEF_SUB_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "java.lang.Object sub(java.lang.Object, java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method DEF_LSH_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "java.lang.Object lsh(java.lang.Object, java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method DEF_RSH_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "java.lang.Object rsh(java.lang.Object, java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method DEF_USH_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "java.lang.Object ush(java.lang.Object, java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method DEF_AND_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "java.lang.Object and(java.lang.Object, java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method DEF_XOR_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "java.lang.Object xor(java.lang.Object, java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method DEF_OR_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "java.lang.Object or(java.lang.Object, java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method DEF_EQ_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "boolean eq(java.lang.Object, java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method DEF_LT_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "boolean lt(java.lang.Object, java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method DEF_LTE_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "boolean lte(java.lang.Object, java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method DEF_GT_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "boolean gt(java.lang.Object, java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method DEF_GTE_CALL = org.objectweb.asm.commons.Method.getMethod(
-        "boolean gte(java.lang.Object, java.lang.Object)");
+    private final static org.objectweb.asm.commons.Method DEF_NOT_CALL = getAsmMethod(Object.class, "not", Object.class);
+    private final static org.objectweb.asm.commons.Method DEF_NEG_CALL = getAsmMethod(Object.class, "neg", Object.class);
+    private final static org.objectweb.asm.commons.Method DEF_MUL_CALL = getAsmMethod(Object.class, "mul", Object.class, Object.class);
+    private final static org.objectweb.asm.commons.Method DEF_DIV_CALL = getAsmMethod(Object.class, "div", Object.class, Object.class);
+    private final static org.objectweb.asm.commons.Method DEF_REM_CALL = getAsmMethod(Object.class, "rem", Object.class, Object.class);
+    private final static org.objectweb.asm.commons.Method DEF_ADD_CALL = getAsmMethod(Object.class, "add", Object.class, Object.class);
+    private final static org.objectweb.asm.commons.Method DEF_SUB_CALL = getAsmMethod(Object.class, "sub", Object.class, Object.class);
+    private final static org.objectweb.asm.commons.Method DEF_LSH_CALL = getAsmMethod(Object.class, "lsh", Object.class, Object.class);
+    private final static org.objectweb.asm.commons.Method DEF_RSH_CALL = getAsmMethod(Object.class, "rsh", Object.class, Object.class);
+    private final static org.objectweb.asm.commons.Method DEF_USH_CALL = getAsmMethod(Object.class, "ush", Object.class, Object.class);
+    private final static org.objectweb.asm.commons.Method DEF_AND_CALL = getAsmMethod(Object.class, "and", Object.class, Object.class);
+    private final static org.objectweb.asm.commons.Method DEF_XOR_CALL = getAsmMethod(Object.class, "xor", Object.class, Object.class);
+    private final static org.objectweb.asm.commons.Method DEF_OR_CALL = getAsmMethod(Object.class, "or", Object.class, Object.class);
+    private final static org.objectweb.asm.commons.Method DEF_EQ_CALL = getAsmMethod(boolean.class, "eq", Object.class, Object.class);
+    private final static org.objectweb.asm.commons.Method DEF_LT_CALL = getAsmMethod(boolean.class, "lt", Object.class, Object.class);
+    private final static org.objectweb.asm.commons.Method DEF_LTE_CALL = getAsmMethod(boolean.class, "lte", Object.class, Object.class);
+    private final static org.objectweb.asm.commons.Method DEF_GT_CALL = getAsmMethod(boolean.class, "gt", Object.class, Object.class);
+    private final static org.objectweb.asm.commons.Method DEF_GTE_CALL = getAsmMethod(boolean.class, "gte", Object.class, Object.class);

     private final static org.objectweb.asm.Type STRINGBUILDER_TYPE = org.objectweb.asm.Type.getType(StringBuilder.class);

-    private final static org.objectweb.asm.commons.Method STRINGBUILDER_CONSTRUCTOR =
-        org.objectweb.asm.commons.Method.getMethod("void <init>()");
+    private final static org.objectweb.asm.commons.Method STRINGBUILDER_CONSTRUCTOR = getAsmMethod(void.class, "<init>");
     private final static org.objectweb.asm.commons.Method STRINGBUILDER_APPEND_BOOLEAN =
-        org.objectweb.asm.commons.Method.getMethod("java.lang.StringBuilder append(boolean)");
+        getAsmMethod(StringBuilder.class, "append", boolean.class);
     private final static org.objectweb.asm.commons.Method STRINGBUILDER_APPEND_CHAR =
-        org.objectweb.asm.commons.Method.getMethod("java.lang.StringBuilder append(char)");
+        getAsmMethod(StringBuilder.class, "append", char.class);
     private final static org.objectweb.asm.commons.Method STRINGBUILDER_APPEND_INT =
-        org.objectweb.asm.commons.Method.getMethod("java.lang.StringBuilder append(int)");
+        getAsmMethod(StringBuilder.class, "append", int.class);
     private final static org.objectweb.asm.commons.Method STRINGBUILDER_APPEND_LONG =
-        org.objectweb.asm.commons.Method.getMethod("java.lang.StringBuilder append(long)");
+        getAsmMethod(StringBuilder.class, "append", long.class);
     private final static org.objectweb.asm.commons.Method STRINGBUILDER_APPEND_FLOAT =
-        org.objectweb.asm.commons.Method.getMethod("java.lang.StringBuilder append(float)");
+        getAsmMethod(StringBuilder.class, "append", float.class);
     private final static org.objectweb.asm.commons.Method STRINGBUILDER_APPEND_DOUBLE =
-        org.objectweb.asm.commons.Method.getMethod("java.lang.StringBuilder append(double)");
+        getAsmMethod(StringBuilder.class, "append", double.class);
     private final static org.objectweb.asm.commons.Method STRINGBUILDER_APPEND_STRING =
-        org.objectweb.asm.commons.Method.getMethod("java.lang.StringBuilder append(java.lang.String)");
+        getAsmMethod(StringBuilder.class, "append", String.class);
     private final static org.objectweb.asm.commons.Method STRINGBUILDER_APPEND_OBJECT =
-        org.objectweb.asm.commons.Method.getMethod("java.lang.StringBuilder append(java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method STRINGBUILDER_TOSTRING =
-        org.objectweb.asm.commons.Method.getMethod("java.lang.String toString()");
+        getAsmMethod(StringBuilder.class, "append", Object.class);
+    private final static org.objectweb.asm.commons.Method STRINGBUILDER_TOSTRING = getAsmMethod(String.class, "toString");

-    private final static org.objectweb.asm.commons.Method TOINTEXACT_LONG =
-        org.objectweb.asm.commons.Method.getMethod("int toIntExact(long)");
-    private final static org.objectweb.asm.commons.Method NEGATEEXACT_INT =
-        org.objectweb.asm.commons.Method.getMethod("int negateExact(int)");
-    private final static org.objectweb.asm.commons.Method NEGATEEXACT_LONG =
-        org.objectweb.asm.commons.Method.getMethod("long negateExact(long)");
-    private final static org.objectweb.asm.commons.Method MULEXACT_INT =
-        org.objectweb.asm.commons.Method.getMethod("int multiplyExact(int, int)");
-    private final static org.objectweb.asm.commons.Method MULEXACT_LONG =
-        org.objectweb.asm.commons.Method.getMethod("long multiplyExact(long, long)");
-    private final static org.objectweb.asm.commons.Method ADDEXACT_INT =
-        org.objectweb.asm.commons.Method.getMethod("int addExact(int, int)");
-    private final static org.objectweb.asm.commons.Method ADDEXACT_LONG =
-        org.objectweb.asm.commons.Method.getMethod("long addExact(long, long)");
-    private final static org.objectweb.asm.commons.Method SUBEXACT_INT =
-        org.objectweb.asm.commons.Method.getMethod("int subtractExact(int, int)");
-    private final static org.objectweb.asm.commons.Method SUBEXACT_LONG =
-        org.objectweb.asm.commons.Method.getMethod("long subtractExact(long, long)");
+    private final static org.objectweb.asm.commons.Method TOINTEXACT_LONG = getAsmMethod(int.class, "toIntExact", long.class);
+    private final static org.objectweb.asm.commons.Method NEGATEEXACT_INT = getAsmMethod(int.class, "negateExact", int.class);
+    private final static org.objectweb.asm.commons.Method NEGATEEXACT_LONG = getAsmMethod(long.class, "negateExact", long.class);
+    private final static org.objectweb.asm.commons.Method MULEXACT_INT = getAsmMethod(int.class, "multiplyExact", int.class, int.class);
+    private final static org.objectweb.asm.commons.Method MULEXACT_LONG = getAsmMethod(long.class, "multiplyExact", long.class, long.class);
+    private final static org.objectweb.asm.commons.Method ADDEXACT_INT = getAsmMethod(int.class, "addExact", int.class, int.class);
+    private final static org.objectweb.asm.commons.Method ADDEXACT_LONG = getAsmMethod(long.class, "addExact", long.class, long.class);
+    private final static org.objectweb.asm.commons.Method SUBEXACT_INT = getAsmMethod(int.class, "subtractExact", int.class, int.class);
+    private final static org.objectweb.asm.commons.Method SUBEXACT_LONG = getAsmMethod(long.class, "subtractExact", long.class, long.class);

     private final static org.objectweb.asm.commons.Method CHECKEQUALS =
-        org.objectweb.asm.commons.Method.getMethod("boolean checkEquals(java.lang.Object, java.lang.Object)");
-    private final static org.objectweb.asm.commons.Method TOBYTEEXACT_INT =
-        org.objectweb.asm.commons.Method.getMethod("byte toByteExact(int)");
-    private final static org.objectweb.asm.commons.Method TOBYTEEXACT_LONG =
-        org.objectweb.asm.commons.Method.getMethod("byte toByteExact(long)");
+        getAsmMethod(boolean.class, "checkEquals", Object.class, Object.class);
+    private final static org.objectweb.asm.commons.Method TOBYTEEXACT_INT = getAsmMethod(byte.class, "toByteExact", int.class);
+    private final static org.objectweb.asm.commons.Method TOBYTEEXACT_LONG = getAsmMethod(byte.class, "toByteExact", long.class);
     private final static org.objectweb.asm.commons.Method TOBYTEWOOVERFLOW_FLOAT =
-        org.objectweb.asm.commons.Method.getMethod("byte toByteWithoutOverflow(float)");
+        getAsmMethod(byte.class, "toByteWithoutOverflow", float.class);
     private final static org.objectweb.asm.commons.Method TOBYTEWOOVERFLOW_DOUBLE =
-        org.objectweb.asm.commons.Method.getMethod("byte toByteWithoutOverflow(double)");
-    private final static org.objectweb.asm.commons.Method TOSHORTEXACT_INT =
-        org.objectweb.asm.commons.Method.getMethod("short toShortExact(int)");
-    private final static org.objectweb.asm.commons.Method TOSHORTEXACT_LONG =
-        org.objectweb.asm.commons.Method.getMethod("short toShortExact(long)");
+        getAsmMethod(byte.class, "toByteWithoutOverflow", double.class);
+    private final static org.objectweb.asm.commons.Method TOSHORTEXACT_INT = getAsmMethod(short.class, "toShortExact", int.class);
+    private final static org.objectweb.asm.commons.Method TOSHORTEXACT_LONG = getAsmMethod(short.class, "toShortExact", long.class);
     private final static org.objectweb.asm.commons.Method TOSHORTWOOVERFLOW_FLOAT =
-        org.objectweb.asm.commons.Method.getMethod("short toShortWithoutOverflow(float)");
+        getAsmMethod(short.class, "toShortWithoutOverflow", float.class);
     private final static org.objectweb.asm.commons.Method TOSHORTWOOVERFLOW_DOUBLE =
-        org.objectweb.asm.commons.Method.getMethod("short toShortWithoutOverflow(double)");
-    private final static org.objectweb.asm.commons.Method TOCHAREXACT_INT =
-        org.objectweb.asm.commons.Method.getMethod("char toCharExact(int)");
-    private final static org.objectweb.asm.commons.Method TOCHAREXACT_LONG =
-        org.objectweb.asm.commons.Method.getMethod("char toCharExact(long)");
+        getAsmMethod(short.class, "toShortWithoutOverflow", double.class);
+    private final static org.objectweb.asm.commons.Method TOCHAREXACT_INT = getAsmMethod(char.class, "toCharExact", int.class);
+    private final static org.objectweb.asm.commons.Method TOCHAREXACT_LONG = getAsmMethod(char.class, "toCharExact", long.class);
     private final static org.objectweb.asm.commons.Method TOCHARWOOVERFLOW_FLOAT =
-        org.objectweb.asm.commons.Method.getMethod("char toCharWithoutOverflow(float)");
+        getAsmMethod(char.class, "toCharWithoutOverflow", float.class);
     private final static org.objectweb.asm.commons.Method TOCHARWOOVERFLOW_DOUBLE =
-        org.objectweb.asm.commons.Method.getMethod("char toCharWithoutOverflow(double)");
+        getAsmMethod(char.class, "toCharWithoutOverflow", double.class);
     private final static org.objectweb.asm.commons.Method TOINTWOOVERFLOW_FLOAT =
-        org.objectweb.asm.commons.Method.getMethod("int toIntWithoutOverflow(float)");
+        getAsmMethod(int.class, "toIntWithoutOverflow", float.class);
     private final static org.objectweb.asm.commons.Method TOINTWOOVERFLOW_DOUBLE =
-        org.objectweb.asm.commons.Method.getMethod("int toIntWithoutOverflow(double)");
+        getAsmMethod(int.class, "toIntWithoutOverflow", double.class);
     private final static org.objectweb.asm.commons.Method TOLONGWOOVERFLOW_FLOAT =
-        org.objectweb.asm.commons.Method.getMethod("long toLongExactWithoutOverflow(float)");
+        getAsmMethod(long.class, "toLongWithoutOverflow", float.class);
     private final static org.objectweb.asm.commons.Method TOLONGWOOVERFLOW_DOUBLE =
-        org.objectweb.asm.commons.Method.getMethod("long toLongExactWithoutOverflow(double)");
+        getAsmMethod(long.class, "toLongWithoutOverflow", double.class);
     private final static org.objectweb.asm.commons.Method TOFLOATWOOVERFLOW_DOUBLE =
-        org.objectweb.asm.commons.Method.getMethod("float toFloatWithoutOverflow(double)");
+        getAsmMethod(float.class, "toFloatWithoutOverflow", double.class);
     private final static org.objectweb.asm.commons.Method MULWOOVERLOW_FLOAT =
-        org.objectweb.asm.commons.Method.getMethod("float multiplyWithoutOverflow(float, float)");
+        getAsmMethod(float.class, "multiplyWithoutOverflow", float.class, float.class);
     private final static org.objectweb.asm.commons.Method MULWOOVERLOW_DOUBLE =
-        org.objectweb.asm.commons.Method.getMethod("double multiplyWithoutOverflow(double, double)");
+        getAsmMethod(double.class, "multiplyWithoutOverflow", double.class, double.class);
     private final static org.objectweb.asm.commons.Method DIVWOOVERLOW_INT =
-        org.objectweb.asm.commons.Method.getMethod("int divideWithoutOverflow(int, int)");
+        getAsmMethod(int.class, "divideWithoutOverflow", int.class, int.class);
     private final static org.objectweb.asm.commons.Method DIVWOOVERLOW_LONG =
-        org.objectweb.asm.commons.Method.getMethod("long divideWithoutOverflow(long, long)");
+        getAsmMethod(long.class, "divideWithoutOverflow", long.class, long.class);
     private final static org.objectweb.asm.commons.Method DIVWOOVERLOW_FLOAT =
-        org.objectweb.asm.commons.Method.getMethod("float divideWithoutOverflow(float, float)");
+        getAsmMethod(float.class, "divideWithoutOverflow", float.class, float.class);
     private final static org.objectweb.asm.commons.Method DIVWOOVERLOW_DOUBLE =
-        org.objectweb.asm.commons.Method.getMethod("double divideWithoutOverflow(double, double)");
+        getAsmMethod(double.class, "divideWithoutOverflow", double.class, double.class);
     private final static org.objectweb.asm.commons.Method REMWOOVERLOW_FLOAT =
-        org.objectweb.asm.commons.Method.getMethod("float remainderWithoutOverflow(float, float)");
+        getAsmMethod(float.class, "remainderWithoutOverflow", float.class, float.class);
     private final static org.objectweb.asm.commons.Method REMWOOVERLOW_DOUBLE =
-        org.objectweb.asm.commons.Method.getMethod("double remainderWithoutOverflow(double, double)");
+        getAsmMethod(double.class, "remainderWithoutOverflow", double.class, double.class);
     private final static org.objectweb.asm.commons.Method ADDWOOVERLOW_FLOAT =
-        org.objectweb.asm.commons.Method.getMethod("float addWithoutOverflow(float, float)");
+        getAsmMethod(float.class, "addWithoutOverflow", float.class, float.class);
     private final static org.objectweb.asm.commons.Method ADDWOOVERLOW_DOUBLE =
-        org.objectweb.asm.commons.Method.getMethod("double addWithoutOverflow(double, double)");
+        getAsmMethod(double.class, "addWithoutOverflow", double.class, double.class);
     private final static org.objectweb.asm.commons.Method SUBWOOVERLOW_FLOAT =
-        org.objectweb.asm.commons.Method.getMethod("float subtractWithoutOverflow(float, float)");
+        getAsmMethod(float.class, "subtractWithoutOverflow", float.class, float.class);
     private final static org.objectweb.asm.commons.Method SUBWOOVERLOW_DOUBLE =
-        org.objectweb.asm.commons.Method.getMethod("double subtractWithoutOverflow(double, double)");
+        getAsmMethod(double.class, "subtractWithoutOverflow", double.class, double.class);

+    private static org.objectweb.asm.commons.Method getAsmMethod(final Class<?> rtype, final String name, final Class<?>... ptypes) {
+        return new org.objectweb.asm.commons.Method(name, MethodType.methodType(rtype, ptypes).toMethodDescriptorString());
+    }

     static byte[] write(Metadata metadata) {
         Writer writer = new Writer(metadata);

@@ -356,7 +318,7 @@ class Writer extends PainlessParserBaseVisitor<Void> {

     private void writeBegin() {
         final int compute = ClassWriter.COMPUTE_FRAMES | ClassWriter.COMPUTE_MAXS;
         final int version = Opcodes.V1_7;
-        final int access = Opcodes.ACC_PUBLIC | Opcodes.ACC_SUPER | Opcodes.ACC_FINAL | Opcodes.ACC_SYNTHETIC;
+        final int access = Opcodes.ACC_PUBLIC | Opcodes.ACC_SUPER | Opcodes.ACC_FINAL;
         final String base = BASE_CLASS_TYPE.getInternalName();
         final String name = CLASS_TYPE.getInternalName();

@@ -366,7 +328,7 @@ class Writer extends PainlessParserBaseVisitor<Void> {

     }

     private void writeConstructor() {
-        final int access = Opcodes.ACC_PUBLIC | Opcodes.ACC_SYNTHETIC;
+        final int access = Opcodes.ACC_PUBLIC;
         final GeneratorAdapter constructor = new GeneratorAdapter(access, CONSTRUCTOR, null, null, writer);
         constructor.loadThis();
         constructor.loadArgs();

@@ -376,7 +338,7 @@ class Writer extends PainlessParserBaseVisitor<Void> {

     }

     private void writeExecute() {
-        final int access = Opcodes.ACC_PUBLIC | Opcodes.ACC_SYNTHETIC;
+        final int access = Opcodes.ACC_PUBLIC;
         execute = new GeneratorAdapter(access, EXECUTE, SIGNATURE, null, writer);

         final Label fals = new Label();

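The refactoring above replaces ASM's string-parsed `Method.getMethod(...)` constants with a small `getAsmMethod` helper built on `java.lang.invoke.MethodType`. A minimal standalone sketch (not part of this diff; it assumes the ASM `asm-commons` jar on the classpath) showing that both forms describe the same method:

[source,java]
----------------------------------
import java.lang.invoke.MethodType;
import org.objectweb.asm.commons.Method;

public class GetAsmMethodDemo {
    // Same construction as the helper added in the diff above.
    static Method getAsmMethod(Class<?> rtype, String name, Class<?>... ptypes) {
        return new Method(name, MethodType.methodType(rtype, ptypes).toMethodDescriptorString());
    }

    public static void main(String[] args) {
        Method byType = getAsmMethod(Object.class, "get", Object.class);
        Method byString = Method.getMethod("java.lang.Object get(java.lang.Object)");
        // Both carry the name "get" and the descriptor "(Ljava/lang/Object;)Ljava/lang/Object;".
        System.out.println(byType.equals(byString)); // true
    }
}
----------------------------------
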
@@ -19,6 +19,7 @@

 package org.elasticsearch.painless;

+import java.util.Arrays;
 import java.util.Collections;

 public class WhenThingsGoWrongTests extends ScriptTestCase {

@@ -128,4 +129,21 @@ public class WhenThingsGoWrongTests extends ScriptTestCase {

             "The maximum number of statements that can be executed in a loop has been reached."));
         }
     }
+
+    public void testSourceLimits() {
+        char[] chars = new char[Compiler.MAXIMUM_SOURCE_LENGTH + 1];
+        Arrays.fill(chars, '0');
+
+        try {
+            exec(new String(chars));
+            fail("should have hit IllegalArgumentException");
+        } catch (IllegalArgumentException expected) {
+            assertTrue(expected.getMessage().contains("Scripts may be no longer than"));
+        }
+
+        chars = new char[Compiler.MAXIMUM_SOURCE_LENGTH];
+        Arrays.fill(chars, '0');
+
+        assertEquals(0, exec(new String(chars)));
+    }
 }

|
@ -29,11 +29,15 @@ import org.elasticsearch.common.collect.Tuple;
|
|||
import org.elasticsearch.monitor.jvm.JvmInfo;
|
||||
import org.hamcrest.Matcher;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
|
||||
import static org.elasticsearch.common.cli.CliTool.ExitStatus.OK;
|
||||
import static org.elasticsearch.common.cli.CliTool.ExitStatus.OK_AND_EXIT;
|
||||
|
@ -48,6 +52,12 @@ public class BootstrapCliParserTests extends CliToolTestCase {
|
|||
|
||||
private CaptureOutputTerminal terminal = new CaptureOutputTerminal();
|
||||
private List<String> propertiesToClear = new ArrayList<>();
|
||||
private Map<Object, Object> properties;
|
||||
|
||||
@Before
|
||||
public void before() {
|
||||
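+        // Snapshot the system properties before each test so clearProperties() can assert nothing leaked.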
+        this.properties = new HashMap<>(System.getProperties());
+    }

     @After
     public void clearProperties() {

@@ -55,6 +65,7 @@ public class BootstrapCliParserTests extends CliToolTestCase {

             System.clearProperty(property);
         }
         propertiesToClear.clear();
+        assertEquals("properties leaked", properties, new HashMap<>(System.getProperties()));
     }

     public void testThatVersionIsReturned() throws Exception {

@@ -235,6 +246,7 @@ public class BootstrapCliParserTests extends CliToolTestCase {

             parser.parse("start", new String[]{"--foo=bar", "-Dbaz=qux"});
         });
         assertThat(e.getMessage(), containsString("must be before any parameters starting with --"));
+        assertNull(System.getProperty("es.foo"));
     }

     private void registerProperties(String ... systemProperties) {

@@ -85,10 +85,10 @@ fi

 @test "[$GROUP] install jvm-example plugin with a custom CONFIG_FILE and check failure" {
     local relativePath=${1:-$(readlink -m jvm-example-*.zip)}
-    CONF_FILE="$ESCONFIG/elasticsearch.yml" run sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/plugin" install "file://$relativePath"
+    CONF_FILE="$ESCONFIG/elasticsearch.yml" run sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install "file://$relativePath"
     # this should fail because CONF_FILE is no longer supported
     [ $status = 1 ]
-    CONF_FILE="$ESCONFIG/elasticsearch.yml" run sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/plugin" remove jvm-example
+    CONF_FILE="$ESCONFIG/elasticsearch.yml" run sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" remove jvm-example
     echo "status is $status"
     [ $status = 1 ]
 }

@@ -170,7 +170,7 @@ fi

     local JAVA=$(which java)

     sudo chmod -x $JAVA
-    run "$ESHOME/bin/plugin"
+    run "$ESHOME/bin/elasticsearch-plugin"
     sudo chmod +x $JAVA

     [ "$status" -eq 1 ]

@@ -264,7 +264,7 @@ fi

 }

 @test "[$GROUP] check the installed plugins can be listed with 'plugins list' and result matches the list of plugins in plugins pom" {
-    "$ESHOME/bin/plugin" list | tail -n +2 | sed 's/^......//' > /tmp/installed
+    "$ESHOME/bin/elasticsearch-plugin" list | tail -n +2 | sed 's/^......//' > /tmp/installed
     compare_plugins_list "/tmp/installed" "'plugins list'"
 }

@@ -389,7 +389,7 @@ fi

 @test "[$GROUP] install jvm-example with different logging modes and check output" {
     local relativePath=${1:-$(readlink -m jvm-example-*.zip)}
-    sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/plugin" install "file://$relativePath" > /tmp/plugin-cli-output
+    sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install "file://$relativePath" > /tmp/plugin-cli-output
     local loglines=$(cat /tmp/plugin-cli-output | wc -l)
     if [ "$GROUP" == "TAR PLUGINS" ]; then
         # tar extraction does not create the plugins directory so the plugin tool will print an additional line that the directory will be created

@@ -408,7 +408,7 @@ fi

     remove_jvm_example

     local relativePath=${1:-$(readlink -m jvm-example-*.zip)}
-    sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/plugin" install "file://$relativePath" -Des.logger.level=DEBUG > /tmp/plugin-cli-output
+    sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install "file://$relativePath" -Des.logger.level=DEBUG > /tmp/plugin-cli-output
     local loglines=$(cat /tmp/plugin-cli-output | wc -l)
     if [ "$GROUP" == "TAR PLUGINS" ]; then
         [ "$loglines" -gt "7" ] || {

@@ -32,7 +32,7 @@ install_plugin() {

     assert_file_exist "$path"

-    sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/plugin" install "file://$path"
+    sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" install "file://$path"

     assert_file_exist "$ESPLUGINS/$name"
     assert_file_exist "$ESPLUGINS/$name/plugin-descriptor.properties"

@@ -60,7 +60,7 @@ remove_plugin() {

     local name=$1

     echo "Removing $name...."
-    sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/plugin" remove $name
+    sudo -E -u $ESPLUGIN_COMMAND_USER "$ESHOME/bin/elasticsearch-plugin" remove $name

     assert_file_not_exist "$ESPLUGINS/$name"

@@ -82,7 +82,7 @@ verify_archive_installation() {

     assert_file "$ESHOME/bin" d
     assert_file "$ESHOME/bin/elasticsearch" f
     assert_file "$ESHOME/bin/elasticsearch.in.sh" f
-    assert_file "$ESHOME/bin/plugin" f
+    assert_file "$ESHOME/bin/elasticsearch-plugin" f
     assert_file "$ESCONFIG" d
     assert_file "$ESCONFIG/elasticsearch.yml" f
     assert_file "$ESCONFIG/logging.yml" f

@@ -12,6 +12,10 @@

       }
     },
     "params": {
+      "format": {
+        "type" : "string",
+        "description" : "a short version of the Accept header, e.g. json, yaml"
+      },
       "local": {
         "type" : "boolean",
         "description" : "Return local information, do not retrieve the state from master node (default: false)"
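
The same `format` parameter is added to each cat API spec below; it is the query-string shorthand for the Accept header shown earlier. A minimal sketch (not part of this diff; same `localhost:9200` assumption as above, and `InputStream.readAllBytes` requires Java 9+):

[source,java]
----------------------------------
import java.io.InputStream;
import java.net.URL;

public class CatFormatParamDemo {
    public static void main(String[] args) throws Exception {
        // ?format=json is the short form of sending "Accept: application/json".
        try (InputStream in = new URL("http://localhost:9200/_cat/health?format=json").openStream()) {
            System.out.write(in.readAllBytes());
            System.out.flush();
        }
    }
}
----------------------------------
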
@@ -12,6 +12,10 @@

       }
     },
     "params": {
+      "format": {
+        "type" : "string",
+        "description" : "a short version of the Accept header, e.g. json, yaml"
+      },
       "bytes": {
         "type": "enum",
         "description" : "The unit in which to display byte values",

@@ -12,6 +12,10 @@

       }
     },
     "params": {
+      "format": {
+        "type" : "string",
+        "description" : "a short version of the Accept header, e.g. json, yaml"
+      },
       "local": {
         "type" : "boolean",
         "description" : "Return local information, do not retrieve the state from master node (default: false)"

@@ -12,6 +12,10 @@

       }
     },
     "params": {
+      "format": {
+        "type" : "string",
+        "description" : "a short version of the Accept header, e.g. json, yaml"
+      },
       "bytes": {
         "type": "enum",
         "description" : "The unit in which to display byte values",

@@ -8,6 +8,10 @@

     "parts": {
     },
     "params": {
+      "format": {
+        "type" : "string",
+        "description" : "a short version of the Accept header, e.g. json, yaml"
+      },
       "local": {
         "type" : "boolean",
         "description" : "Return local information, do not retrieve the state from master node (default: false)"

@@ -12,6 +12,10 @@

       }
     },
     "params": {
+      "format": {
+        "type" : "string",
+        "description" : "a short version of the Accept header, e.g. json, yaml"
+      },
       "bytes": {
         "type": "enum",
         "description" : "The unit in which to display byte values",

@@ -8,6 +8,10 @@

     "parts": {
     },
     "params": {
+      "format": {
+        "type" : "string",
+        "description" : "a short version of the Accept header, e.g. json, yaml"
+      },
       "local": {
         "type" : "boolean",
         "description" : "Return local information, do not retrieve the state from master node (default: false)"

@@ -8,6 +8,10 @@

     "parts": {
     },
     "params": {
+      "format": {
+        "type" : "string",
+        "description" : "a short version of the Accept header, e.g. json, yaml"
+      },
       "local": {
         "type" : "boolean",
         "description" : "Return local information, do not retrieve the state from master node (default: false)"

@@ -8,6 +8,10 @@

     "parts": {
     },
     "params": {
+      "format": {
+        "type" : "string",
+        "description" : "a short version of the Accept header, e.g. json, yaml"
+      },
       "local": {
         "type" : "boolean",
         "description" : "Return local information, do not retrieve the state from master node (default: false)"

@@ -8,6 +8,10 @@

     "parts": {
     },
     "params": {
+      "format": {
+        "type" : "string",
+        "description" : "a short version of the Accept header, e.g. json, yaml"
+      },
       "local": {
         "type" : "boolean",
         "description" : "Return local information, do not retrieve the state from master node (default: false)"

@@ -6,6 +6,10 @@

-    "path": "/_cat/plugins",
+    "paths": ["/_cat/plugins"],
     "params": {
+      "format": {
+        "type" : "string",
+        "description" : "a short version of the Accept header, e.g. json, yaml"
+      },
       "local": {
         "type" : "boolean",
         "description" : "Return local information, do not retrieve the state from master node (default: false)"

@@ -12,6 +12,10 @@

       }
     },
     "params": {
+      "format": {
+        "type" : "string",
+        "description" : "a short version of the Accept header, e.g. json, yaml"
+      },
       "bytes": {
         "type": "enum",
         "description" : "The unit in which to display byte values",

@@ -8,6 +8,10 @@

     "parts": {
     },
     "params": {
+      "format": {
+        "type" : "string",
+        "description" : "a short version of the Accept header, e.g. json, yaml"
+      },
       "local": {
         "type" : "boolean",
         "description" : "Return local information, do not retrieve the state from master node",

@@ -12,6 +12,10 @@

       }
     },
     "params": {
+      "format": {
+        "type" : "string",
+        "description" : "a short version of the Accept header, e.g. json, yaml"
+      },
       "h": {
         "type": "list",
         "description" : "Comma-separated list of column names to display"

Some files were not shown because too many files have changed in this diff.