commit 9c06736dbd
Merge branch 'master' into feature/ingest
@@ -57,8 +57,7 @@ class ClusterConfiguration {
     @Input
     Closure waitCondition = { NodeInfo node, AntBuilder ant ->
         File tmpFile = new File(node.cwd, 'wait.success')
-        ant.echo(message: "[${LocalDateTime.now()}] Waiting for elasticsearch node", level: "info")
-        ant.get(src: "http://${node.httpUri()}",
+        ant.get(src: "http://${node.httpUri()}/_cluster/health?wait_for_nodes=${numNodes}",
                 dest: tmpFile.toString(),
                 ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
                 retries: 10)
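The rewritten wait condition polls the node's _cluster/health?wait_for_nodes= endpoint instead of the bare root URL, so the build only proceeds once the expected number of nodes has joined. A minimal standalone sketch of the same check, assuming a node on localhost:9200 and a two-node cluster:

    import java.io.IOException;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class WaitForNodes {
        public static void main(String[] args) throws Exception {
            int numNodes = 2; // assumed cluster size
            URL url = new URL("http://localhost:9200/_cluster/health?wait_for_nodes=" + numNodes);
            for (int retries = 10; retries > 0; retries--) {
                try {
                    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
                    if (conn.getResponseCode() == 200) { // health returns once wait_for_nodes is satisfied
                        System.out.println("cluster is ready");
                        return;
                    }
                } catch (IOException e) {
                    // node not reachable yet; retry, mirroring ignoreerrors/retries above
                }
                Thread.sleep(1000);
            }
            System.out.println("cluster did not form in time");
        }
    }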
@@ -35,7 +35,7 @@ public class GetAliasesRequest extends MasterNodeReadRequest<GetAliasesRequest>
     private String[] indices = Strings.EMPTY_ARRAY;
     private String[] aliases = Strings.EMPTY_ARRAY;

-    private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen();
+    private IndicesOptions indicesOptions = IndicesOptions.strictExpand();

     public GetAliasesRequest(String[] aliases) {
         this.aliases = aliases;
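Switching the default from strictExpandOpen() to strictExpand() means wildcard expressions on a get-aliases request now resolve to closed indices as well as open ones. A hedged usage sketch (index and alias names are placeholders):

    import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;

    public class GetAliasesExample {
        public static void main(String[] args) {
            GetAliasesRequest request = new GetAliasesRequest(new String[] { "logs-alias" });
            request.indices("logs-*"); // with strictExpand(), this wildcard also matches closed indices
            assert request.indicesOptions().expandWildcardsClosed();
        }
    }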
@@ -52,10 +52,6 @@ public class PercolateShardRequest extends BroadcastShardRequest {
         this.startTime = request.startTime;
     }

-    public PercolateShardRequest(ShardId shardId, OriginalIndices originalIndices) {
-        super(shardId, originalIndices);
-    }
-
     PercolateShardRequest(ShardId shardId, PercolateRequest request) {
         super(shardId, request);
         this.documentType = request.documentType();
@@ -160,12 +160,8 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi
             items = new ArrayList<>(size);
             for (int i = 0; i < size; i++) {
                 int slot = in.readVInt();
-                OriginalIndices originalIndices = OriginalIndices.readOriginalIndices(in);
-                PercolateShardRequest shardRequest = new PercolateShardRequest(new ShardId(index, shardId), originalIndices);
-                shardRequest.documentType(in.readString());
-                shardRequest.source(in.readBytesReference());
-                shardRequest.docSource(in.readBytesReference());
-                shardRequest.onlyCount(in.readBoolean());
+                PercolateShardRequest shardRequest = new PercolateShardRequest();
+                shardRequest.readFrom(in);
                 Item item = new Item(slot, shardRequest);
                 items.add(item);
             }

@@ -179,11 +175,7 @@ public class TransportShardMultiPercolateAction extends TransportSingleShardActi
             out.writeVInt(items.size());
             for (Item item : items) {
                 out.writeVInt(item.slot);
-                OriginalIndices.writeOriginalIndices(item.request.originalIndices(), out);
-                out.writeString(item.request.documentType());
-                out.writeBytesReference(item.request.source());
-                out.writeBytesReference(item.request.docSource());
-                out.writeBoolean(item.request.onlyCount());
+                item.request.writeTo(out);
             }
         }

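The read and write paths now delegate to PercolateShardRequest's own readFrom/writeTo instead of duplicating its field-by-field wire format. A minimal sketch of that Streamable convention (the class and its fields are illustrative, not part of the commit):

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Streamable;

    import java.io.IOException;

    // Each request class owns its wire format; callers just delegate.
    class ExampleRequest implements Streamable {
        String documentType;
        boolean onlyCount;

        @Override
        public void readFrom(StreamInput in) throws IOException {
            documentType = in.readString();
            onlyCount = in.readBoolean();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(documentType);
            out.writeBoolean(onlyCount);
        }
    }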
@@ -421,7 +421,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
      * @return this for chaining
      */
     public SearchRequestBuilder addRescorer(RescoreBuilder.Rescorer rescorer) {
-        sourceBuilder().addRescorer(new RescoreBuilder().rescorer(rescorer));
+        sourceBuilder().addRescorer(new RescoreBuilder(rescorer));
         return this;
     }

@@ -433,7 +433,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
      * @return this for chaining
      */
     public SearchRequestBuilder addRescorer(RescoreBuilder.Rescorer rescorer, int window) {
-        sourceBuilder().addRescorer(new RescoreBuilder().rescorer(rescorer).windowSize(window));
+        sourceBuilder().addRescorer(new RescoreBuilder(rescorer).windowSize(window));
         return this;
     }

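With the new constructor the rescorer is fixed at construction time instead of being set afterwards. A hedged caller-side sketch (the query is a placeholder):

    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.search.rescore.RescoreBuilder;

    public class RescoreExample {
        public static void main(String[] args) {
            // before: new RescoreBuilder().rescorer(rescorer).windowSize(50)
            RescoreBuilder builder =
                    new RescoreBuilder(RescoreBuilder.queryRescorer(QueryBuilders.matchAllQuery())).windowSize(50);
            System.out.println(builder);
        }
    }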
@@ -66,13 +66,11 @@ import org.elasticsearch.common.util.ExtensionPoint;
 import org.elasticsearch.gateway.GatewayAllocator;
 import org.elasticsearch.gateway.PrimaryShardAllocator;
 import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.engine.EngineConfig;
 import org.elasticsearch.index.IndexingSlowLog;
 import org.elasticsearch.index.search.stats.SearchSlowLog;
 import org.elasticsearch.index.settings.IndexDynamicSettings;
-import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.index.shard.MergePolicyConfig;
-import org.elasticsearch.index.shard.MergeSchedulerConfig;
+import org.elasticsearch.index.MergePolicyConfig;
+import org.elasticsearch.index.MergeSchedulerConfig;
 import org.elasticsearch.index.store.IndexStore;
 import org.elasticsearch.indices.IndicesWarmer;
 import org.elasticsearch.indices.cache.request.IndicesRequestCache;

@@ -150,8 +148,7 @@ public class ClusterModule extends AbstractModule {
         registerIndexDynamicSetting(IndicesTTLService.INDEX_TTL_DISABLE_PURGE, Validator.EMPTY);
         registerIndexDynamicSetting(IndexSettings.INDEX_REFRESH_INTERVAL, Validator.TIME);
         registerIndexDynamicSetting(PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS, Validator.EMPTY);
-        registerIndexDynamicSetting(EngineConfig.INDEX_GC_DELETES_SETTING, Validator.TIME);
-        registerIndexDynamicSetting(IndexShard.INDEX_FLUSH_ON_CLOSE, Validator.BOOLEAN);
+        registerIndexDynamicSetting(IndexSettings.INDEX_GC_DELETES_SETTING, Validator.TIME);
         registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN, Validator.TIME);
         registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO, Validator.TIME);
         registerIndexDynamicSetting(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG, Validator.TIME);

@@ -178,7 +175,7 @@ public class ClusterModule extends AbstractModule {
         registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER, Validator.DOUBLE_GTE_2);
         registerIndexDynamicSetting(MergePolicyConfig.INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT, Validator.NON_NEGATIVE_DOUBLE);
         registerIndexDynamicSetting(MergePolicyConfig.INDEX_COMPOUND_FORMAT, Validator.EMPTY);
-        registerIndexDynamicSetting(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, Validator.BYTES_SIZE);
+        registerIndexDynamicSetting(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, Validator.BYTES_SIZE);
         registerIndexDynamicSetting(IndexSettings.INDEX_TRANSLOG_DURABILITY, Validator.EMPTY);
         registerIndexDynamicSetting(IndicesWarmer.INDEX_WARMER_ENABLED, Validator.EMPTY);
         registerIndexDynamicSetting(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED, Validator.BOOLEAN);

@@ -406,10 +406,26 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
             String nodeId = nodeStats.getNode().id();
             String nodeName = nodeStats.getNode().getName();
             if (logger.isTraceEnabled()) {
-                logger.trace("node: [{}], most available: total disk: {}, available disk: {} / least available: total disk: {}, available disk: {}", nodeId, mostAvailablePath.getTotal(), leastAvailablePath.getAvailable(), leastAvailablePath.getTotal(), leastAvailablePath.getAvailable());
+                logger.trace("node: [{}], most available: total disk: {}, available disk: {} / least available: total disk: {}, available disk: {}",
+                        nodeId, mostAvailablePath.getTotal(), leastAvailablePath.getAvailable(),
+                        leastAvailablePath.getTotal(), leastAvailablePath.getAvailable());
             }
-            newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), leastAvailablePath.getTotal().bytes(), leastAvailablePath.getAvailable().bytes()));
-            newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), mostAvailablePath.getTotal().bytes(), mostAvailablePath.getAvailable().bytes()));
+            if (leastAvailablePath.getTotal().bytes() < 0) {
+                if (logger.isTraceEnabled()) {
+                    logger.trace("node: [{}] least available path has less than 0 total bytes of disk [{}], skipping",
+                            nodeId, leastAvailablePath.getTotal().bytes());
+                }
+            } else {
+                newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), leastAvailablePath.getTotal().bytes(), leastAvailablePath.getAvailable().bytes()));
+            }
+            if (mostAvailablePath.getTotal().bytes() < 0) {
+                if (logger.isTraceEnabled()) {
+                    logger.trace("node: [{}] most available path has less than 0 total bytes of disk [{}], skipping",
+                            nodeId, mostAvailablePath.getTotal().bytes());
+                }
+            } else {
+                newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), mostAvailablePath.getTotal().bytes(), mostAvailablePath.getAvailable().bytes()));
+            }

         }
     }

@@ -630,7 +630,11 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
                 task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
             }

-            executor.clusterStatePublished(newClusterState);
+            try {
+                executor.clusterStatePublished(newClusterState);
+            } catch (Exception e) {
+                logger.error("exception thrown while notifying executor of new cluster state publication [{}]", e, source);
+            }

             TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
             logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());

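Wrapping clusterStatePublished in try/catch keeps a misbehaving executor callback from breaking the publication bookkeeping that follows it. The same defensive pattern in isolation (the Listener interface here is a stand-in, not the Elasticsearch one):

    import java.util.List;
    import java.util.logging.Level;
    import java.util.logging.Logger;

    class Publisher {
        private static final Logger logger = Logger.getLogger(Publisher.class.getName());

        interface Listener {
            void onPublished(String newState);
        }

        // A throwing callback must not abort the remaining post-publication work.
        void publish(List<Listener> listeners, String newState) {
            for (Listener listener : listeners) {
                try {
                    listener.onPublished(newState);
                } catch (Exception e) {
                    logger.log(Level.SEVERE, "exception thrown while notifying listener of new state publication", e);
                }
            }
        }
    }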
@@ -336,7 +336,7 @@ public class Cache<K, V> {
      * value using the given mapping function and enters it into this map unless null. The load method for a given key
      * will be invoked at most once.
      *
-     * @param key the key whose associated value is to be returned or computed for if non-existant
+     * @param key the key whose associated value is to be returned or computed for if non-existent
      * @param loader the function to compute a value given a key
      * @return the current (existing or computed) value associated with the specified key, or null if the computed
      *         value is null

@@ -37,6 +37,7 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
+import org.elasticsearch.search.rescore.RescoreBuilder.Rescorer;
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;


@@ -676,6 +677,13 @@ public abstract class StreamInput extends InputStream {
         return readNamedWriteable(ShapeBuilder.class);
     }

+    /**
+     * Reads a {@link QueryBuilder} from the current stream
+     */
+    public Rescorer readRescorer() throws IOException {
+        return readNamedWriteable(Rescorer.class);
+    }
+
     /**
      * Reads a {@link org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder} from the current stream
      */

@@ -36,13 +36,13 @@ import org.elasticsearch.common.geo.builders.ShapeBuilder;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.index.query.QueryBuilder;
-import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
+import org.elasticsearch.search.rescore.RescoreBuilder.Rescorer;
 import org.joda.time.ReadableInstant;

 import java.io.EOFException;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.channels.ClosedChannelException;
 import java.nio.file.AccessDeniedException;
 import java.nio.file.AtomicMoveNotSupportedException;
 import java.nio.file.DirectoryNotEmptyException;

@@ -676,5 +676,12 @@ public abstract class StreamOutput extends OutputStream {
         for (T obj: list) {
             obj.writeTo(this);
         }
     }
+
+    /**
+     * Writes a {@link Rescorer} to the current stream
+     */
+    public void writeRescorer(Rescorer rescorer) throws IOException {
+        writeNamedWriteable(rescorer);
+    }
 }
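readRescorer and writeRescorer follow the NamedWriteable convention already used for shapes and queries: the writer records the implementation's name, and the reader resolves the concrete class from a registry. A hedged round-trip sketch (assumes the input is made registry-aware, e.g. via NamedWriteableAwareStreamInput, with the Rescorer implementation registered):

    import java.io.IOException;

    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.search.rescore.RescoreBuilder;

    class RescorerRoundTrip {
        static RescoreBuilder.Rescorer roundTrip(RescoreBuilder.Rescorer rescorer) throws IOException {
            BytesStreamOutput out = new BytesStreamOutput();
            out.writeRescorer(rescorer); // writeNamedWriteable: name first, then the body
            // assumption: in real use, wrap this in a NamedWriteableAwareStreamInput backed by a registry
            StreamInput in = StreamInput.wrap(out.bytes());
            return in.readRescorer(); // looks the prototype up by the recorded name
        }
    }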
@@ -61,6 +61,7 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.query.ParsedQuery;
 import org.elasticsearch.index.query.QueryShardContext;
+import org.elasticsearch.index.search.stats.SearchSlowLog;
 import org.elasticsearch.index.shard.IndexEventListener;
 import org.elasticsearch.index.shard.IndexSearcherWrapper;
 import org.elasticsearch.index.shard.IndexShard;

@@ -108,6 +109,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
     private final IndexingOperationListener[] listeners;
     private volatile AsyncRefreshTask refreshTask;
     private final AsyncTranslogFSync fsyncTask;
+    private final SearchSlowLog searchSlowLog;

     public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv,
                         SimilarityService similarityService,

@@ -151,6 +153,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
             this.fsyncTask = null;
         }
         this.refreshTask = new AsyncRefreshTask(this);
+        searchSlowLog = new SearchSlowLog(indexSettings.getSettings());
     }

     public int numberOfShards() {

@@ -313,9 +316,9 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
                 (primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
             store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> nodeServicesProvider.getIndicesQueryCache().onClose(shardId)));
             if (useShadowEngine(primary, indexSettings)) {
-                indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider); // no indexing listeners - shadow engines don't index
+                indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider, searchSlowLog); // no indexing listeners - shadow engines don't index
             } else {
-                indexShard = new IndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider, listeners);
+                indexShard = new IndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider, searchSlowLog, listeners);
             }
             eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created");
             eventListener.afterIndexShardCreated(indexShard);

@@ -414,6 +417,10 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
         return nodeServicesProvider.getThreadPool();
     }

+    public SearchSlowLog getSearchSlowLog() {
+        return searchSlowLog;
+    }
+
     private class StoreCloseListener implements Store.OnClose {
         private final ShardId shardId;
         private final boolean ownsShard;

@@ -562,9 +569,9 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
         final Settings settings = indexSettings.getSettings();
         for (final IndexShard shard : this.shards.values()) {
             try {
-                shard.onRefreshSettings(settings);
+                shard.onSettingsChanged();
             } catch (Exception e) {
-                logger.warn("[{}] failed to refresh shard settings", e, shard.shardId().id());
+                logger.warn("[{}] failed to notify shard about setting change", e, shard.shardId().id());
             }
         }
         try {

@@ -577,6 +584,12 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
         } catch (Exception e) {
             logger.warn("failed to refresh slowlog settings", e);
         }
+
+        try {
+            searchSlowLog.onRefreshSettings(settings); // this will be refactored soon anyway so duplication is ok here
+        } catch (Exception e) {
+            logger.warn("failed to refresh slowlog settings", e);
+        }
         if (refreshTask.getInterval().equals(indexSettings.getRefreshInterval()) == false) {
             rescheduleRefreshTasks();
         }

@@ -674,10 +687,10 @@ public final class IndexService extends AbstractIndexComponent implements IndexC

         private synchronized void onTaskCompletion() {
             if (mustReschedule()) {
-                indexService.logger.debug("scheduling {} every {}", toString(), interval);
+                indexService.logger.trace("scheduling {} every {}", toString(), interval);
                 this.scheduledFuture = threadPool.schedule(interval, getThreadPool(), BaseAsyncTask.this);
             } else {
-                indexService.logger.debug("scheduled {} disabled", toString());
+                indexService.logger.trace("scheduled {} disabled", toString());
                 this.scheduledFuture = null;
             }
         }

@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.index;

+import org.apache.lucene.index.MergePolicy;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.ParseFieldMatcher;

@@ -25,11 +26,11 @@ import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.util.concurrent.FutureUtils;
 import org.elasticsearch.index.mapper.internal.AllFieldMapper;
 import org.elasticsearch.index.translog.Translog;
-import org.elasticsearch.threadpool.ThreadPool;

 import java.util.ArrayList;
 import java.util.Arrays;

@@ -59,6 +60,14 @@ public final class IndexSettings {
     public static final String INDEX_TRANSLOG_DURABILITY = "index.translog.durability";
     public static final String INDEX_REFRESH_INTERVAL = "index.refresh_interval";
     public static final TimeValue DEFAULT_REFRESH_INTERVAL = new TimeValue(1, TimeUnit.SECONDS);
+    public static final String INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE = "index.translog.flush_threshold_size";
+    public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60);
+
+    /**
+     * Index setting to enable / disable deletes garbage collection.
+     * This setting is realtime updateable
+     */
+    public static final String INDEX_GC_DELETES_SETTING = "index.gc_deletes";

     private final String uuid;
     private final List<Consumer<Settings>> updateListeners;

@@ -82,7 +91,11 @@ public final class IndexSettings {
     private volatile Translog.Durability durability;
     private final TimeValue syncInterval;
     private volatile TimeValue refreshInterval;
+    private volatile ByteSizeValue flushThresholdSize;
+    private final MergeSchedulerConfig mergeSchedulerConfig;
+    private final MergePolicyConfig mergePolicyConfig;

+    private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis();


     /**

@@ -165,6 +178,10 @@ public final class IndexSettings {
         this.durability = getFromSettings(settings, Translog.Durability.REQUEST);
         syncInterval = settings.getAsTime(INDEX_TRANSLOG_SYNC_INTERVAL, TimeValue.timeValueSeconds(5));
         refreshInterval = settings.getAsTime(INDEX_REFRESH_INTERVAL, DEFAULT_REFRESH_INTERVAL);
+        flushThresholdSize = settings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(512, ByteSizeUnit.MB));
+        mergeSchedulerConfig = new MergeSchedulerConfig(settings);
+        gcDeletesInMillis = settings.getAsTime(IndexSettings.INDEX_GC_DELETES_SETTING, DEFAULT_GC_DELETES).getMillis();
+        this.mergePolicyConfig = new MergePolicyConfig(logger, settings);
         assert indexNameMatcher.test(indexMetaData.getIndex());
     }

@@ -360,13 +377,77 @@ public final class IndexSettings {
             logger.info("updating refresh_interval from [{}] to [{}]", this.refreshInterval, refreshInterval);
             this.refreshInterval = refreshInterval;
         }
+
+        ByteSizeValue flushThresholdSize = settings.getAsBytesSize(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, this.flushThresholdSize);
+        if (!flushThresholdSize.equals(this.flushThresholdSize)) {
+            logger.info("updating flush_threshold_size from [{}] to [{}]", this.flushThresholdSize, flushThresholdSize);
+            this.flushThresholdSize = flushThresholdSize;
+        }
+
+        final int maxThreadCount = settings.getAsInt(MergeSchedulerConfig.MAX_THREAD_COUNT, mergeSchedulerConfig.getMaxThreadCount());
+        if (maxThreadCount != mergeSchedulerConfig.getMaxThreadCount()) {
+            logger.info("updating [{}] from [{}] to [{}]", MergeSchedulerConfig.MAX_THREAD_COUNT, mergeSchedulerConfig.getMaxMergeCount(), maxThreadCount);
+            mergeSchedulerConfig.setMaxThreadCount(maxThreadCount);
+        }
+
+        final int maxMergeCount = settings.getAsInt(MergeSchedulerConfig.MAX_MERGE_COUNT, mergeSchedulerConfig.getMaxMergeCount());
+        if (maxMergeCount != mergeSchedulerConfig.getMaxMergeCount()) {
+            logger.info("updating [{}] from [{}] to [{}]", MergeSchedulerConfig.MAX_MERGE_COUNT, mergeSchedulerConfig.getMaxMergeCount(), maxMergeCount);
+            mergeSchedulerConfig.setMaxMergeCount(maxMergeCount);
+        }
+
+        final boolean autoThrottle = settings.getAsBoolean(MergeSchedulerConfig.AUTO_THROTTLE, mergeSchedulerConfig.isAutoThrottle());
+        if (autoThrottle != mergeSchedulerConfig.isAutoThrottle()) {
+            logger.info("updating [{}] from [{}] to [{}]", MergeSchedulerConfig.AUTO_THROTTLE, mergeSchedulerConfig.isAutoThrottle(), autoThrottle);
+            mergeSchedulerConfig.setAutoThrottle(autoThrottle);
+        }
+
+        long gcDeletesInMillis = settings.getAsTime(IndexSettings.INDEX_GC_DELETES_SETTING, TimeValue.timeValueMillis(this.gcDeletesInMillis)).getMillis();
+        if (gcDeletesInMillis != this.gcDeletesInMillis) {
+            logger.info("updating {} from [{}] to [{}]", IndexSettings.INDEX_GC_DELETES_SETTING, TimeValue.timeValueMillis(this.gcDeletesInMillis), TimeValue.timeValueMillis(gcDeletesInMillis));
+            this.gcDeletesInMillis = gcDeletesInMillis;
+        }
+
+        mergePolicyConfig.onRefreshSettings(settings);
     }

     /**
      * Returns the translog sync interval. This is the interval in which the transaction log is asynchronously fsynced unless
      * the transaction log is fsyncing on every operations
      */
     public TimeValue getTranslogSyncInterval() {
         return syncInterval;
     }

+    /**
+     * Returns this interval in which the shards of this index are asynchronously refreshed. <tt>-1</tt> means async refresh is disabled.
+     */
+    public TimeValue getRefreshInterval() {
+        return refreshInterval;
+    }
+
+    /**
+     * Returns the transaction log threshold size when to forcefully flush the index and clear the transaction log.
+     */
+    public ByteSizeValue getFlushThresholdSize() { return flushThresholdSize; }
+
+    /**
+     * Returns the {@link MergeSchedulerConfig}
+     */
+    public MergeSchedulerConfig getMergeSchedulerConfig() { return mergeSchedulerConfig; }
+
+    /**
+     * Returns the GC deletes cycle in milliseconds.
+     */
+    public long getGcDeletesInMillis() {
+        return gcDeletesInMillis;
+    }
+
+    /**
+     * Returns the merge policy that should be used for this index.
+     */
+    public MergePolicy getMergePolicy() {
+        return mergePolicyConfig.getMergePolicy();
+    }
+
+}
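IndexSettings now owns the dynamic translog, merge-scheduler, and GC-deletes settings, and onRefreshSettings applies each one with the same compare-log-assign step. That update pattern in isolation (the setting name and types are illustrative):

    import java.util.concurrent.TimeUnit;

    class DynamicSettingHolder {
        private volatile long gcDeletesInMillis = TimeUnit.SECONDS.toMillis(60);

        // Mirrors onRefreshSettings: touch the field (and log) only when the value differs.
        void onRefresh(long newGcDeletesInMillis) {
            if (newGcDeletesInMillis != this.gcDeletesInMillis) {
                System.out.printf("updating index.gc_deletes from [%d] to [%d]%n",
                        this.gcDeletesInMillis, newGcDeletesInMillis);
                this.gcDeletesInMillis = newGcDeletesInMillis;
            }
        }
    }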
@@ -17,7 +17,7 @@
  * under the License.
  */

-package org.elasticsearch.index.shard;
+package org.elasticsearch.index;

 import org.apache.lucene.index.MergePolicy;
 import org.apache.lucene.index.NoMergePolicy;

@@ -33,61 +33,61 @@ import org.elasticsearch.common.unit.ByteSizeValue;
 * where the index data is stored, and are immutable up to delete markers.
 * Segments are, periodically, merged into larger segments to keep the
 * index size at bay and expunge deletes.
 *
 * <p>
 * Merges select segments of approximately equal size, subject to an allowed
 * number of segments per tier. The merge policy is able to merge
 * non-adjacent segments, and separates how many segments are merged at once from how many
 * segments are allowed per tier. It also does not over-merge (i.e., cascade merges).
 *
 * <p>
 * All merge policy settings are <b>dynamic</b> and can be updated on a live index.
 * The merge policy has the following settings:
 *
 * <ul>
 * <li><code>index.merge.policy.expunge_deletes_allowed</code>:
 *
 * When expungeDeletes is called, we only merge away a segment if its delete
 * percentage is over this threshold. Default is <code>10</code>.
 *
 * <li><code>index.merge.policy.floor_segment</code>:
 *
 * Segments smaller than this are "rounded up" to this size, i.e. treated as
 * equal (floor) size for merge selection. This is to prevent frequent
 * flushing of tiny segments, thus preventing a long tail in the index. Default
 * is <code>2mb</code>.
 *
 * <li><code>index.merge.policy.max_merge_at_once</code>:
 *
 * Maximum number of segments to be merged at a time during "normal" merging.
 * Default is <code>10</code>.
 *
 * <li><code>index.merge.policy.max_merge_at_once_explicit</code>:
 *
 * Maximum number of segments to be merged at a time, during force merge or
 * expungeDeletes. Default is <code>30</code>.
 *
 * <li><code>index.merge.policy.max_merged_segment</code>:
 *
 * Maximum sized segment to produce during normal merging (not explicit
 * force merge). This setting is approximate: the estimate of the merged
 * segment size is made by summing sizes of to-be-merged segments
 * (compensating for percent deleted docs). Default is <code>5gb</code>.
 *
 * <li><code>index.merge.policy.segments_per_tier</code>:
 *
 * Sets the allowed number of segments per tier. Smaller values mean more
 * merging but fewer segments. Default is <code>10</code>. Note, this value needs to be
 * >= than the <code>max_merge_at_once</code> otherwise you'll force too many merges to
 * occur.
 *
 * <li><code>index.merge.policy.reclaim_deletes_weight</code>:
 *
 * Controls how aggressively merges that reclaim more deletions are favored.
 * Higher values favor selecting merges that reclaim deletions. A value of
 * <code>0.0</code> means deletions don't impact merge selection. Defaults to <code>2.0</code>.
 * </ul>
 *
 * <p>
 * For normal merging, the policy first computes a "budget" of how many
 * segments are allowed to be in the index. If the index is over-budget,

@@ -97,13 +97,13 @@ import org.elasticsearch.common.unit.ByteSizeValue;
 * smallest seg), total merge size and pct deletes reclaimed, so that
 * merges with lower skew, smaller size and those reclaiming more deletes,
 * are favored.
 *
 * <p>
 * If a merge will produce a segment that's larger than
 * <code>max_merged_segment</code> then the policy will merge fewer segments (down to
 * 1 at once, if that one has deletions) to keep the segment size under
 * budget.
 *
 * <p>
 * Note, this can mean that for large shards that holds many gigabytes of
 * data, the default of <code>max_merged_segment</code> (<code>5gb</code>) can cause for many

@@ -138,7 +138,7 @@ public final class MergePolicyConfig {
     public static final String INDEX_MERGE_ENABLED = "index.merge.enabled";


-    public MergePolicyConfig(ESLogger logger, Settings indexSettings) {
+    MergePolicyConfig(ESLogger logger, Settings indexSettings) {
         this.logger = logger;
         this.noCFSRatio = parseNoCFSRatio(indexSettings.get(INDEX_COMPOUND_FORMAT, Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO)));
         double forceMergeDeletesPctAllowed = indexSettings.getAsDouble("index.merge.policy.expunge_deletes_allowed", DEFAULT_EXPUNGE_DELETES_ALLOWED); // percentage

@@ -180,11 +180,11 @@ public final class MergePolicyConfig {
         return maxMergeAtOnce;
     }

-    public MergePolicy getMergePolicy() {
+    MergePolicy getMergePolicy() {
         return mergesEnabled ? mergePolicy : NoMergePolicy.INSTANCE;
     }

-    public void onRefreshSettings(Settings settings) {
+    void onRefreshSettings(Settings settings) {
         final double oldExpungeDeletesPctAllowed = mergePolicy.getForceMergeDeletesPctAllowed();
         final double expungeDeletesPctAllowed = settings.getAsDouble(INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED, oldExpungeDeletesPctAllowed);
         if (expungeDeletesPctAllowed != oldExpungeDeletesPctAllowed) {

@@ -243,7 +243,7 @@ public final class MergePolicyConfig {
         }
     }

-    public static double parseNoCFSRatio(String noCFSRatio) {
+    private static double parseNoCFSRatio(String noCFSRatio) {
         noCFSRatio = noCFSRatio.trim();
         if (noCFSRatio.equalsIgnoreCase("true")) {
             return 1.0d;

@@ -262,7 +262,7 @@ public final class MergePolicyConfig {
         }
     }

-    public static String formatNoCFSRatio(double ratio) {
+    private static String formatNoCFSRatio(double ratio) {
         if (ratio == 1.0) {
             return Boolean.TRUE.toString();
         } else if (ratio == 0.0) {
@@ -17,7 +17,7 @@
  * under the License.
  */

-package org.elasticsearch.index.shard;
+package org.elasticsearch.index;

 import org.apache.lucene.index.ConcurrentMergeScheduler;
 import org.elasticsearch.common.settings.Settings;

@@ -60,8 +60,7 @@ public final class MergeSchedulerConfig {
     private volatile int maxThreadCount;
     private volatile int maxMergeCount;

-    public MergeSchedulerConfig(IndexSettings indexSettings) {
-        final Settings settings = indexSettings.getSettings();
+    MergeSchedulerConfig(Settings settings) {
         maxThreadCount = settings.getAsInt(MAX_THREAD_COUNT, Math.max(1, Math.min(4, EsExecutors.boundedNumberOfProcessors(settings) / 2)));
         maxMergeCount = settings.getAsInt(MAX_MERGE_COUNT, maxThreadCount + 5);
         this.autoThrottle = settings.getAsBoolean(AUTO_THROTTLE, true);

@@ -78,7 +77,7 @@ public final class MergeSchedulerConfig {
     /**
      * Enables / disables auto throttling on the {@link ConcurrentMergeScheduler}
      */
-    public void setAutoThrottle(boolean autoThrottle) {
+    void setAutoThrottle(boolean autoThrottle) {
         this.autoThrottle = autoThrottle;
     }

@@ -93,7 +92,7 @@ public final class MergeSchedulerConfig {
      * Expert: directly set the maximum number of merge threads and
      * simultaneous merges allowed.
      */
-    public void setMaxThreadCount(int maxThreadCount) {
+    void setMaxThreadCount(int maxThreadCount) {
         this.maxThreadCount = maxThreadCount;
     }

@@ -108,7 +107,7 @@ public final class MergeSchedulerConfig {
      *
      * Expert: set the maximum number of simultaneous merges allowed.
      */
-    public void setMaxMergeCount(int maxMergeCount) {
+    void setMaxMergeCount(int maxMergeCount) {
         this.maxMergeCount = maxMergeCount;
     }
 }
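The constructor shows where the merge-scheduler defaults come from: max_thread_count defaults to max(1, min(4, processors / 2)) and max_merge_count to max_thread_count + 5. The arithmetic on its own (availableProcessors stands in for EsExecutors.boundedNumberOfProcessors):

    public class MergeSchedulerDefaults {
        public static void main(String[] args) {
            int processors = Runtime.getRuntime().availableProcessors();
            int maxThreadCount = Math.max(1, Math.min(4, processors / 2));
            int maxMergeCount = maxThreadCount + 5;
            // e.g. 8 processors -> max_thread_count=4, max_merge_count=9
            System.out.println("max_thread_count=" + maxThreadCount + ", max_merge_count=" + maxMergeCount);
        }
    }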
@@ -36,7 +36,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.merge.MergeStats;
 import org.elasticsearch.index.merge.OnGoingMerge;
-import org.elasticsearch.index.shard.MergeSchedulerConfig;
+import org.elasticsearch.index.MergeSchedulerConfig;
 import org.elasticsearch.index.shard.ShardId;

 import java.io.IOException;

@@ -67,8 +67,8 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler {
     private final Set<OnGoingMerge> readOnlyOnGoingMerges = Collections.unmodifiableSet(onGoingMerges);
     private final MergeSchedulerConfig config;

-    public ElasticsearchConcurrentMergeScheduler(ShardId shardId, IndexSettings indexSettings, MergeSchedulerConfig config) {
-        this.config = config;
+    public ElasticsearchConcurrentMergeScheduler(ShardId shardId, IndexSettings indexSettings) {
+        this.config = indexSettings.getMergeSchedulerConfig();
         this.shardId = shardId;
         this.indexSettings = indexSettings.getSettings();
         this.logger = Loggers.getLogger(getClass(), this.indexSettings, shardId);
@@ -31,7 +31,6 @@ import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.codec.CodecService;
-import org.elasticsearch.index.shard.MergeSchedulerConfig;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
 import org.elasticsearch.index.store.Store;

@@ -39,8 +38,6 @@ import org.elasticsearch.index.translog.TranslogConfig;
-import org.elasticsearch.indices.IndexingMemoryController;
 import org.elasticsearch.threadpool.ThreadPool;

-import java.util.concurrent.TimeUnit;

 /*
  * Holds all the configuration that is used to create an {@link Engine}.
  * Once {@link Engine} has been created with this object, changes to this

@@ -51,7 +48,6 @@ public final class EngineConfig {
     private final TranslogRecoveryPerformer translogRecoveryPerformer;
     private final IndexSettings indexSettings;
     private final ByteSizeValue indexingBufferSize;
-    private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis();
     private volatile boolean enableGcDeletes = true;
     private final TimeValue flushMergesAfter;
     private final String codecName;

@@ -60,7 +56,6 @@ public final class EngineConfig {
     private final Store store;
     private final SnapshotDeletionPolicy deletionPolicy;
     private final MergePolicy mergePolicy;
-    private final MergeSchedulerConfig mergeSchedulerConfig;
     private final Analyzer analyzer;
     private final Similarity similarity;
     private final CodecService codecService;

@@ -69,12 +64,6 @@ public final class EngineConfig {
     private final QueryCache queryCache;
     private final QueryCachingPolicy queryCachingPolicy;

-    /**
-     * Index setting to enable / disable deletes garbage collection.
-     * This setting is realtime updateable
-     */
-    public static final String INDEX_GC_DELETES_SETTING = "index.gc_deletes";
-
     /**
      * Index setting to change the low level lucene codec used for writing new segments.
      * This setting is <b>not</b> realtime updateable.

@@ -84,8 +73,6 @@ public final class EngineConfig {
     /** if set to true the engine will start even if the translog id in the commit point can not be found */
     public static final String INDEX_FORCE_NEW_TRANSLOG = "index.engine.force_new_translog";

-    public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60);
-
     private static final String DEFAULT_CODEC_NAME = "default";
     private TranslogConfig translogConfig;
     private boolean create = false;

@@ -95,7 +82,7 @@ public final class EngineConfig {
      */
     public EngineConfig(ShardId shardId, ThreadPool threadPool,
                         IndexSettings indexSettings, Engine.Warmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy,
-                        MergePolicy mergePolicy, MergeSchedulerConfig mergeSchedulerConfig, Analyzer analyzer,
+                        MergePolicy mergePolicy,Analyzer analyzer,
                         Similarity similarity, CodecService codecService, Engine.EventListener eventListener,
                         TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, TranslogConfig translogConfig, TimeValue flushMergesAfter) {
         this.shardId = shardId;

@@ -106,7 +93,6 @@ public final class EngineConfig {
         this.store = store;
         this.deletionPolicy = deletionPolicy;
         this.mergePolicy = mergePolicy;
-        this.mergeSchedulerConfig = mergeSchedulerConfig;
         this.analyzer = analyzer;
         this.similarity = similarity;
         this.codecService = codecService;

@@ -116,7 +102,6 @@ public final class EngineConfig {
         // there are not too many shards allocated to this node. Instead, IndexingMemoryController periodically checks
         // and refreshes the most heap-consuming shards when total indexing heap usage across all shards is too high:
         indexingBufferSize = new ByteSizeValue(256, ByteSizeUnit.MB);
-        gcDeletesInMillis = settings.getAsTime(INDEX_GC_DELETES_SETTING, EngineConfig.DEFAULT_GC_DELETES).millis();
         this.translogRecoveryPerformer = translogRecoveryPerformer;
         this.forceNewTranslog = settings.getAsBoolean(INDEX_FORCE_NEW_TRANSLOG, false);
         this.queryCache = queryCache;

@@ -146,19 +131,12 @@ public final class EngineConfig {
         return indexingBufferSize;
     }

-    /**
-     * Returns the GC deletes cycle in milliseconds.
-     */
-    public long getGcDeletesInMillis() {
-        return gcDeletesInMillis;
-    }
-
     /**
      * Returns <code>true</code> iff delete garbage collection in the engine should be enabled. This setting is updateable
      * in realtime and forces a volatile read. Consumers can safely read this value directly go fetch it's latest value. The default is <code>true</code>
      * <p>
      * Engine GC deletion if enabled collects deleted documents from in-memory realtime data structures after a certain amount of
-     * time ({@link #getGcDeletesInMillis()} if enabled. Before deletes are GCed they will cause re-adding the document that was deleted
+     * time ({@link IndexSettings#getGcDeletesInMillis()} if enabled. Before deletes are GCed they will cause re-adding the document that was deleted
      * to fail.
      * </p>
      */

@@ -218,13 +196,6 @@ public final class EngineConfig {
         return mergePolicy;
     }

-    /**
-     * Returns the {@link MergeSchedulerConfig}
-     */
-    public MergeSchedulerConfig getMergeSchedulerConfig() {
-        return mergeSchedulerConfig;
-    }
-
     /**
      * Returns a listener that should be called on engine failure
      */

@@ -258,13 +229,6 @@ public final class EngineConfig {
         return similarity;
     }

-    /**
-     * Sets the GC deletes cycle in milliseconds.
-     */
-    public void setGcDeletesInMillis(long gcDeletesInMillis) {
-        this.gcDeletesInMillis = gcDeletesInMillis;
-    }
-
     /**
      * Returns the {@link org.elasticsearch.index.shard.TranslogRecoveryPerformer} for this engine. This class is used
      * to apply transaction log operations to the engine. It encapsulates all the logic to transfer the translog entry into
@@ -57,14 +57,12 @@ import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.math.MathUtils;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
-import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 import org.elasticsearch.common.util.concurrent.ReleasableLock;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.merge.MergeStats;
 import org.elasticsearch.index.merge.OnGoingMerge;
 import org.elasticsearch.index.shard.ElasticsearchMergePolicy;
-import org.elasticsearch.index.shard.MergeSchedulerConfig;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
 import org.elasticsearch.index.translog.Translog;

@@ -136,7 +134,7 @@ public class InternalEngine extends Engine {
         try {
             this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().estimatedTimeInMillis();
             this.warmer = engineConfig.getWarmer();
-            mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings(), engineConfig.getMergeSchedulerConfig());
+            mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings());
             this.dirtyLocks = new Object[Runtime.getRuntime().availableProcessors() * 10]; // we multiply it to have enough...
             for (int i = 0; i < dirtyLocks.length; i++) {
                 dirtyLocks[i] = new Object();

@@ -370,7 +368,7 @@ public class InternalEngine extends Engine {
                 deleted = currentVersion == Versions.NOT_FOUND;
             } else {
                 deleted = versionValue.delete();
-                if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > engineConfig.getGcDeletesInMillis()) {
+                if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > getGcDeletesInMillis()) {
                     currentVersion = Versions.NOT_FOUND; // deleted, and GC
                 } else {
                     currentVersion = versionValue.version();

@@ -436,7 +434,7 @@ public class InternalEngine extends Engine {
     private void maybePruneDeletedTombstones() {
         // It's expensive to prune because we walk the deletes map acquiring dirtyLock for each uid so we only do it
         // every 1/4 of gcDeletesInMillis:
-        if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().estimatedTimeInMillis() - lastDeleteVersionPruneTimeMSec > engineConfig.getGcDeletesInMillis() * 0.25) {
+        if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().estimatedTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 0.25) {
             pruneDeletedTombstones();
         }
     }

@@ -452,7 +450,7 @@ public class InternalEngine extends Engine {
                 deleted = currentVersion == Versions.NOT_FOUND;
             } else {
                 deleted = versionValue.delete();
-                if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > engineConfig.getGcDeletesInMillis()) {
+                if (engineConfig.isEnableGcDeletes() && versionValue.delete() && (engineConfig.getThreadPool().estimatedTimeInMillis() - versionValue.time()) > getGcDeletesInMillis()) {
                     currentVersion = Versions.NOT_FOUND; // deleted, and GC
                 } else {
                     currentVersion = versionValue.version();

@@ -701,7 +699,7 @@ public class InternalEngine extends Engine {
                 // Must re-get it here, vs using entry.getValue(), in case the uid was indexed/deleted since we pulled the iterator:
                 VersionValue versionValue = versionMap.getTombstoneUnderLock(uid);
                 if (versionValue != null) {
-                    if (timeMSec - versionValue.time() > engineConfig.getGcDeletesInMillis()) {
+                    if (timeMSec - versionValue.time() > getGcDeletesInMillis()) {
                         versionMap.removeTombstoneUnderLock(uid);
                     }
                 }

@@ -1072,7 +1070,7 @@ public class InternalEngine extends Engine {
     }

     long getGcDeletesInMillis() {
-        return engineConfig.getGcDeletesInMillis();
+        return engineConfig.getIndexSettings().getGcDeletesInMillis();
     }

     LiveIndexWriterConfig getCurrentIndexWriterConfig() {

@@ -1083,8 +1081,8 @@ public class InternalEngine extends Engine {
         private final AtomicInteger numMergesInFlight = new AtomicInteger(0);
         private final AtomicBoolean isThrottling = new AtomicBoolean();

-        EngineMergeScheduler(ShardId shardId, IndexSettings indexSettings, MergeSchedulerConfig config) {
-            super(shardId, indexSettings, config);
+        EngineMergeScheduler(ShardId shardId, IndexSettings indexSettings) {
+            super(shardId, indexSettings);
         }

         @Override
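Every GC-deletes read in InternalEngine now funnels through the single getGcDeletesInMillis() accessor backed by IndexSettings. The age check that accessor feeds, in isolation (time values are example inputs):

    class TombstoneGcCheck {
        // Mirrors the engine's checks: a deleted doc is only eligible for GC once it
        // has been dead longer than the configured index.gc_deletes interval.
        static boolean canGc(long nowInMillis, long deleteTimeInMillis, long gcDeletesInMillis) {
            return nowInMillis - deleteTimeInMillis > gcDeletesInMillis;
        }

        public static void main(String[] args) {
            long now = System.currentTimeMillis();
            System.out.println(canGc(now, now - 120_000, 60_000)); // true: deleted 2 minutes ago, 60s window
        }
    }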
@@ -19,193 +19,29 @@

 package org.elasticsearch.index.fielddata;

-import org.apache.lucene.index.TermsEnum;
-import org.apache.lucene.search.SortField;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
-import org.apache.lucene.util.NumericUtils;
-import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder;
-import org.elasticsearch.index.mapper.core.BooleanFieldMapper;

 /**
  */
 public interface IndexNumericFieldData extends IndexFieldData<AtomicNumericFieldData> {

     public static enum NumericType {
-        BOOLEAN(1, false, SortField.Type.INT, 0, 1) {
-            @Override
-            public long toLong(BytesRef indexForm) {
-                if (indexForm.equals(BooleanFieldMapper.Values.FALSE)) {
-                    return 0;
-                } else if (indexForm.equals(BooleanFieldMapper.Values.TRUE)) {
-                    return 1;
-                } else {
-                    throw new IllegalArgumentException("Cannot convert " + indexForm + " to a boolean");
-                }
-            }
+        BOOLEAN(false),
+        BYTE(false),
+        SHORT(false),
+        INT(false),
+        LONG(false),
+        FLOAT(true),
+        DOUBLE(true);

-            @Override
-            public void toIndexForm(Number number, BytesRefBuilder bytes) {
-                bytes.append(number.intValue() != 0 ? BooleanFieldMapper.Values.TRUE : BooleanFieldMapper.Values.FALSE);
-            }
-
-            @Override
-            public Number toNumber(BytesRef indexForm) {
-                return toLong(indexForm);
-            }
-
-        },
-        BYTE(8, false, SortField.Type.INT, Byte.MIN_VALUE, Byte.MAX_VALUE) {
-            @Override
-            public long toLong(BytesRef indexForm) {
-                return INT.toLong(indexForm);
-            }
-
-            @Override
-            public void toIndexForm(Number number, BytesRefBuilder bytes) {
-                INT.toIndexForm(number, bytes);
-            }
-
-            @Override
-            public Number toNumber(BytesRef indexForm) {
-                return INT.toNumber(indexForm);
-            }
-        },
-        SHORT(16, false, SortField.Type.INT, Short.MIN_VALUE, Short.MAX_VALUE) {
-            @Override
-            public long toLong(BytesRef indexForm) {
-                return INT.toLong(indexForm);
-            }
-
-            @Override
-            public void toIndexForm(Number number, BytesRefBuilder bytes) {
-                INT.toIndexForm(number, bytes);
-            }
-
-            @Override
-            public Number toNumber(BytesRef indexForm) {
-                return INT.toNumber(indexForm);
-            }
-        },
-        INT(32, false, SortField.Type.INT, Integer.MIN_VALUE, Integer.MAX_VALUE) {
-            @Override
-            public long toLong(BytesRef indexForm) {
-                return NumericUtils.prefixCodedToInt(indexForm);
-            }
-
-            @Override
-            public void toIndexForm(Number number, BytesRefBuilder bytes) {
-                NumericUtils.intToPrefixCodedBytes(number.intValue(), 0, bytes);
-            }
-
-            @Override
-            public Number toNumber(BytesRef indexForm) {
-                return NumericUtils.prefixCodedToInt(indexForm);
-            }
-        },
-        LONG(64, false, SortField.Type.LONG, Long.MIN_VALUE, Long.MAX_VALUE) {
-            @Override
-            public long toLong(BytesRef indexForm) {
-                return NumericUtils.prefixCodedToLong(indexForm);
-            }
-
-            @Override
-            public void toIndexForm(Number number, BytesRefBuilder bytes) {
-                NumericUtils.longToPrefixCodedBytes(number.longValue(), 0, bytes);
-            }
-
-            @Override
-            public Number toNumber(BytesRef indexForm) {
-                return NumericUtils.prefixCodedToLong(indexForm);
-            }
-        },
-        FLOAT(32, true, SortField.Type.FLOAT, Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY) {
-            @Override
-            public double toDouble(BytesRef indexForm) {
-                return NumericUtils.sortableIntToFloat(NumericUtils.prefixCodedToInt(indexForm));
-            }
-
-            @Override
-            public void toIndexForm(Number number, BytesRefBuilder bytes) {
-                NumericUtils.intToPrefixCodedBytes(NumericUtils.floatToSortableInt(number.floatValue()), 0, bytes);
-            }
-
-            @Override
-            public Number toNumber(BytesRef indexForm) {
-                return NumericUtils.sortableIntToFloat(NumericUtils.prefixCodedToInt(indexForm));
-            }
-        },
-        DOUBLE(64, true, SortField.Type.DOUBLE, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY) {
-            @Override
-            public double toDouble(BytesRef indexForm) {
-                return NumericUtils.sortableLongToDouble(NumericUtils.prefixCodedToLong(indexForm));
-            }
-
-            @Override
-            public void toIndexForm(Number number, BytesRefBuilder bytes) {
-                NumericUtils.longToPrefixCodedBytes(NumericUtils.doubleToSortableLong(number.doubleValue()), 0, bytes);
-            }
-
-            @Override
-            public Number toNumber(BytesRef indexForm) {
-                return NumericUtils.sortableLongToDouble(NumericUtils.prefixCodedToLong(indexForm));
-            }
-        };

-        private final int requiredBits;
         private final boolean floatingPoint;
-        private final SortField.Type type;
-        private final Number minValue, maxValue;

-        private NumericType(int requiredBits, boolean floatingPoint, SortField.Type type, Number minValue, Number maxValue) {
-            this.requiredBits = requiredBits;
+        private NumericType(boolean floatingPoint) {
             this.floatingPoint = floatingPoint;
-            this.type = type;
-            this.minValue = minValue;
-            this.maxValue = maxValue;
         }

-        public final SortField.Type sortFieldType() {
-            return type;
-        }
-
-        public final Number minValue() {
-            return minValue;
-        }
-
-        public final Number maxValue() {
-            return maxValue;
-        }
-
         public final boolean isFloatingPoint() {
             return floatingPoint;
         }

-        public final int requiredBits() {
-            return requiredBits;
-        }
-
-        public abstract void toIndexForm(Number number, BytesRefBuilder bytes);
-
-        public long toLong(BytesRef indexForm) {
-            return (long) toDouble(indexForm);
-        }
-
-        public double toDouble(BytesRef indexForm) {
-            return (double) toLong(indexForm);
-        }
-
-        public abstract Number toNumber(BytesRef indexForm);
-
-        public final TermsEnum wrapTermsEnum(TermsEnum termsEnum) {
-            if (requiredBits() == 1) { // boolean, no prefix-terms
-                return termsEnum;
-            } else if (requiredBits() > 32) {
-                return OrdinalsBuilder.wrapNumeric64Bit(termsEnum);
-            } else {
-                return OrdinalsBuilder.wrapNumeric32Bit(termsEnum);
-            }
-        }
     }

     NumericType getNumericType();
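After the cleanup, NumericType carries only the floating-point flag; sort types, min/max bounds, and prefix-coded conversions are gone. A self-contained sketch of the slimmed-down enum and a consumer branching on it:

    enum NumericType {
        BOOLEAN(false), BYTE(false), SHORT(false), INT(false), LONG(false), FLOAT(true), DOUBLE(true);

        private final boolean floatingPoint;

        NumericType(boolean floatingPoint) {
            this.floatingPoint = floatingPoint;
        }

        public boolean isFloatingPoint() {
            return floatingPoint;
        }
    }

    class NumericTypeDemo {
        public static void main(String[] args) {
            for (NumericType t : NumericType.values()) {
                System.out.println(t + " floatingPoint=" + t.isFloatingPoint());
            }
        }
    }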
@@ -31,7 +31,7 @@ import java.util.concurrent.TimeUnit;

 /**
  */
-public final class SearchSlowLog{
+public final class SearchSlowLog {

     private boolean reformat;


@@ -62,7 +62,7 @@ public final class SearchSlowLog{
     public static final String INDEX_SEARCH_SLOWLOG_REFORMAT = INDEX_SEARCH_SLOWLOG_PREFIX + ".reformat";
     public static final String INDEX_SEARCH_SLOWLOG_LEVEL = INDEX_SEARCH_SLOWLOG_PREFIX + ".level";

-    SearchSlowLog(Settings indexSettings) {
+    public SearchSlowLog(Settings indexSettings) {

         this.reformat = indexSettings.getAsBoolean(INDEX_SEARCH_SLOWLOG_REFORMAT, true);


@@ -109,7 +109,7 @@ public final class SearchSlowLog{
         }
     }

-    synchronized void onRefreshSettings(Settings settings) {
+    public void onRefreshSettings(Settings settings) {
         long queryWarnThreshold = settings.getAsTime(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN, TimeValue.timeValueNanos(this.queryWarnThreshold)).nanos();
         if (queryWarnThreshold != this.queryWarnThreshold) {
             this.queryWarnThreshold = queryWarnThreshold;
@@ -41,8 +41,8 @@ public final class ShardSearchStats {
     private final CounterMetric openContexts = new CounterMetric();
     private volatile Map<String, StatsHolder> groupsStats = emptyMap();

-    public ShardSearchStats(Settings indexSettings) {
-        this.slowLogSearchService = new SearchSlowLog(indexSettings);
+    public ShardSearchStats(SearchSlowLog searchSlowLog) {
+        this.slowLogSearchService = searchSlowLog;
     }

     /**
@@ -47,7 +47,6 @@ import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.Callback;

@@ -92,6 +91,7 @@ import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchSlowLog;
import org.elasticsearch.index.search.stats.SearchStats;
import org.elasticsearch.index.search.stats.ShardSearchStats;
import org.elasticsearch.index.similarity.SimilarityService;

@@ -141,7 +141,6 @@ public class IndexShard extends AbstractIndexShardComponent {
private final MapperService mapperService;
private final IndexCache indexCache;
private final Store store;
private final MergeSchedulerConfig mergeSchedulerConfig;
private final InternalIndexingStats internalIndexingStats;
private final ShardSearchStats searchService;
private final ShardGetService getService;

@@ -161,7 +160,6 @@ public class IndexShard extends AbstractIndexShardComponent {
private final SimilarityService similarityService;
private final EngineConfig engineConfig;
private final TranslogConfig translogConfig;
private final MergePolicyConfig mergePolicyConfig;
private final IndicesQueryCache indicesQueryCache;
private final IndexEventListener indexEventListener;
private final IndexSettings idxSettings;

@@ -188,15 +186,6 @@ public class IndexShard extends AbstractIndexShardComponent {
private final MeanMetric flushMetric = new MeanMetric();

private final ShardEventListener shardEventListener = new ShardEventListener();
private volatile boolean flushOnClose = true;
private volatile ByteSizeValue flushThresholdSize;

/**
* Index setting to control if a flush is executed before engine is closed
* This setting is realtime updateable.
*/
public static final String INDEX_FLUSH_ON_CLOSE = "index.flush_on_close";
public static final String INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE = "index.translog.flush_threshold_size";

private final ShardPath path;

@@ -215,7 +204,7 @@ public class IndexShard extends AbstractIndexShardComponent {
public IndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache,
MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService,
@Nullable EngineFactory engineFactory,
IndexEventListener indexEventListener, IndexSearcherWrapper indexSearcherWrapper, NodeServicesProvider provider, IndexingOperationListener... listeners) {
IndexEventListener indexEventListener, IndexSearcherWrapper indexSearcherWrapper, NodeServicesProvider provider, SearchSlowLog slowLog, IndexingOperationListener... listeners) {
super(shardId, indexSettings);
final Settings settings = indexSettings.getSettings();
this.idxSettings = indexSettings;

@@ -227,7 +216,6 @@ public class IndexShard extends AbstractIndexShardComponent {
this.engineFactory = engineFactory == null ? new InternalEngineFactory() : engineFactory;
this.store = store;
this.indexEventListener = indexEventListener;
this.mergeSchedulerConfig = new MergeSchedulerConfig(indexSettings);
this.threadPool = provider.getThreadPool();
this.mapperService = mapperService;
this.indexCache = indexCache;

@@ -237,7 +225,7 @@ public class IndexShard extends AbstractIndexShardComponent {
this.indexingOperationListeners = new IndexingOperationListener.CompositeListener(listenersList, logger);
this.getService = new ShardGetService(indexSettings, this, mapperService);
this.termVectorsService = provider.getTermVectorsService();
this.searchService = new ShardSearchStats(settings);
this.searchService = new ShardSearchStats(slowLog);
this.shardWarmerService = new ShardIndexWarmerService(shardId, indexSettings);
this.indicesQueryCache = provider.getIndicesQueryCache();
this.shardQueryCache = new ShardRequestCache(shardId, indexSettings);

@@ -245,9 +233,7 @@ public class IndexShard extends AbstractIndexShardComponent {
this.indexFieldDataService = indexFieldDataService;
this.shardBitsetFilterCache = new ShardBitsetFilterCache(shardId, indexSettings);
state = IndexShardState.CREATED;
this.flushOnClose = settings.getAsBoolean(INDEX_FLUSH_ON_CLOSE, true);
this.path = path;
this.mergePolicyConfig = new MergePolicyConfig(logger, settings);
/* create engine config */
logger.debug("state: [CREATED]");

@@ -264,7 +250,6 @@ public class IndexShard extends AbstractIndexShardComponent {
}

this.engineConfig = newEngineConfig(translogConfig, cachingPolicy);
this.flushThresholdSize = settings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(512, ByteSizeUnit.MB));
this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId);
this.provider = provider;
this.searcherWrapper = indexSearcherWrapper;

@@ -817,7 +802,7 @@ public class IndexShard extends AbstractIndexShardComponent {
} finally {
final Engine engine = this.currentEngineReference.getAndSet(null);
try {
if (engine != null && flushEngine && this.flushOnClose) {
if (engine != null && flushEngine) {
engine.flushAndClose();
}
} finally { // playing safe here and close the engine even if the above succeeds - close can be called multiple times

@@ -1048,10 +1033,6 @@ public class IndexShard extends AbstractIndexShardComponent {
}
}

public final boolean isFlushOnClose() {
return flushOnClose;
}

/**
* Deletes the shards metadata state. This method can only be executed if the shard is not active.
*

@@ -1093,7 +1074,7 @@ public class IndexShard extends AbstractIndexShardComponent {
if (engine != null) {
try {
Translog translog = engine.getTranslog();
return translog.sizeInBytes() > flushThresholdSize.bytes();
return translog.sizeInBytes() > indexSettings.getFlushThresholdSize().bytes();
} catch (AlreadyClosedException | EngineClosedException ex) {
// that's fine we are already close - no need to flush
}

@@ -1101,57 +1082,10 @@ public class IndexShard extends AbstractIndexShardComponent {
return false;
}

public void onRefreshSettings(Settings settings) {
boolean change = false;
synchronized (mutex) {
if (state() == IndexShardState.CLOSED) { // no need to update anything if we are closed
return;
}
ByteSizeValue flushThresholdSize = settings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, this.flushThresholdSize);
if (!flushThresholdSize.equals(this.flushThresholdSize)) {
logger.info("updating flush_threshold_size from [{}] to [{}]", this.flushThresholdSize, flushThresholdSize);
this.flushThresholdSize = flushThresholdSize;
}

final EngineConfig config = engineConfig;
final boolean flushOnClose = settings.getAsBoolean(INDEX_FLUSH_ON_CLOSE, this.flushOnClose);
if (flushOnClose != this.flushOnClose) {
logger.info("updating {} from [{}] to [{}]", INDEX_FLUSH_ON_CLOSE, this.flushOnClose, flushOnClose);
this.flushOnClose = flushOnClose;
}

long gcDeletesInMillis = settings.getAsTime(EngineConfig.INDEX_GC_DELETES_SETTING, TimeValue.timeValueMillis(config.getGcDeletesInMillis())).millis();
if (gcDeletesInMillis != config.getGcDeletesInMillis()) {
logger.info("updating {} from [{}] to [{}]", EngineConfig.INDEX_GC_DELETES_SETTING, TimeValue.timeValueMillis(config.getGcDeletesInMillis()), TimeValue.timeValueMillis(gcDeletesInMillis));
config.setGcDeletesInMillis(gcDeletesInMillis);
change = true;
}

final int maxThreadCount = settings.getAsInt(MergeSchedulerConfig.MAX_THREAD_COUNT, mergeSchedulerConfig.getMaxThreadCount());
if (maxThreadCount != mergeSchedulerConfig.getMaxThreadCount()) {
logger.info("updating [{}] from [{}] to [{}]", MergeSchedulerConfig.MAX_THREAD_COUNT, mergeSchedulerConfig.getMaxMergeCount(), maxThreadCount);
mergeSchedulerConfig.setMaxThreadCount(maxThreadCount);
change = true;
}

final int maxMergeCount = settings.getAsInt(MergeSchedulerConfig.MAX_MERGE_COUNT, mergeSchedulerConfig.getMaxMergeCount());
if (maxMergeCount != mergeSchedulerConfig.getMaxMergeCount()) {
logger.info("updating [{}] from [{}] to [{}]", MergeSchedulerConfig.MAX_MERGE_COUNT, mergeSchedulerConfig.getMaxMergeCount(), maxMergeCount);
mergeSchedulerConfig.setMaxMergeCount(maxMergeCount);
change = true;
}

final boolean autoThrottle = settings.getAsBoolean(MergeSchedulerConfig.AUTO_THROTTLE, mergeSchedulerConfig.isAutoThrottle());
if (autoThrottle != mergeSchedulerConfig.isAutoThrottle()) {
logger.info("updating [{}] from [{}] to [{}]", MergeSchedulerConfig.AUTO_THROTTLE, mergeSchedulerConfig.isAutoThrottle(), autoThrottle);
mergeSchedulerConfig.setAutoThrottle(autoThrottle);
change = true;
}
}
mergePolicyConfig.onRefreshSettings(settings);
searchService.onRefreshSettings(settings);
if (change) {
getEngine().onSettingsChanged();
public void onSettingsChanged() {
Engine engineOrNull = getEngineOrNull();
if (engineOrNull != null) {
engineOrNull.onSettingsChanged();
}
}

@@ -1431,7 +1365,7 @@ public class IndexShard extends AbstractIndexShardComponent {
};
final Engine.Warmer engineWarmer = (searcher, toLevel) -> warmer.warm(searcher, this, idxSettings, toLevel);
return new EngineConfig(shardId,
threadPool, indexSettings, engineWarmer, store, deletionPolicy, mergePolicyConfig.getMergePolicy(), mergeSchedulerConfig,
threadPool, indexSettings, engineWarmer, store, deletionPolicy, indexSettings.getMergePolicy(),
mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig,
idxSettings.getSettings().getAsTime(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING, IndexingMemoryController.SHARD_DEFAULT_INACTIVE_TIME));
}
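
The removed onRefreshSettings body parsed and applied each setting inside the shard; its replacement, onSettingsChanged, only forwards the notification to the engine if one is currently open, with the parsing itself moved into IndexSettings (see the getFlushThresholdSize and getMergePolicy calls above). A compact sketch of the new notification shape, with Engine as a stand-in interface:

// Simplified sketch of the notification pattern introduced above; Engine is a stand-in.
import java.util.concurrent.atomic.AtomicReference;

class ShardSketch {
    interface Engine { void onSettingsChanged(); }

    private final AtomicReference<Engine> currentEngine = new AtomicReference<>();

    // Settings parsing now lives elsewhere; the shard only forwards the event.
    public void onSettingsChanged() {
        Engine engine = currentEngine.get(); // may be null before recovery or after close
        if (engine != null) {
            engine.onSettingsChanged();
        }
    }
}
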
@@ -29,6 +29,7 @@ import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.search.stats.SearchSlowLog;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.TranslogStats;

@@ -44,8 +45,8 @@ import java.io.IOException;
public final class ShadowIndexShard extends IndexShard {

public ShadowIndexShard(ShardId shardId, IndexSettings indexSettings, ShardPath path, Store store, IndexCache indexCache, MapperService mapperService, SimilarityService similarityService, IndexFieldDataService indexFieldDataService, @Nullable EngineFactory engineFactory,
IndexEventListener indexEventListener, IndexSearcherWrapper wrapper, NodeServicesProvider provider) throws IOException {
super(shardId, indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldDataService, engineFactory, indexEventListener, wrapper, provider);
IndexEventListener indexEventListener, IndexSearcherWrapper wrapper, NodeServicesProvider provider, SearchSlowLog searchSlowLog) throws IOException {
super(shardId, indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldDataService, engineFactory, indexEventListener, wrapper, provider, searchSlowLog);
}

/**
@@ -576,7 +576,11 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
if (current.getTragicException() != null) {
try {
close();
} catch (AlreadyClosedException inner) {
// don't do anything in this case. The AlreadyClosedException comes from TranslogWriter and we should not add it as suppressed because
// it will contain the Exception ex as cause. See also https://github.com/elastic/elasticsearch/issues/15941
} catch (Exception inner) {
assert (ex != inner.getCause());
ex.addSuppressed(inner);
}
}
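
The new catch blocks avoid registering a suppressed exception that already chains back to the original failure, which would make the reported trace circular; the AlreadyClosedException from TranslogWriter is dropped outright for exactly that reason, and the assert protects the same invariant in the generic branch. A standalone illustration of that guard:

// Standalone illustration of the invariant the assert above protects: don't
// suppress an exception that already carries the original failure as its cause.
public class SuppressGuard {
    public static void main(String[] args) {
        Exception ex = new Exception("tragic event");
        Exception inner = new Exception("close failed", ex); // cause chains back to ex
        if (inner.getCause() != ex) {
            ex.addSuppressed(inner);
        }
        System.out.println(ex.getSuppressed().length); // 0 - inner was skipped
    }
}
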
@@ -111,8 +111,9 @@ public class OsProbe {
* Returns the system load averages
*/
public double[] getSystemLoadAverage() {
if (Constants.LINUX) {
double[] loadAverage = readProcLoadavg("/proc/loadavg");
if (Constants.LINUX || Constants.FREE_BSD) {
final String procLoadAvg = Constants.LINUX ? "/proc/loadavg" : "/compat/linux/proc/loadavg";
double[] loadAverage = readProcLoadavg(procLoadAvg);
if (loadAverage != null) {
return loadAverage;
}
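
readProcLoadavg itself is outside this diff; the sketch below is a hypothetical reading of it, assuming the standard loadavg format ("0.12 0.34 0.56 1/234 5678") that both /proc/loadavg and FreeBSD's Linux-compat path expose, and returning null so the caller above can fall back:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;

// Hypothetical reimplementation for illustration; the real readProcLoadavg is not shown here.
final class LoadAvg {
    static double[] readProcLoadavg(String path) {
        try {
            List<String> lines = Files.readAllLines(Paths.get(path));
            if (lines.isEmpty()) {
                return null;
            }
            String[] fields = lines.get(0).split("\\s+");
            // the first three fields are the 1, 5 and 15 minute load averages
            return new double[] {
                Double.parseDouble(fields[0]),
                Double.parseDouble(fields[1]),
                Double.parseDouble(fields[2])
            };
        } catch (IOException | NumberFormatException | ArrayIndexOutOfBoundsException e) {
            return null; // caller falls back when the proc file is unavailable
        }
    }
}
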
@@ -0,0 +1,117 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.search.rescore;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;
import java.util.Locale;

public enum QueryRescoreMode implements Writeable<QueryRescoreMode> {
Avg {
@Override
public float combine(float primary, float secondary) {
return (primary + secondary) / 2;
}

@Override
public String toString() {
return "avg";
}
},
Max {
@Override
public float combine(float primary, float secondary) {
return Math.max(primary, secondary);
}

@Override
public String toString() {
return "max";
}
},
Min {
@Override
public float combine(float primary, float secondary) {
return Math.min(primary, secondary);
}

@Override
public String toString() {
return "min";
}
},
Total {
@Override
public float combine(float primary, float secondary) {
return primary + secondary;
}

@Override
public String toString() {
return "sum";
}
},
Multiply {
@Override
public float combine(float primary, float secondary) {
return primary * secondary;
}

@Override
public String toString() {
return "product";
}
};

public abstract float combine(float primary, float secondary);

static QueryRescoreMode PROTOTYPE = Total;

@Override
public QueryRescoreMode readFrom(StreamInput in) throws IOException {
int ordinal = in.readVInt();
if (ordinal < 0 || ordinal >= values().length) {
throw new IOException("Unknown ScoreMode ordinal [" + ordinal + "]");
}
return values()[ordinal];
}

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(this.ordinal());
}

public static QueryRescoreMode fromString(String scoreMode) {
for (QueryRescoreMode mode : values()) {
if (scoreMode.toLowerCase(Locale.ROOT).equals(mode.name().toLowerCase(Locale.ROOT))) {
return mode;
}
}
throw new IllegalArgumentException("illegal score_mode [" + scoreMode + "]");
}

@Override
public String toString() {
return name().toLowerCase(Locale.ROOT);
}
}
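
A short usage sketch of the enum introduced above; fromString is case-insensitive and each constant supplies its own combine strategy and wire-format name (assumes the same package or an import of org.elasticsearch.search.rescore.QueryRescoreMode):

public class QueryRescoreModeDemo {
    public static void main(String[] args) {
        QueryRescoreMode avg = QueryRescoreMode.fromString("AVG");         // case-insensitive lookup
        System.out.println(avg.combine(1.0f, 3.0f));                       // 2.0
        System.out.println(QueryRescoreMode.Total.combine(1.0f, 3.0f));    // 4.0
        System.out.println(QueryRescoreMode.Multiply);                     // "product" via toString()
    }
}
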
@@ -38,66 +38,6 @@ import java.util.Set;

public final class QueryRescorer implements Rescorer {

private static enum ScoreMode {
Avg {
@Override
public float combine(float primary, float secondary) {
return (primary + secondary) / 2;
}

@Override
public String toString() {
return "avg";
}
},
Max {
@Override
public float combine(float primary, float secondary) {
return Math.max(primary, secondary);
}

@Override
public String toString() {
return "max";
}
},
Min {
@Override
public float combine(float primary, float secondary) {
return Math.min(primary, secondary);
}

@Override
public String toString() {
return "min";
}
},
Total {
@Override
public float combine(float primary, float secondary) {
return primary + secondary;
}

@Override
public String toString() {
return "sum";
}
},
Multiply {
@Override
public float combine(float primary, float secondary) {
return primary * secondary;
}

@Override
public String toString() {
return "product";
}
};

public abstract float combine(float primary, float secondary);
}

public static final Rescorer INSTANCE = new QueryRescorer();
public static final String NAME = "query";

@@ -170,7 +110,7 @@ public final class QueryRescorer implements Rescorer {
rescoreExplain.getValue() * secondaryWeight,
"product of:",
rescoreExplain, Explanation.match(secondaryWeight, "secondaryWeight"));
ScoreMode scoreMode = rescore.scoreMode();
QueryRescoreMode scoreMode = rescore.scoreMode();
return Explanation.match(
scoreMode.combine(prim.getValue(), sec.getValue()),
scoreMode + " of:",

@@ -228,7 +168,7 @@ public final class QueryRescorer implements Rescorer {
// secondary score?
in.scoreDocs[i].score *= ctx.queryWeight();
}


// TODO: this is wrong, i.e. we are comparing apples and oranges at this point. It would be better if we always rescored all
// incoming first pass hits, instead of allowing recoring of just the top subset:
Arrays.sort(in.scoreDocs, SCORE_DOC_COMPARATOR);

@@ -240,13 +180,13 @@ public final class QueryRescorer implements Rescorer {

public QueryRescoreContext(QueryRescorer rescorer) {
super(NAME, 10, rescorer);
this.scoreMode = ScoreMode.Total;
this.scoreMode = QueryRescoreMode.Total;
}

private ParsedQuery parsedQuery;
private float queryWeight = 1.0f;
private float rescoreQueryWeight = 1.0f;
private ScoreMode scoreMode;
private QueryRescoreMode scoreMode;

public void setParsedQuery(ParsedQuery parsedQuery) {
this.parsedQuery = parsedQuery;

@@ -264,7 +204,7 @@ public final class QueryRescorer implements Rescorer {
return rescoreQueryWeight;
}

public ScoreMode scoreMode() {
public QueryRescoreMode scoreMode() {
return scoreMode;
}

@@ -276,26 +216,13 @@ public final class QueryRescorer implements Rescorer {
this.queryWeight = queryWeight;
}

public void setScoreMode(ScoreMode scoreMode) {
public void setScoreMode(QueryRescoreMode scoreMode) {
this.scoreMode = scoreMode;
}

public void setScoreMode(String scoreMode) {
if ("avg".equals(scoreMode)) {
setScoreMode(ScoreMode.Avg);
} else if ("max".equals(scoreMode)) {
setScoreMode(ScoreMode.Max);
} else if ("min".equals(scoreMode)) {
setScoreMode(ScoreMode.Min);
} else if ("total".equals(scoreMode)) {
setScoreMode(ScoreMode.Total);
} else if ("multiply".equals(scoreMode)) {
setScoreMode(ScoreMode.Multiply);
} else {
throw new IllegalArgumentException("illegal score_mode [" + scoreMode + "]");
}
setScoreMode(QueryRescoreMode.fromString(scoreMode));
}

}

@Override
@@ -19,24 +19,36 @@

package org.elasticsearch.search.rescore;

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;

import java.io.IOException;
import java.util.Locale;
import java.util.Objects;

public class RescoreBuilder implements ToXContent {
public class RescoreBuilder implements ToXContent, Writeable<RescoreBuilder> {

private Rescorer rescorer;
private Integer windowSize;
public static final RescoreBuilder PROTOYPE = new RescoreBuilder(new QueryRescorer(new MatchAllQueryBuilder()));

public static QueryRescorer queryRescorer(QueryBuilder queryBuilder) {
return new QueryRescorer(queryBuilder);
public RescoreBuilder(Rescorer rescorer) {
if (rescorer == null) {
throw new IllegalArgumentException("rescorer cannot be null");
}
this.rescorer = rescorer;
}

public RescoreBuilder rescorer(Rescorer rescorer) {
this.rescorer = rescorer;
return this;
public Rescorer rescorer() {
return this.rescorer;
}

public RescoreBuilder windowSize(int windowSize) {

@@ -48,10 +60,6 @@ public class RescoreBuilder implements ToXContent {
return windowSize;
}

public boolean isEmpty() {
return rescorer == null;
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (windowSize != null) {

@@ -61,13 +69,66 @@ public class RescoreBuilder implements ToXContent {
return builder;
}

public static abstract class Rescorer implements ToXContent {
public static QueryRescorer queryRescorer(QueryBuilder<?> queryBuilder) {
return new QueryRescorer(queryBuilder);
}

@Override
public final int hashCode() {
return Objects.hash(windowSize, rescorer);
}

@Override
public final boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
RescoreBuilder other = (RescoreBuilder) obj;
return Objects.equals(windowSize, other.windowSize) &&
Objects.equals(rescorer, other.rescorer);
}

@Override
public RescoreBuilder readFrom(StreamInput in) throws IOException {
RescoreBuilder builder = new RescoreBuilder(in.readRescorer());
Integer windowSize = in.readOptionalVInt();
if (windowSize != null) {
builder.windowSize(windowSize);
}
return builder;
}

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeRescorer(rescorer);
out.writeOptionalVInt(this.windowSize);
}

@Override
public final String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (Exception e) {
return "{ \"error\" : \"" + ExceptionsHelper.detailedMessage(e) + "\"}";
}
}

public static abstract class Rescorer implements ToXContent, NamedWriteable<Rescorer> {

private String name;

public Rescorer(String name) {
this.name = name;
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(name);

@@ -78,23 +139,41 @@ public class RescoreBuilder implements ToXContent {

protected abstract XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException;

@Override
public abstract int hashCode();

@Override
public abstract boolean equals(Object obj);
}

public static class QueryRescorer extends Rescorer {

private static final String NAME = "query";
private QueryBuilder queryBuilder;
private Float rescoreQueryWeight;
private Float queryWeight;
private String scoreMode;
public static final QueryRescorer PROTOTYPE = new QueryRescorer(new MatchAllQueryBuilder());
public static final float DEFAULT_RESCORE_QUERYWEIGHT = 1.0f;
public static final float DEFAULT_QUERYWEIGHT = 1.0f;
public static final QueryRescoreMode DEFAULT_SCORE_MODE = QueryRescoreMode.Total;
private final QueryBuilder<?> queryBuilder;
private float rescoreQueryWeight = DEFAULT_RESCORE_QUERYWEIGHT;
private float queryWeight = DEFAULT_QUERYWEIGHT;
private QueryRescoreMode scoreMode = DEFAULT_SCORE_MODE;

/**
* Creates a new {@link QueryRescorer} instance
* @param builder the query builder to build the rescore query from
*/
public QueryRescorer(QueryBuilder builder) {
public QueryRescorer(QueryBuilder<?> builder) {
super(NAME);
this.queryBuilder = builder;
}

/**
* @return the query used for this rescore query
*/
public QueryBuilder<?> getRescoreQuery() {
return this.queryBuilder;
}

/**
* Sets the original query weight for rescoring. The default is <tt>1.0</tt>
*/

@@ -103,6 +182,14 @@ public class RescoreBuilder implements ToXContent {
return this;
}


/**
* Gets the original query weight for rescoring. The default is <tt>1.0</tt>
*/
public float getQueryWeight() {
return this.queryWeight;
}

/**
* Sets the original query weight for rescoring. The default is <tt>1.0</tt>
*/

@@ -112,27 +199,76 @@ public class RescoreBuilder implements ToXContent {
}

/**
* Sets the original query score mode. The default is <tt>total</tt>
* Gets the original query weight for rescoring. The default is <tt>1.0</tt>
*/
public QueryRescorer setScoreMode(String scoreMode) {
public float getRescoreQueryWeight() {
return this.rescoreQueryWeight;
}

/**
* Sets the original query score mode. The default is {@link QueryRescoreMode#Total}.
*/
public QueryRescorer setScoreMode(QueryRescoreMode scoreMode) {
this.scoreMode = scoreMode;
return this;
}

/**
* Gets the original query score mode. The default is <tt>total</tt>
*/
public QueryRescoreMode getScoreMode() {
return this.scoreMode;
}

@Override
protected XContentBuilder innerToXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("rescore_query", queryBuilder);
if (queryWeight != null) {
builder.field("query_weight", queryWeight);
}
if (rescoreQueryWeight != null) {
builder.field("rescore_query_weight", rescoreQueryWeight);
}
if (scoreMode != null) {
builder.field("score_mode", scoreMode);
}
builder.field("query_weight", queryWeight);
builder.field("rescore_query_weight", rescoreQueryWeight);
builder.field("score_mode", scoreMode.name().toLowerCase(Locale.ROOT));
return builder;
}
}

@Override
public final int hashCode() {
return Objects.hash(getClass(), scoreMode, queryWeight, rescoreQueryWeight, queryBuilder);
}

@Override
public final boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
QueryRescorer other = (QueryRescorer) obj;
return Objects.equals(scoreMode, other.scoreMode) &&
Objects.equals(queryWeight, other.queryWeight) &&
Objects.equals(rescoreQueryWeight, other.rescoreQueryWeight) &&
Objects.equals(queryBuilder, other.queryBuilder);
}

@Override
public QueryRescorer readFrom(StreamInput in) throws IOException {
QueryRescorer rescorer = new QueryRescorer(in.readQuery());
rescorer.setScoreMode(QueryRescoreMode.PROTOTYPE.readFrom(in));
rescorer.setRescoreQueryWeight(in.readFloat());
rescorer.setQueryWeight(in.readFloat());
return rescorer;
}

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeQuery(queryBuilder);
scoreMode.writeTo(out);
out.writeFloat(rescoreQueryWeight);
out.writeFloat(queryWeight);
}

@Override
public String getWriteableName() {
return NAME;
}
}
}
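
A sketch of the reworked builder API, using only members shown above and assuming the remaining fluent setters return this the way setScoreMode does:

import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.search.rescore.QueryRescoreMode;
import org.elasticsearch.search.rescore.RescoreBuilder;

// Sketch against the API in the diff above; not a verbatim excerpt of the commit.
public class RescoreBuilderDemo {
    public static void main(String[] args) {
        RescoreBuilder.QueryRescorer rescorer = RescoreBuilder.queryRescorer(new MatchAllQueryBuilder())
                .setScoreMode(QueryRescoreMode.Max);       // combine first-pass and rescore scores by max
        RescoreBuilder builder = new RescoreBuilder(rescorer).windowSize(50); // rescore the top 50 hits
        System.out.println(builder);                       // toString() renders the JSON body
    }
}
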
@@ -118,4 +118,7 @@ grant {

// load averages on Linux
permission java.io.FilePermission "/proc/loadavg", "read";

// load averages on FreeBSD
permission java.io.FilePermission "/compat/linux/proc/loadavg", "read";
};
@@ -200,6 +200,7 @@ public class CreateIndexIT extends ESIntegTestCase {
}
}

@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/14932,https://github.com/elastic/elasticsearch/pull/15853" )
public void testCreateAndDeleteIndexConcurrently() throws InterruptedException {
createIndex("test");
final AtomicInteger indexVersion = new AtomicInteger(0);
@@ -22,7 +22,7 @@ package org.elasticsearch.action.admin.indices.segments;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.engine.Segment;
import org.elasticsearch.index.shard.MergePolicyConfig;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.indices.IndexClosedException;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.junit.Before;
@@ -50,7 +50,7 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.Segment;
import org.elasticsearch.index.mapper.string.StringFieldMapperPositionIncrementGapTests;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.shard.MergePolicyConfig;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
@@ -164,7 +164,50 @@ public class DiskUsageTests extends ESTestCase {
assertDiskUsage(mostNode_3, node3FSInfo[1]);
}

public void testFillDiskUsageSomeInvalidValues() {
ImmutableOpenMap.Builder<String, DiskUsage> newLeastAvailableUsages = ImmutableOpenMap.builder();
ImmutableOpenMap.Builder<String, DiskUsage> newMostAvailableUsages = ImmutableOpenMap.builder();
FsInfo.Path[] node1FSInfo = new FsInfo.Path[] {
new FsInfo.Path("/middle", "/dev/sda", 100, 90, 80),
new FsInfo.Path("/least", "/dev/sdb", -1, -1, -1),
new FsInfo.Path("/most", "/dev/sdc", 300, 290, 280),
};
FsInfo.Path[] node2FSInfo = new FsInfo.Path[] {
new FsInfo.Path("/least_most", "/dev/sda", -2, -1, -1),
};

FsInfo.Path[] node3FSInfo = new FsInfo.Path[] {
new FsInfo.Path("/most", "/dev/sda", 100, 90, 70),
new FsInfo.Path("/least", "/dev/sda", 10, -8, 0),
};
NodeStats[] nodeStats = new NodeStats[] {
new NodeStats(new DiscoveryNode("node_1", DummyTransportAddress.INSTANCE, Version.CURRENT), 0,
null,null,null,null,null,new FsInfo(0, node1FSInfo), null,null,null,null,null),
new NodeStats(new DiscoveryNode("node_2", DummyTransportAddress.INSTANCE, Version.CURRENT), 0,
null,null,null,null,null, new FsInfo(0, node2FSInfo), null,null,null,null,null),
new NodeStats(new DiscoveryNode("node_3", DummyTransportAddress.INSTANCE, Version.CURRENT), 0,
null,null,null,null,null, new FsInfo(0, node3FSInfo), null,null,null,null,null)
};
InternalClusterInfoService.fillDiskUsagePerNode(logger, nodeStats, newLeastAvailableUsages, newMostAvailableUsages);
DiskUsage leastNode_1 = newLeastAvailableUsages.get("node_1");
DiskUsage mostNode_1 = newMostAvailableUsages.get("node_1");
assertNull("node1 should have been skipped", leastNode_1);
assertDiskUsage(mostNode_1, node1FSInfo[2]);

DiskUsage leastNode_2 = newLeastAvailableUsages.get("node_2");
DiskUsage mostNode_2 = newMostAvailableUsages.get("node_2");
assertNull("node2 should have been skipped", leastNode_2);
assertNull("node2 should have been skipped", mostNode_2);

DiskUsage leastNode_3 = newLeastAvailableUsages.get("node_3");
DiskUsage mostNode_3 = newMostAvailableUsages.get("node_3");
assertDiskUsage(leastNode_3, node3FSInfo[1]);
assertDiskUsage(mostNode_3, node3FSInfo[0]);
}

private void assertDiskUsage(DiskUsage usage, FsInfo.Path path) {
assertNotNull(usage);
assertNotNull(path);
assertEquals(usage.toString(), usage.getPath(), path.getPath());
assertEquals(usage.toString(), usage.getTotalBytes(), path.getTotal().bytes());
assertEquals(usage.toString(), usage.getFreeBytes(), path.getAvailable().bytes());
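
fillDiskUsagePerNode itself is not part of this diff; the test pins down its observable behavior, which suggests a validity guard along these lines (hypothetical):

// Hypothetical guard matching what the test asserts: paths reporting negative
// sizes are ignored, and a node whose every path is invalid gets no usage entry.
final class DiskUsageGuard {
    static boolean isValid(long totalBytes, long availableBytes) {
        return totalBytes >= 0 && availableBytes >= 0;
    }
}
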
@@ -22,11 +22,14 @@ package org.elasticsearch.cluster.routing;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.disruption.NetworkDisconnectPartition;
import org.elasticsearch.test.transport.MockTransportService;

import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;

@@ -41,6 +44,12 @@ import static org.hamcrest.Matchers.equalTo;
@ESIntegTestCase.SuppressLocalMode
public class PrimaryAllocationIT extends ESIntegTestCase {

@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
// disruption tests need MockTransportService
return pluginList(MockTransportService.TestPlugin.class);
}

public void testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception {
logger.info("--> starting 3 nodes, 1 master, 2 data");
String master = internalCluster().startMasterOnlyNode(Settings.EMPTY);
@@ -188,6 +188,25 @@ public class ObjectParserTests extends ESTestCase {
}
}

enum TestEnum {
FOO, BAR
};

public void testParseEnumFromString() throws IOException {
class TestStruct {
public TestEnum test;

public void set(TestEnum value) {
test = value;
}
}
XContentParser parser = XContentType.JSON.xContent().createParser("{ \"test\" : \"FOO\" }");
ObjectParser<TestStruct, Void> objectParser = new ObjectParser("foo");
objectParser.declareString((struct, value) -> struct.set(TestEnum.valueOf(value)), new ParseField("test"));
TestStruct s = objectParser.parse(parser, new TestStruct());
assertEquals(s.test, TestEnum.FOO);
}

public void testAllVariants() throws IOException {
XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent());
builder.startObject();
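
One caveat with the pattern in testParseEnumFromString: TestEnum.valueOf is case-sensitive, so a lowercase "foo" in the JSON would throw IllegalArgumentException. A lenient variant of the same declaration, building on the test code above (hypothetical, not part of the commit):

// Case-insensitive variant of the mapping used in the test:
objectParser.declareString(
    (struct, value) -> struct.set(TestEnum.valueOf(value.toUpperCase(java.util.Locale.ROOT))),
    new ParseField("test"));
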
@@ -169,11 +169,13 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
IndexService.BaseAsyncTask task = new IndexService.BaseAsyncTask(indexService, TimeValue.timeValueMillis(1)) {
@Override
protected void runInternal() {
final CountDownLatch l1 = latch.get();
final CountDownLatch l2 = latch2.get();
count.incrementAndGet();
assertTrue("generic threadpool is configured", Thread.currentThread().getName().contains("[generic]"));
latch.get().countDown();
l1.countDown();
try {
latch2.get().await();
l2.await();
} catch (InterruptedException e) {
fail("interrupted");
}
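
The locals l1 and l2 above make the task read each AtomicReference exactly once, so countDown and await are guaranteed to hit the same latch instance even if the test thread swaps the reference concurrently. A standalone sketch of the pattern:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

// Illustration of why the locals matter: two separate reads of latch.get()
// may observe different latches if the reference is swapped between them.
public class LatchCaching {
    static final AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(1));

    static void runInternal() throws InterruptedException {
        final CountDownLatch l = latch.get(); // read once, use the same instance below
        l.countDown();
        l.await(); // cannot deadlock against a freshly swapped latch
    }

    public static void main(String[] args) throws InterruptedException {
        runInternal();
        System.out.println("done");
    }
}
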
@@ -181,7 +181,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
Settings idxSettings = Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
.put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(1, ByteSizeUnit.PB))
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(1, ByteSizeUnit.PB))
.put(IndexMetaData.SETTING_DATA_PATH, dataPath.toAbsolutePath().toString())
.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
.put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
@@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.shard;
package org.elasticsearch.index;

import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.TieredMergePolicy;

@@ -24,6 +24,8 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
@@ -21,52 +21,44 @@ package org.elasticsearch.index.engine;
import org.apache.lucene.index.LiveIndexWriterConfig;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.EngineAccess;
import org.elasticsearch.test.ESSingleNodeTestCase;

import java.util.concurrent.TimeUnit;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;

public class InternalEngineSettingsTests extends ESSingleNodeTestCase {

public void testSettingsUpdate() {
final IndexService service = createIndex("foo");
// INDEX_COMPOUND_ON_FLUSH
InternalEngine engine = ((InternalEngine) EngineAccess.engine(service.getShardOrNull(0)));
assertThat(engine.getCurrentIndexWriterConfig().getUseCompoundFile(), is(true));


// VERSION MAP SIZE
long indexBufferSize = engine.config().getIndexingBufferSize().bytes();

final int iters = between(1, 20);
for (int i = 0; i < iters; i++) {
boolean compoundOnFlush = randomBoolean();

// Tricky: TimeValue.parseTimeValue casts this long to a double, which steals 11 of the 64 bits for exponent, so we can't use
// the full long range here else the assert below fails:
long gcDeletes = random().nextLong() & (Long.MAX_VALUE >> 11);

Settings build = Settings.builder()
.put(EngineConfig.INDEX_GC_DELETES_SETTING, gcDeletes, TimeUnit.MILLISECONDS)
.put(IndexSettings.INDEX_GC_DELETES_SETTING, gcDeletes, TimeUnit.MILLISECONDS)
.build();
assertEquals(gcDeletes, build.getAsTime(EngineConfig.INDEX_GC_DELETES_SETTING, null).millis());
assertEquals(gcDeletes, build.getAsTime(IndexSettings.INDEX_GC_DELETES_SETTING, null).millis());

client().admin().indices().prepareUpdateSettings("foo").setSettings(build).get();
LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig();
assertEquals(currentIndexWriterConfig.getUseCompoundFile(), true);


assertEquals(engine.config().getGcDeletesInMillis(), gcDeletes);
assertEquals(engine.config().getIndexSettings().getGcDeletesInMillis(), gcDeletes);
assertEquals(engine.getGcDeletesInMillis(), gcDeletes);

indexBufferSize = engine.config().getIndexingBufferSize().bytes();
}

Settings settings = Settings.builder()
.put(EngineConfig.INDEX_GC_DELETES_SETTING, 1000, TimeUnit.MILLISECONDS)
.put(IndexSettings.INDEX_GC_DELETES_SETTING, 1000, TimeUnit.MILLISECONDS)
.build();
client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get();
assertEquals(engine.getGcDeletesInMillis(), 1000);

@@ -74,7 +66,7 @@ public class InternalEngineSettingsTests extends ESSingleNodeTestCase {


settings = Settings.builder()
.put(EngineConfig.INDEX_GC_DELETES_SETTING, "0ms")
.put(IndexSettings.INDEX_GC_DELETES_SETTING, "0ms")
.build();

client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get();

@@ -82,7 +74,7 @@ public class InternalEngineSettingsTests extends ESSingleNodeTestCase {
assertTrue(engine.config().isEnableGcDeletes());

settings = Settings.builder()
.put(EngineConfig.INDEX_GC_DELETES_SETTING, 1000, TimeUnit.MILLISECONDS)
.put(IndexSettings.INDEX_GC_DELETES_SETTING, 1000, TimeUnit.MILLISECONDS)
.build();
client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get();
assertEquals(engine.getGcDeletesInMillis(), 1000);
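
The mask in the test above, random().nextLong() & (Long.MAX_VALUE >> 11), is what keeps the assertEquals on the time value honest: as the test's own comment notes, the setting round-trips through a double, whose 11 exponent bits leave only 53 significant bits. A worked check:

// Why the mask keeps the round-trip exact: doubles carry a 52-bit mantissa
// (53 significant bits), so wider longs can lose precision on the way through.
public class LongDoubleRoundTrip {
    public static void main(String[] args) {
        long big = (1L << 62) + 1;                  // 63 bits wide, not representable in a double
        System.out.println((long) (double) big == big);       // false - precision lost
        long masked = big & (Long.MAX_VALUE >> 11); // clears the high 11 bits, at most 52 remain
        System.out.println((long) (double) masked == masked); // true - always exact
    }
}
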
@@ -61,8 +61,6 @@ import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.Index;

@@ -85,7 +83,7 @@ import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.mapper.object.RootObjectMapper;
import org.elasticsearch.index.shard.IndexSearcherWrapper;
import org.elasticsearch.index.shard.MergeSchedulerConfig;
import org.elasticsearch.index.MergeSchedulerConfig;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardUtils;
import org.elasticsearch.index.shard.TranslogRecoveryPerformer;

@@ -95,7 +93,6 @@ import org.elasticsearch.index.store.DirectoryUtils;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.indices.IndexingMemoryController;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.test.DummyShardLock;

@@ -168,7 +165,7 @@ public class InternalEngineTests extends ESTestCase {
codecName = "default";
}
defaultSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder()
.put(EngineConfig.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us
.put(IndexSettings.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us
.put(EngineConfig.INDEX_CODEC_SETTING, codecName)
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.build()); // TODO randomize more settings

@@ -260,19 +257,19 @@ public class InternalEngineTests extends ESTestCase {
}

protected InternalEngine createEngine(Store store, Path translogPath) {
return createEngine(defaultSettings, store, translogPath, new MergeSchedulerConfig(defaultSettings), newMergePolicy());
return createEngine(defaultSettings, store, translogPath, newMergePolicy());
}

protected InternalEngine createEngine(IndexSettings indexSettings, Store store, Path translogPath, MergeSchedulerConfig mergeSchedulerConfig, MergePolicy mergePolicy) {
return new InternalEngine(config(indexSettings, store, translogPath, mergeSchedulerConfig, mergePolicy), false);
protected InternalEngine createEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) {
return new InternalEngine(config(indexSettings, store, translogPath, mergePolicy), false);
}

public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergeSchedulerConfig mergeSchedulerConfig, MergePolicy mergePolicy) {
public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) {
IndexWriterConfig iwc = newIndexWriterConfig();
TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE);

EngineConfig config = new EngineConfig(shardId, threadPool, indexSettings
, null, store, createSnapshotDeletionPolicy(), mergePolicy, mergeSchedulerConfig,
, null, store, createSnapshotDeletionPolicy(), mergePolicy,
iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), new Engine.EventListener() {
@Override
public void onFailedEngine(String reason, @Nullable Throwable t) {

@@ -293,7 +290,7 @@ public class InternalEngineTests extends ESTestCase {

public void testSegments() throws Exception {
try (Store store = createStore();
Engine engine = createEngine(defaultSettings, store, createTempDir(), new MergeSchedulerConfig(defaultSettings), NoMergePolicy.INSTANCE)) {
Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) {
List<Segment> segments = engine.segments(false);
assertThat(segments.isEmpty(), equalTo(true));
assertThat(engine.segmentsStats().getCount(), equalTo(0l));

@@ -411,7 +408,7 @@ public class InternalEngineTests extends ESTestCase {

public void testVerboseSegments() throws Exception {
try (Store store = createStore();
Engine engine = createEngine(defaultSettings, store, createTempDir(), new MergeSchedulerConfig(defaultSettings), NoMergePolicy.INSTANCE)) {
Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) {
List<Segment> segments = engine.segments(true);
assertThat(segments.isEmpty(), equalTo(true));

@@ -440,7 +437,7 @@ public class InternalEngineTests extends ESTestCase {

public void testSegmentsWithMergeFlag() throws Exception {
try (Store store = createStore();
Engine engine = createEngine(defaultSettings, store, createTempDir(), new MergeSchedulerConfig(defaultSettings), new TieredMergePolicy())) {
Engine engine = createEngine(defaultSettings, store, createTempDir(), new TieredMergePolicy())) {
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
Engine.Index index = new Engine.Index(newUid("1"), doc);
engine.index(index);

@@ -770,7 +767,7 @@ public class InternalEngineTests extends ESTestCase {

public void testSyncedFlush() throws IOException {
try (Store store = createStore();
Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), new MergeSchedulerConfig(defaultSettings),
Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(),
new LogByteSizeMergePolicy()), false)) {
final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);

@@ -797,7 +794,7 @@ public class InternalEngineTests extends ESTestCase {
final int iters = randomIntBetween(2, 5); // run this a couple of times to get some coverage
for (int i = 0; i < iters; i++) {
try (Store store = createStore();
InternalEngine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), new MergeSchedulerConfig(defaultSettings),
InternalEngine engine = new InternalEngine(config(defaultSettings, store, createTempDir(),
new LogDocMergePolicy()), false)) {
final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocumentWithTextField(), B_1, null);

@@ -1027,7 +1024,7 @@ public class InternalEngineTests extends ESTestCase {

public void testForceMerge() throws IOException {
try (Store store = createStore();
Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), new MergeSchedulerConfig(defaultSettings),
Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(),
new LogByteSizeMergePolicy()), false)) { // use log MP here we test some behavior in ESMP
int numDocs = randomIntBetween(10, 100);
for (int i = 0; i < numDocs; i++) {

@@ -1466,7 +1463,7 @@ public class InternalEngineTests extends ESTestCase {

public void testEnableGcDeletes() throws Exception {
try (Store store = createStore();
Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), new MergeSchedulerConfig(defaultSettings), newMergePolicy()), false)) {
Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), newMergePolicy()), false)) {
engine.config().setEnableGcDeletes(false);

// Add document

@@ -1605,7 +1602,7 @@ public class InternalEngineTests extends ESTestCase {
IndexSettings indexSettings = new IndexSettings(defaultSettings.getIndexMetaData(),
Settings.builder().put(defaultSettings.getSettings()).put(EngineConfig.INDEX_FORCE_NEW_TRANSLOG, true).build(),
Collections.emptyList());
engine = createEngine(indexSettings, store, primaryTranslogDir, new MergeSchedulerConfig(indexSettings), newMergePolicy());
engine = createEngine(indexSettings, store, primaryTranslogDir, newMergePolicy());
}

public void testTranslogReplayWithFailure() throws IOException {

@@ -1939,7 +1936,7 @@ public class InternalEngineTests extends ESTestCase {
TranslogConfig translogConfig = new TranslogConfig(shardId, translog.location(), config.getIndexSettings(), BigArrays.NON_RECYCLING_INSTANCE);

EngineConfig brokenConfig = new EngineConfig(shardId, threadPool, config.getIndexSettings()
, null, store, createSnapshotDeletionPolicy(), newMergePolicy(), config.getMergeSchedulerConfig(),
, null, store, createSnapshotDeletionPolicy(), newMergePolicy(),
config.getAnalyzer(), config.getSimilarity(), new CodecService(null, logger), config.getEventListener()
, config.getTranslogRecoveryPerformer(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5));
@@ -53,7 +53,7 @@ import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.shard.MergeSchedulerConfig;
import org.elasticsearch.index.MergeSchedulerConfig;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardUtils;
import org.elasticsearch.index.store.DirectoryService;

@@ -117,7 +117,7 @@ public class ShadowEngineTests extends ESTestCase {
codecName = "default";
}
defaultSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder()
.put(EngineConfig.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us
.put(IndexSettings.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us
.put(EngineConfig.INDEX_CODEC_SETTING, codecName)
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.build()); // TODO randomize more settings

@@ -209,7 +209,7 @@ public class ShadowEngineTests extends ESTestCase {
}

protected ShadowEngine createShadowEngine(IndexSettings indexSettings, Store store) {
return new ShadowEngine(config(indexSettings, store, null, new MergeSchedulerConfig(indexSettings), null));
return new ShadowEngine(config(indexSettings, store, null, null));
}

protected InternalEngine createInternalEngine(IndexSettings indexSettings, Store store, Path translogPath) {

@@ -217,14 +217,14 @@ public class ShadowEngineTests extends ESTestCase {
}

protected InternalEngine createInternalEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) {
return new InternalEngine(config(indexSettings, store, translogPath, new MergeSchedulerConfig(indexSettings), mergePolicy), true);
return new InternalEngine(config(indexSettings, store, translogPath, mergePolicy), true);
}

public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergeSchedulerConfig mergeSchedulerConfig, MergePolicy mergePolicy) {
public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) {
IndexWriterConfig iwc = newIndexWriterConfig();
TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE);
EngineConfig config = new EngineConfig(shardId, threadPool, indexSettings
, null, store, createSnapshotDeletionPolicy(), mergePolicy, mergeSchedulerConfig,
, null, store, createSnapshotDeletionPolicy(), mergePolicy,
iwc.getAnalyzer(), iwc.getSimilarity() , new CodecService(null, logger), new Engine.EventListener() {
@Override
public void onFailedEngine(String reason, @Nullable Throwable t) {
@ -82,6 +82,7 @@ import org.elasticsearch.index.mapper.ParseContext;
|
|||
import org.elasticsearch.index.mapper.ParsedDocument;
|
||||
import org.elasticsearch.index.mapper.Uid;
|
||||
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
|
||||
import org.elasticsearch.index.search.stats.SearchSlowLog;
|
||||
import org.elasticsearch.index.snapshots.IndexShardRepository;
|
||||
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
|
||||
import org.elasticsearch.index.store.Store;
|
||||
|
@ -127,28 +128,6 @@ import static org.hamcrest.Matchers.equalTo;
|
|||
*/
|
||||
public class IndexShardTests extends ESSingleNodeTestCase {
|
||||
|
||||
public void testFlushOnDeleteSetting() throws Exception {
|
||||
boolean initValue = randomBoolean();
|
||||
createIndex("test", settingsBuilder().put(IndexShard.INDEX_FLUSH_ON_CLOSE, initValue).build());
|
||||
ensureGreen();
|
||||
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
|
||||
IndexService test = indicesService.indexService("test");
|
||||
IndexShard shard = test.getShardOrNull(0);
|
||||
assertEquals(initValue, shard.isFlushOnClose());
|
||||
final boolean newValue = !initValue;
|
||||
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexShard.INDEX_FLUSH_ON_CLOSE, newValue).build()));
|
||||
assertEquals(newValue, shard.isFlushOnClose());
|
||||
|
||||
try {
|
||||
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexShard.INDEX_FLUSH_ON_CLOSE, "FOOBAR").build()));
|
||||
fail("exception expected");
|
||||
} catch (IllegalArgumentException ex) {
|
||||
|
||||
}
|
||||
assertEquals(newValue, shard.isFlushOnClose());
|
||||
|
||||
}
|
||||
|
||||
public void testWriteShardState() throws Exception {
|
||||
try (NodeEnvironment env = newNodeEnvironment()) {
|
||||
ShardId id = new ShardId("foo", 1);
|
||||
|
@@ -719,7 +698,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
IndexService test = indicesService.indexService("test");
IndexShard shard = test.getShardOrNull(0);
assertFalse(shard.shouldFlush());
client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(133 /* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get();
client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(133 /* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get();
client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get();
assertFalse(shard.shouldFlush());
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, new ParseContext.Document(), new BytesArray(new byte[]{1}), null);
@@ -735,7 +714,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
shard.getEngine().getTranslog().sync();
long size = shard.getEngine().getTranslog().sizeInBytes();
logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(size, ByteSizeUnit.BYTES))
client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(size, ByteSizeUnit.BYTES))
.build()).get();
client().prepareDelete("test", "test", "2").get();
logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
@@ -753,7 +732,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
IndexService test = indicesService.indexService("test");
final IndexShard shard = test.getShardOrNull(0);
assertFalse(shard.shouldFlush());
client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(133/* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get();
client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(133/* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get();
client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get();
assertFalse(shard.shouldFlush());
final AtomicBoolean running = new AtomicBoolean(true);
@@ -1064,7 +1043,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ShardRouting routing = new ShardRouting(shard.routingEntry());
shard.close("simon says", true);
NodeServicesProvider indexServices = indexService.getIndexServices();
IndexShard newShard = new IndexShard(shard.shardId(), indexService.getIndexSettings(), shard.shardPath(), shard.store(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), indexService.fieldData(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper, indexServices, listeners);
IndexShard newShard = new IndexShard(shard.shardId(), indexService.getIndexSettings(), shard.shardPath(), shard.store(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), indexService.fieldData(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper, indexServices, indexService.getSearchSlowLog(), listeners);
ShardRoutingHelper.reinit(routing);
newShard.updateRoutingEntry(routing, false);
DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT);

@@ -49,13 +49,13 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.MergePolicyConfig;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.recovery.RecoveryTarget;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.plugins.Plugin;
@@ -142,7 +142,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
.put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
.put(MockFSIndexStore.CHECK_INDEX_ON_CLOSE, false) // no checkindex - we corrupt shards on purpose
.put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(1, ByteSizeUnit.PB)) // no translog based flush - it might change the .liv / segments.N files
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(1, ByteSizeUnit.PB)) // no translog based flush - it might change the .liv / segments.N files
.put("indices.recovery.concurrent_streams", 10)
));
ensureGreen();
@@ -247,7 +247,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
.put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
.put(MockFSIndexStore.CHECK_INDEX_ON_CLOSE, false) // no checkindex - we corrupt shards on purpose
.put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(1, ByteSizeUnit.PB)) // no translog based flush - it might change the .liv / segments.N files
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(1, ByteSizeUnit.PB)) // no translog based flush - it might change the .liv / segments.N files
.put("indices.recovery.concurrent_streams", 10)
));
ensureGreen();
@@ -473,7 +473,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") // no replicas for this test
.put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
.put(MockFSIndexStore.CHECK_INDEX_ON_CLOSE, false) // no checkindex - we corrupt shards on purpose
.put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(1, ByteSizeUnit.PB)) // no translog based flush - it might change the .liv / segments.N files
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(1, ByteSizeUnit.PB)) // no translog based flush - it might change the .liv / segments.N files
.put("indices.recovery.concurrent_streams", 10)
));
ensureGreen();
@@ -528,7 +528,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, cluster().numDataNodes() - 1)
.put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
.put(MockFSIndexStore.CHECK_INDEX_ON_CLOSE, false) // no checkindex - we corrupt shards on purpose
.put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(1, ByteSizeUnit.PB)) // no translog based flush - it might change the .liv / segments.N files
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(1, ByteSizeUnit.PB)) // no translog based flush - it might change the .liv / segments.N files
.put("indices.recovery.concurrent_streams", 10)
));
ensureGreen();

@@ -33,7 +33,8 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.MockEngineFactoryPlugin;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
@@ -66,9 +67,7 @@ import static org.hamcrest.Matchers.notNullValue;
public class CorruptedTranslogIT extends ESIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
// we really need local GW here since this also checks for corruption etc.
// and we need to make sure primaries are not just trashed if we don't have replicas
return pluginList(MockTransportService.TestPlugin.class);
return pluginList(MockTransportService.TestPlugin.class, MockEngineFactoryPlugin.class);
}

public void testCorruptTranslogFiles() throws Exception {
@@ -78,8 +77,7 @@ public class CorruptedTranslogIT extends ESIntegTestCase {
.put("index.number_of_shards", 1)
.put("index.number_of_replicas", 0)
.put("index.refresh_interval", "-1")
.put(MockEngineSupport.FLUSH_ON_CLOSE_RATIO, 0.0d) // never flush - always recover from translog
.put(IndexShard.INDEX_FLUSH_ON_CLOSE, false) // never flush - always recover from translog
.put(MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.getKey(), true) // never flush - always recover from translog
));
ensureYellow();

@@ -103,7 +101,6 @@ public class CorruptedTranslogIT extends ESIntegTestCase {
client().prepareSearch("test").setQuery(matchAllQuery()).get();
fail("all shards should be failed due to a corrupted translog");
} catch (SearchPhaseExecutionException e) {
e.printStackTrace();
// Good, all shards should be failed because there is only a
// single shard and its translog is corrupt
}
@@ -170,13 +167,13 @@ public class CorruptedTranslogIT extends ESIntegTestCase {

/** Disables translog flushing for the specified index */
private static void disableTranslogFlush(String index) {
Settings settings = Settings.builder().put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(1, ByteSizeUnit.PB)).build();
Settings settings = Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(1, ByteSizeUnit.PB)).build();
client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
}

/** Enables translog flushing for the specified index */
private static void enableTranslogFlush(String index) {
Settings settings = Settings.builder().put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(512, ByteSizeUnit.MB)).build();
Settings settings = Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(512, ByteSizeUnit.MB)).build();
client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
}
}

@@ -30,8 +30,8 @@ import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationComman
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.junit.annotations.TestLogging;
@@ -153,7 +153,7 @@ public class FlushIT extends ESIntegTestCase {
createIndex("test");

client().admin().indices().prepareUpdateSettings("test").setSettings(
Settings.builder().put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(1, ByteSizeUnit.PB)).put("index.refresh_interval", -1).put("index.number_of_replicas", internalCluster().numDataNodes() - 1))
Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(1, ByteSizeUnit.PB)).put("index.refresh_interval", -1).put("index.number_of_replicas", internalCluster().numDataNodes() - 1))
.get();
ensureGreen();
final AtomicBoolean stop = new AtomicBoolean(false);

@@ -111,7 +111,7 @@ public class RandomExceptionCircuitBreakerIT extends ESIntegTestCase {
.put(indexSettings())
.put(EXCEPTION_TOP_LEVEL_RATIO_KEY, topLevelRate)
.put(EXCEPTION_LOW_LEVEL_RATIO_KEY, lowLevelRate)
.put(MockEngineSupport.WRAP_READER_RATIO, 1.0d);
.put(MockEngineSupport.WRAP_READER_RATIO.getKey(), 1.0d);
logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
client().admin().indices().prepareCreate("test")
.setSettings(settings)

@@ -31,8 +31,8 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.MergePolicyConfig;
import org.elasticsearch.index.shard.MergeSchedulerConfig;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.MergeSchedulerConfig;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.test.ESIntegTestCase;

@@ -44,8 +44,8 @@ import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.shard.MergePolicyConfig;
import org.elasticsearch.index.shard.MergeSchedulerConfig;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.MergeSchedulerConfig;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;

@@ -33,12 +33,14 @@ import org.elasticsearch.test.ESIntegTestCase;
import java.io.IOException;

import static org.elasticsearch.action.percolate.PercolateSourceBuilder.docBuilder;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.smileBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.yamlBuilder;
import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.query.QueryBuilders.rangeQuery;
import static org.elasticsearch.percolator.PercolatorTestUtil.convertFromTextArray;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatchCount;
@@ -363,6 +365,33 @@ public class MultiPercolatorIT extends ESIntegTestCase {
assertEquals(response.getItems()[1].getResponse().getMatches()[0].getId().string(), "Q");
}

public void testStartTimeIsPropagatedToShardRequests() throws Exception {
// See: https://github.com/elastic/elasticsearch/issues/15908
internalCluster().ensureAtLeastNumDataNodes(2);
client().admin().indices().prepareCreate("test")
.setSettings(settingsBuilder()
.put("index.number_of_shards", 1)
.put("index.number_of_replicas", 1)
)
.addMapping("type", "date_field", "type=date,format=strict_date_optional_time||epoch_millis")
.get();
ensureGreen();

client().prepareIndex("test", ".percolator", "1")
.setSource(jsonBuilder().startObject().field("query", rangeQuery("date_field").lt("now+90d")).endObject())
.setRefresh(true)
.get();

for (int i = 0; i < 32; i++) {
MultiPercolateResponse response = client().prepareMultiPercolate()
.add(client().preparePercolate().setDocumentType("type").setIndices("test")
.setPercolateDoc(new PercolateSourceBuilder.DocBuilder().setDoc("date_field", "2015-07-21T10:28:01-07:00")))
.get();
assertThat(response.getItems()[0].getResponse().getCount(), equalTo(1L));
assertThat(response.getItems()[0].getResponse().getMatches()[0].getId().string(), equalTo("1"));
}
}

void initNestedIndexAndPercolation() throws IOException {
XContentBuilder mapping = XContentFactory.jsonBuilder();
mapping.startObject().startObject("properties").startObject("companyname").field("type", "string").endObject()

@@ -66,14 +66,13 @@ import static org.hamcrest.Matchers.nullValue;

public class PercolateDocumentParserTests extends ESTestCase {

private Index index;
private MapperService mapperService;
private PercolateDocumentParser parser;
private QueryShardContext queryShardContext;
private PercolateShardRequest request;

@Before
public void init() {
index = new Index("_index");
IndexSettings indexSettings = new IndexSettings(new IndexMetaData.Builder("_index").settings(
Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
@@ -97,6 +96,10 @@ public class PercolateDocumentParserTests extends ESTestCase {
parser = new PercolateDocumentParser(
highlightPhase, new SortParseElement(), aggregationPhase, mappingUpdatedAction
);

request = Mockito.mock(PercolateShardRequest.class);
Mockito.when(request.shardId()).thenReturn(new ShardId(new Index("_index"), 0));
Mockito.when(request.documentType()).thenReturn("type");
}

public void testParseDoc() throws Exception {
@@ -105,9 +108,7 @@ public class PercolateDocumentParserTests extends ESTestCase {
.field("field1", "value1")
.endObject()
.endObject();
PercolateShardRequest request = new PercolateShardRequest(new ShardId(index, 0), null);
request.documentType("type");
request.source(source.bytes());
Mockito.when(request.source()).thenReturn(source.bytes());

PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", "_index", 0), mapperService);
ParsedDocument parsedDocument = parser.parse(request, context, mapperService, queryShardContext);
@@ -126,9 +127,7 @@ public class PercolateDocumentParserTests extends ESTestCase {
.field("size", 123)
.startObject("sort").startObject("_score").endObject().endObject()
.endObject();
PercolateShardRequest request = new PercolateShardRequest(new ShardId(index, 0), null);
request.documentType("type");
request.source(source.bytes());
Mockito.when(request.source()).thenReturn(source.bytes());

PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", "_index", 0), mapperService);
ParsedDocument parsedDocument = parser.parse(request, context, mapperService, queryShardContext);
@@ -151,10 +150,8 @@ public class PercolateDocumentParserTests extends ESTestCase {
XContentBuilder docSource = jsonBuilder().startObject()
.field("field1", "value1")
.endObject();
PercolateShardRequest request = new PercolateShardRequest(new ShardId(index, 0), null);
request.documentType("type");
request.source(source.bytes());
request.docSource(docSource.bytes());
Mockito.when(request.source()).thenReturn(source.bytes());
Mockito.when(request.docSource()).thenReturn(docSource.bytes());

PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", "_index", 0), mapperService);
ParsedDocument parsedDocument = parser.parse(request, context, mapperService, queryShardContext);
@@ -180,10 +177,8 @@ public class PercolateDocumentParserTests extends ESTestCase {
XContentBuilder docSource = jsonBuilder().startObject()
.field("field1", "value1")
.endObject();
PercolateShardRequest request = new PercolateShardRequest(new ShardId(index, 0), null);
request.documentType("type");
request.source(source.bytes());
request.docSource(docSource.bytes());
Mockito.when(request.source()).thenReturn(source.bytes());
Mockito.when(request.docSource()).thenReturn(docSource.bytes());

PercolateContext context = new PercolateContext(request, new SearchShardTarget("_node", "_index", 0), mapperService);
try {

@@ -92,7 +92,7 @@ public class SearchWithRandomExceptionsIT extends ESIntegTestCase {
.put(indexSettings())
.put(EXCEPTION_TOP_LEVEL_RATIO_KEY, topLevelRate)
.put(EXCEPTION_LOW_LEVEL_RATIO_KEY, lowLevelRate)
.put(MockEngineSupport.WRAP_READER_RATIO, 1.0d);
.put(MockEngineSupport.WRAP_READER_RATIO.getKey(), 1.0d);
logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
assertAcked(prepareCreate("test")
.setSettings(settings)

@@ -31,13 +31,13 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.store.MockFSDirectoryService;
import org.elasticsearch.test.store.MockFSIndexStore;

import java.io.IOException;
import java.util.Collection;
import java.util.concurrent.ExecutionException;

import static org.elasticsearch.common.settings.Settings.settingsBuilder;
@@ -46,7 +46,11 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFa

public class SearchWithRandomIOExceptionsIT extends ESIntegTestCase {

@TestLogging("action.search.type:TRACE,index.shard:TRACE")
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return pluginList(MockFSIndexStore.TestPlugin.class);
}

public void testRandomDirectoryIOExceptions() throws IOException, InterruptedException, ExecutionException {
String mapping = XContentFactory.jsonBuilder().
startObject().

@@ -274,8 +274,7 @@ public class SearchSourceBuilderTests extends ESTestCase {
int numRescores = randomIntBetween(1, 5);
for (int i = 0; i < numRescores; i++) {
// NORELEASE need a random rescore builder method
RescoreBuilder rescoreBuilder = new RescoreBuilder();
rescoreBuilder.rescorer(RescoreBuilder.queryRescorer(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20),
RescoreBuilder rescoreBuilder = new RescoreBuilder(RescoreBuilder.queryRescorer(QueryBuilders.termQuery(randomAsciiOfLengthBetween(5, 20),
randomAsciiOfLengthBetween(5, 20))));
builder.addRescorer(rescoreBuilder);
}

@@ -31,7 +31,7 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.shard.MergePolicyConfig;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.test.ESIntegTestCase;

@@ -37,6 +37,7 @@ import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.rescore.QueryRescoreMode;
import org.elasticsearch.search.rescore.RescoreBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder.QueryRescorer;
import org.elasticsearch.test.ESIntegTestCase;
@@ -541,7 +542,7 @@ public class QueryRescorerIT extends ESIntegTestCase {
.setQueryWeight(0.5f).setRescoreQueryWeight(0.4f);

if (!"".equals(scoreModes[innerMode])) {
innerRescoreQuery.setScoreMode(scoreModes[innerMode]);
innerRescoreQuery.setScoreMode(QueryRescoreMode.fromString(scoreModes[innerMode]));
}

SearchResponse searchResponse = client()
@@ -564,7 +565,7 @@ public class QueryRescorerIT extends ESIntegTestCase {
.boost(4.0f)).setQueryWeight(0.5f).setRescoreQueryWeight(0.4f);

if (!"".equals(scoreModes[outerMode])) {
outerRescoreQuery.setScoreMode(scoreModes[outerMode]);
outerRescoreQuery.setScoreMode(QueryRescoreMode.fromString(scoreModes[outerMode]));
}

searchResponse = client()
@@ -612,7 +613,7 @@ public class QueryRescorerIT extends ESIntegTestCase {
.setRescoreQueryWeight(secondaryWeight);

if (!"".equals(scoreMode)) {
rescoreQuery.setScoreMode(scoreMode);
rescoreQuery.setScoreMode(QueryRescoreMode.fromString(scoreMode));
}

SearchResponse rescored = client()
@@ -683,11 +684,11 @@ public class QueryRescorerIT extends ESIntegTestCase {
int numDocs = indexRandomNumbers("keyword", 1, true);
QueryRescorer eightIsGreat = RescoreBuilder.queryRescorer(
QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", English.intToEnglish(8)),
ScoreFunctionBuilders.weightFactorFunction(1000.0f)).boostMode(CombineFunction.REPLACE)).setScoreMode("total");
ScoreFunctionBuilders.weightFactorFunction(1000.0f)).boostMode(CombineFunction.REPLACE)).setScoreMode(QueryRescoreMode.Total);
QueryRescorer sevenIsBetter = RescoreBuilder.queryRescorer(
QueryBuilders.functionScoreQuery(QueryBuilders.termQuery("field1", English.intToEnglish(7)),
ScoreFunctionBuilders.weightFactorFunction(10000.0f)).boostMode(CombineFunction.REPLACE))
.setScoreMode("total");
.setScoreMode(QueryRescoreMode.Total);

// First set the rescore window large enough that both rescores take effect
SearchRequestBuilder request = client().prepareSearch();
@@ -704,10 +705,10 @@ public class QueryRescorerIT extends ESIntegTestCase {
// Now use one rescore to drag the number we're looking for into the window of another
QueryRescorer ninetyIsGood = RescoreBuilder.queryRescorer(
QueryBuilders.functionScoreQuery(QueryBuilders.queryStringQuery("*ninety*"), ScoreFunctionBuilders.weightFactorFunction(1000.0f))
.boostMode(CombineFunction.REPLACE)).setScoreMode("total");
.boostMode(CombineFunction.REPLACE)).setScoreMode(QueryRescoreMode.Total);
QueryRescorer oneToo = RescoreBuilder.queryRescorer(
QueryBuilders.functionScoreQuery(QueryBuilders.queryStringQuery("*one*"), ScoreFunctionBuilders.weightFactorFunction(1000.0f))
.boostMode(CombineFunction.REPLACE)).setScoreMode(QueryRescoreMode.Total);
request.clearRescorers().addRescorer(ninetyIsGood, numDocs).addRescorer(oneToo, 10);
response = request.setSize(2).get();
assertFirstHit(response, hasId("91"));

@@ -0,0 +1,170 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.rescore;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.rescore.RescoreBuilder.QueryRescorer;
import org.elasticsearch.search.rescore.RescoreBuilder.Rescorer;
import org.elasticsearch.test.ESTestCase;
import org.junit.AfterClass;
import org.junit.BeforeClass;

import java.io.IOException;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;

public class QueryRescoreBuilderTests extends ESTestCase {

private static final int NUMBER_OF_TESTBUILDERS = 20;
private static NamedWriteableRegistry namedWriteableRegistry;

/**
 * setup for the whole base test class
 */
@BeforeClass
public static void init() {
namedWriteableRegistry = new NamedWriteableRegistry();
namedWriteableRegistry.registerPrototype(Rescorer.class, org.elasticsearch.search.rescore.RescoreBuilder.QueryRescorer.PROTOTYPE);
namedWriteableRegistry.registerPrototype(QueryBuilder.class, new MatchAllQueryBuilder());
}

@AfterClass
public static void afterClass() throws Exception {
namedWriteableRegistry = null;
}

/**
 * Test serialization and deserialization of the rescore builder
 */
public void testSerialization() throws IOException {
for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) {
RescoreBuilder original = randomRescoreBuilder();
RescoreBuilder deserialized = serializedCopy(original);
assertEquals(deserialized, original);
assertEquals(deserialized.hashCode(), original.hashCode());
assertNotSame(deserialized, original);
}
}

/**
 * Test equality and hashCode properties
 */
public void testEqualsAndHashcode() throws IOException {
for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) {
RescoreBuilder firstBuilder = randomRescoreBuilder();
assertFalse("rescore builder is equal to null", firstBuilder.equals(null));
assertFalse("rescore builder is equal to incompatible type", firstBuilder.equals(""));
assertTrue("rescore builder is not equal to self", firstBuilder.equals(firstBuilder));
assertThat("same rescore builder's hashcode returns different values if called multiple times", firstBuilder.hashCode(),
equalTo(firstBuilder.hashCode()));
assertThat("different rescore builder should not be equal", mutate(firstBuilder), not(equalTo(firstBuilder)));

RescoreBuilder secondBuilder = serializedCopy(firstBuilder);
assertTrue("rescore builder is not equal to self", secondBuilder.equals(secondBuilder));
assertTrue("rescore builder is not equal to its copy", firstBuilder.equals(secondBuilder));
assertTrue("equals is not symmetric", secondBuilder.equals(firstBuilder));
assertThat("rescore builder copy's hashcode is different from original hashcode", secondBuilder.hashCode(), equalTo(firstBuilder.hashCode()));

RescoreBuilder thirdBuilder = serializedCopy(secondBuilder);
assertTrue("rescore builder is not equal to self", thirdBuilder.equals(thirdBuilder));
assertTrue("rescore builder is not equal to its copy", secondBuilder.equals(thirdBuilder));
assertThat("rescore builder copy's hashcode is different from original hashcode", secondBuilder.hashCode(), equalTo(thirdBuilder.hashCode()));
assertTrue("equals is not transitive", firstBuilder.equals(thirdBuilder));
assertThat("rescore builder copy's hashcode is different from original hashcode", firstBuilder.hashCode(), equalTo(thirdBuilder.hashCode()));
assertTrue("equals is not symmetric", thirdBuilder.equals(secondBuilder));
assertTrue("equals is not symmetric", thirdBuilder.equals(firstBuilder));
}
}

private RescoreBuilder mutate(RescoreBuilder original) throws IOException {
RescoreBuilder mutation = serializedCopy(original);
if (randomBoolean()) {
Integer windowSize = original.windowSize();
if (windowSize != null) {
mutation.windowSize(windowSize + 1);
} else {
mutation.windowSize(randomIntBetween(0, 100));
}
} else {
QueryRescorer queryRescorer = (QueryRescorer) mutation.rescorer();
switch (randomIntBetween(0, 3)) {
case 0:
queryRescorer.setQueryWeight(queryRescorer.getQueryWeight() + 0.1f);
break;
case 1:
queryRescorer.setRescoreQueryWeight(queryRescorer.getRescoreQueryWeight() + 0.1f);
break;
case 2:
QueryRescoreMode other;
do {
other = randomFrom(QueryRescoreMode.values());
} while (other == queryRescorer.getScoreMode());
queryRescorer.setScoreMode(other);
break;
case 3:
// only increase the boost to make it a slightly different query
queryRescorer.getRescoreQuery().boost(queryRescorer.getRescoreQuery().boost() + 0.1f);
break;
default:
throw new IllegalStateException("unexpected random mutation in test");
}
}
return mutation;
}

/**
 * create random shape that is put under test
 */
private static RescoreBuilder randomRescoreBuilder() {
QueryBuilder<MatchAllQueryBuilder> queryBuilder = new MatchAllQueryBuilder().boost(randomFloat()).queryName(randomAsciiOfLength(20));
org.elasticsearch.search.rescore.RescoreBuilder.QueryRescorer rescorer = new
org.elasticsearch.search.rescore.RescoreBuilder.QueryRescorer(queryBuilder);
if (randomBoolean()) {
rescorer.setQueryWeight(randomFloat());
}
if (randomBoolean()) {
rescorer.setRescoreQueryWeight(randomFloat());
}
if (randomBoolean()) {
rescorer.setScoreMode(randomFrom(QueryRescoreMode.values()));
}
RescoreBuilder builder = new RescoreBuilder(rescorer);
if (randomBoolean()) {
builder.windowSize(randomIntBetween(0, 100));
}
return builder;
}

private static RescoreBuilder serializedCopy(RescoreBuilder original) throws IOException {
try (BytesStreamOutput output = new BytesStreamOutput()) {
original.writeTo(output);
try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) {
return RescoreBuilder.PROTOYPE.readFrom(in);
}
}
}

}

@@ -0,0 +1,58 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.search.rescore;

import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

/**
 * Test that fixes the ordinals and names in {@link QueryRescoreMode}. These should not be changed since we
 * use the names in the parser and the ordinals in serialization.
 */
public class QueryRescoreModeTests extends ESTestCase {

/**
 * Test {@link QueryRescoreMode} enum ordinals and names, since serialization relies on them
 */
public void testQueryRescoreMode() throws IOException {
float primary = randomFloat();
float secondary = randomFloat();
assertEquals(0, QueryRescoreMode.Avg.ordinal());
assertEquals("avg", QueryRescoreMode.Avg.toString());
assertEquals((primary + secondary)/2.0f, QueryRescoreMode.Avg.combine(primary, secondary), Float.MIN_VALUE);

assertEquals(1, QueryRescoreMode.Max.ordinal());
assertEquals("max", QueryRescoreMode.Max.toString());
assertEquals(Math.max(primary, secondary), QueryRescoreMode.Max.combine(primary, secondary), Float.MIN_VALUE);

assertEquals(2, QueryRescoreMode.Min.ordinal());
assertEquals("min", QueryRescoreMode.Min.toString());
assertEquals(Math.min(primary, secondary), QueryRescoreMode.Min.combine(primary, secondary), Float.MIN_VALUE);

assertEquals(3, QueryRescoreMode.Total.ordinal());
assertEquals("sum", QueryRescoreMode.Total.toString());
assertEquals(primary + secondary, QueryRescoreMode.Total.combine(primary, secondary), Float.MIN_VALUE);

assertEquals(4, QueryRescoreMode.Multiply.ordinal());
assertEquals("product", QueryRescoreMode.Multiply.toString());
assertEquals(primary * secondary, QueryRescoreMode.Multiply.combine(primary, secondary), Float.MIN_VALUE);
}
}

@@ -36,7 +36,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.DocumentMissingException;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.shard.MergePolicyConfig;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.CompiledScript;
import org.elasticsearch.script.ExecutableScript;

@@ -259,3 +259,28 @@ the awareness allocation feature). In order to enable it, set `cloud.node.auto_a

If you are using any EC2 API-compatible service, you can set the endpoint you want to use by setting
`cloud.aws.ec2.endpoint` to your URL provider.
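As a minimal sketch of such an override in `elasticsearch.yml` (the endpoint value below is a hypothetical placeholder; substitute your provider's actual URL):

[source,yaml]
--------------------------------
cloud:
    aws:
        ec2:
            # hypothetical EC2-compatible endpoint for illustration only
            endpoint: https://ec2.example.internal
--------------------------------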

[[cloud-aws-best-practices]]
==== Best Practices in AWS

A collection of best practices and other information around running Elasticsearch on AWS.

===== Instance/Disk
When selecting disks, please be aware of the following order of preference:

* https://aws.amazon.com/efs/[EFS] - Avoid: the sacrifices made to offer durability, shared storage, and grow/shrink come at a performance cost; such file systems have been known to cause corruption of indices; and because Elasticsearch is distributed and has built-in replication, the benefits that EFS offers are not needed.
* https://aws.amazon.com/ebs/[EBS] - Works well if running a small cluster (1-2 nodes) that cannot easily tolerate the loss of all storage backing a node, or if running indices with no replicas. If EBS is used, leverage provisioned IOPS to ensure performance.
* http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html[Instance Store] - When running clusters of larger size and with replicas, the ephemeral nature of Instance Store is ideal since Elasticsearch can tolerate the loss of shards. With Instance Store one gets the performance benefit of having disk physically attached to the host running the instance and also the cost benefit of avoiding paying extra for EBS.


Prefer https://aws.amazon.com/amazon-linux-ami/[Amazon Linux AMIs]: since Elasticsearch runs on the JVM, OS dependencies are very minimal, and one can benefit from the lightweight nature, support, and EC2-specific performance tweaks that the Amazon Linux AMIs offer.

===== Networking
* Network throttling takes place on smaller instance types in both the form of https://lab.getbase.com/how-we-discovered-limitations-on-the-aws-tcp-stack/[bandwidth and number of connections]. Therefore, if a large number of connections is needed and networking is becoming a bottleneck, avoid https://aws.amazon.com/ec2/instance-types/[instance types] with networking labeled as `Moderate` or `Low`.
* Multicast is not supported, even in a VPC; use the AWS cloud plugin, which joins the cluster by performing a security group lookup instead.
* When running in multiple http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html[availability zones] be sure to leverage https://www.elastic.co/guide/en/elasticsearch/reference/master/allocation-awareness.html[shard allocation awareness] so that not all copies of shard data reside in the same availability zone (see the sketch after this list).
* Do not span a cluster across regions. If necessary, use a tribe node.
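A sketch of wiring up that awareness in `elasticsearch.yml`, assuming the cloud plugin's automatic node attributes are enabled (the hunk header above references `cloud.node.auto_attributes`; the `aws_availability_zone` attribute name is how the plugin conventionally exposes the zone, stated here as an assumption):

[source,yaml]
--------------------------------
# let the cloud plugin attach an availability-zone attribute to each node
cloud.node.auto_attributes: true
# spread shard copies across distinct availability zones
cluster.routing.allocation.awareness.attributes: aws_availability_zone
--------------------------------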

===== Misc
* If you have split your nodes into roles, consider https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html[tagging the EC2 instances] by role to make it easier to filter and view your EC2 instances in the AWS console.
* Consider https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/terminating-instances.html#Using_ChangingDisableAPITermination[enabling termination protection] for all of your instances to avoid accidentally terminating a node in the cluster and causing a potentially disruptive reallocation.

@@ -58,9 +58,9 @@ curl -XPUT 'localhost:9200/my_index' -d '
[WARNING]
========================
In the above example, the "/opt/data/my_index" path is a shared filesystem that
must be available on every node in the Elasticsearch cluster. You must also
ensure that the Elasticsearch process has the correct permissions to read from
and write to the directory used in the `index.data_path` setting.
must be available on every data node in the Elasticsearch cluster. You must
also ensure that the Elasticsearch process has the correct permissions to read
from and write to the directory used in the `index.data_path` setting.
========================

An index that has been created with the `index.shadow_replicas` setting set to

@@ -54,9 +54,11 @@ GET /_search
{
"script_fields": {
"my_field": {
"file": "my_script",
"params": {
"my_var": 2
"script": {
"file": "my_script",
"params": {
"my_var": 2
}
}
}
}

@@ -56,13 +56,14 @@ tribe:
      metadata: true
--------------------------------

The tribe node can also configure blocks on indices explicitly:
The tribe node can also configure blocks on selected indices:

[source,yaml]
--------------------------------
tribe:
    blocks:
        indices.write: hk*,ldn*
        write.indices: hk*,ldn*
        metadata.indices: hk*,ldn*
--------------------------------

When there is a conflict and multiple clusters hold the same index, by default

@@ -70,3 +70,74 @@ final `_score` for each document.
}
}
--------------------------------------------------

==== Scoring with `bool.filter`

Queries specified under the `filter` element have no effect on scoring --
scores are returned as `0`. Scores are only affected by the query that has
been specified. For instance, all three of the following queries return
all documents where the `status` field contains the term `active`.

This first query assigns a score of `0` to all documents, as no scoring
query has been specified:

[source,json]
---------------------------------
GET _search
{
  "query": {
    "bool": {
      "filter": {
        "term": {
          "status": "active"
        }
      }
    }
  }
}
---------------------------------
// AUTOSENSE

This `bool` query has a `match_all` query, which assigns a score of `1.0` to
all documents.

[source,json]
---------------------------------
GET _search
{
  "query": {
    "bool": {
      "query": {
        "match_all": {}
      },
      "filter": {
        "term": {
          "status": "active"
        }
      }
    }
  }
}
---------------------------------
// AUTOSENSE

This `constant_score` query behaves in exactly the same way as the second example above.
The `constant_score` query assigns a score of `1.0` to all documents matched
by the filter.

[source,json]
---------------------------------
GET _search
{
  "query": {
    "constant_score": {
      "filter": {
        "term": {
          "status": "active"
        }
      }
    }
  }
}
---------------------------------
// AUTOSENSE

@@ -44,7 +44,7 @@ using the <<cluster-nodes-info>> API, with:

[source,js]
--------------------------------------------------
curl localhost:9200/_nodes/process?pretty
curl localhost:9200/_nodes/stats/process?pretty
--------------------------------------------------

[float]

@@ -90,8 +90,8 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent<AzureSto
logger.trace("selecting a client for account [{}], mode [{}]", account, mode.name());
AzureStorageSettings azureStorageSettings = null;

if (this.primaryStorageSettings == null || this.secondariesStorageSettings.isEmpty()) {
throw new IllegalArgumentException("No azure storage can be found. Check your elasticsearch.yml.");
if (this.primaryStorageSettings == null) {
throw new IllegalArgumentException("No primary azure storage can be found. Check your elasticsearch.yml.");
}

if (account != null) {

@@ -47,10 +47,20 @@ public class AzureStorageServiceTest extends ESTestCase {
azureStorageService.getSelectedClient("whatever", LocationMode.PRIMARY_ONLY);
fail("we should have raised an IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), is("No azure storage can be found. Check your elasticsearch.yml."));
assertThat(e.getMessage(), is("No primary azure storage can be found. Check your elasticsearch.yml."));
}
}

public void testGetSelectedClientWithNoSecondary() {
AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(Settings.builder()
.put("cloud.azure.storage.azure1.account", "myaccount1")
.put("cloud.azure.storage.azure1.key", "mykey1")
.build());
azureStorageService.doStart();
CloudBlobClient client = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY);
assertThat(client.getEndpoint(), is(URI.create("https://azure1")));
}

public void testGetSelectedClientPrimary() {
AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(settings);
azureStorageService.doStart();

@@ -188,3 +188,31 @@
index \s+ alias \n
test \s+ test_1 \n
$/

---
"Alias against closed index":

  - do:
      indices.create:
        index: test_index
        body:
          aliases:
            test_alias: {}

  - do:
      indices.close:
        index: test_index

  - do:
      cat.aliases: {}

  - match:
      $body: |
        /^
          test_alias \s+
          test_index \s+
          - \s+
          - \s+
          - \s+
        $/

@@ -205,3 +205,25 @@ setup:
  - is_true: test_index

  - is_true: test_index_2

---
"Get alias against closed indices":

  - do:
      indices.close:
        index: test_index_2

  - do:
      indices.get_alias:
        name: test_alias

  - is_true: test_index
  - is_true: test_index_2

  - do:
      indices.get_alias:
        name: test_alias
        expand_wildcards: open

  - is_true: test_index
  - is_false: test_index_2

@@ -101,9 +101,8 @@ import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MappedFieldType.Loading;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.MergePolicyConfig;
import org.elasticsearch.index.shard.MergeSchedulerConfig;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.MergeSchedulerConfig;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
@@ -511,10 +510,10 @@ public abstract class ESIntegTestCase extends ESTestCase {

private static Settings.Builder setRandomIndexTranslogSettings(Random random, Settings.Builder builder) {
if (random.nextBoolean()) {
builder.put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 300), ByteSizeUnit.MB));
builder.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 300), ByteSizeUnit.MB));
}
if (random.nextBoolean()) {
builder.put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(1, ByteSizeUnit.PB)); // just don't flush
builder.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(1, ByteSizeUnit.PB)); // just don't flush
}
if (random.nextBoolean()) {
builder.put(IndexSettings.INDEX_TRANSLOG_DURABILITY, RandomPicks.randomFrom(random, Translog.Durability.values()));
@@ -1800,14 +1799,30 @@ public abstract class ESIntegTestCase extends ESTestCase {
InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, mockPlugins);
}

/** Return the mock plugins the cluster should use. These may be randomly omitted based on the cluster seed. */
/** Return the mock plugins the cluster should use */
protected Collection<Class<? extends Plugin>> getMockPlugins() {
return pluginList(MockTransportService.TestPlugin.class,
MockFSIndexStore.TestPlugin.class,
NodeMocksPlugin.class,
MockEngineFactoryPlugin.class,
MockSearchService.TestPlugin.class,
AssertingLocalTransport.TestPlugin.class);
final ArrayList<Class<? extends Plugin>> mocks = new ArrayList<>();
if (randomBoolean()) { // sometimes run without those completely
if (randomBoolean()) {
mocks.add(MockTransportService.TestPlugin.class);
}
if (randomBoolean()) {
mocks.add(MockFSIndexStore.TestPlugin.class);
}
if (randomBoolean()) {
mocks.add(NodeMocksPlugin.class);
}
if (randomBoolean()) {
mocks.add(MockEngineFactoryPlugin.class);
}
if (randomBoolean()) {
mocks.add(MockSearchService.TestPlugin.class);
}
if (randomBoolean()) {
mocks.add(AssertingLocalTransport.TestPlugin.class);
}
}
return Collections.unmodifiableList(mocks);
}

/**

@@ -367,7 +367,7 @@ public final class InternalTestCluster extends TestCluster {
return builder.build();
}

private Collection<Class<? extends Plugin>> getPlugins(long seed) {
private Collection<Class<? extends Plugin>> getPlugins() {
Set<Class<? extends Plugin>> plugins = new HashSet<>(nodeConfigurationSource.nodePlugins());
plugins.addAll(mockPlugins);
if (isLocalTransportConfigured() == false) {
@@ -589,7 +589,7 @@ public final class InternalTestCluster extends TestCluster {
assert Thread.holdsLock(this);
ensureOpen();
settings = getSettings(nodeId, seed, settings);
Collection<Class<? extends Plugin>> plugins = getPlugins(seed);
Collection<Class<? extends Plugin>> plugins = getPlugins();
String name = buildNodeName(nodeId);
assert !nodes.containsKey(name);
Settings finalSettings = settingsBuilder()

@@ -51,18 +51,6 @@ public abstract class NodeConfigurationSource {
 */
public abstract Settings nodeSettings(int nodeOrdinal);

/** Plugins that will be randomly added to the node */
public Collection<Class<? extends Plugin>> mockPlugins() {
List<Class<? extends Plugin>> plugins = new ArrayList<>();
plugins.add(MockTransportService.TestPlugin.class);
plugins.add(MockFSIndexStore.TestPlugin.class);
plugins.add(NodeMocksPlugin.class);
plugins.add(MockEngineFactoryPlugin.class);
plugins.add(MockSearchService.TestPlugin.class);
plugins.add(AssertingLocalTransport.TestPlugin.class);
return plugins;
}

/** Returns plugins that should be loaded on the node */
public Collection<Class<? extends Plugin>> nodePlugins() {
return Collections.emptyList();

@ -30,6 +30,7 @@ import org.apache.lucene.util.LuceneTestCase;
|
|||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
import org.elasticsearch.index.engine.EngineConfig;
|
||||
|
@@ -50,9 +51,16 @@ import java.util.concurrent.atomic.AtomicBoolean;
  */
 public final class MockEngineSupport {
 
-    public static final String WRAP_READER_RATIO = "index.engine.mock.random.wrap_reader_ratio";
-    public static final String READER_WRAPPER_TYPE = "index.engine.mock.random.wrapper";
-    public static final String FLUSH_ON_CLOSE_RATIO = "index.engine.mock.flush_on_close.ratio";
+    /**
+     * Allows tests to wrap an index reader randomly with a given ratio. This is disabled by default ie. <tt>0.0d</tt> since reader wrapping is insanely
+     * slow if {@link org.apache.lucene.index.AssertingDirectoryReader} is used.
+     */
+    public static final Setting<Double> WRAP_READER_RATIO = Setting.doubleSetting("index.engine.mock.random.wrap_reader_ratio", 0.0d, 0.0d, false, Setting.Scope.INDEX);
+    /**
+     * Allows tests to prevent an engine from being flushed on close ie. to test translog recovery...
+     */
+    public static final Setting<Boolean> DISABLE_FLUSH_ON_CLOSE = Setting.boolSetting("index.mock.disable_flush_on_close", false, false, Setting.Scope.INDEX);
+
 
     private final AtomicBoolean closing = new AtomicBoolean(false);
     private final ESLogger logger = Loggers.getLogger(Engine.class);
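The string constants above become typed Setting instances from the new settings infrastructure, so reads go through Setting#get(Settings) instead of string-keyed getAsDouble/getAsBoolean lookups with inline defaults. A rough sketch of the difference, assuming a static import of settingsBuilder() as used elsewhere in this diff (the 0.1 value is illustrative):

    Settings indexSettings = settingsBuilder()
        .put("index.engine.mock.random.wrap_reader_ratio", 0.1d)
        .build();

    // old style: key string and default repeated at every call site
    double oldRatio = indexSettings.getAsDouble("index.engine.mock.random.wrap_reader_ratio", 0.0d);

    // new style: the default (0.0d), minimum and scope live on the Setting itself
    double ratio = MockEngineSupport.WRAP_READER_RATIO.get(indexSettings);
    boolean disableFlush = MockEngineSupport.DISABLE_FLUSH_ON_CLOSE.get(indexSettings); // false, the default applies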
@@ -61,20 +69,24 @@ public final class MockEngineSupport {
     private final QueryCachingPolicy filterCachingPolicy;
     private final SearcherCloseable searcherCloseable;
     private final MockContext mockContext;
+    private final boolean disableFlushOnClose;
+
+    public boolean isFlushOnCloseDisabled() {
+        return disableFlushOnClose;
+    }
+
 
     public static class MockContext {
         private final Random random;
         private final boolean wrapReader;
         private final Class<? extends FilterDirectoryReader> wrapper;
         private final Settings indexSettings;
-        private final double flushOnClose;
 
         public MockContext(Random random, boolean wrapReader, Class<? extends FilterDirectoryReader> wrapper, Settings indexSettings) {
             this.random = random;
             this.wrapReader = wrapReader;
             this.wrapper = wrapper;
             this.indexSettings = indexSettings;
-            flushOnClose = indexSettings.getAsDouble(FLUSH_ON_CLOSE_RATIO, 0.5d);
         }
     }
 
@@ -85,7 +97,7 @@ public final class MockEngineSupport {
         filterCachingPolicy = config.getQueryCachingPolicy();
         final long seed = settings.getAsLong(ESIntegTestCase.SETTING_INDEX_SEED, 0l);
         Random random = new Random(seed);
-        final double ratio = settings.getAsDouble(WRAP_READER_RATIO, 0.0d); // DISABLED by default - AssertingDR is crazy slow
+        final double ratio = WRAP_READER_RATIO.get(settings);
         boolean wrapReader = random.nextDouble() < ratio;
         if (logger.isTraceEnabled()) {
             logger.trace("Using [{}] for shard [{}] seed: [{}] wrapReader: [{}]", this.getClass().getName(), shardId, seed, wrapReader);
@@ -93,6 +105,7 @@ public final class MockEngineSupport {
         mockContext = new MockContext(random, wrapReader, wrapper, settings);
         this.searcherCloseable = new SearcherCloseable();
         LuceneTestCase.closeAfterSuite(searcherCloseable); // only one suite closeable per Engine
+        this.disableFlushOnClose = DISABLE_FLUSH_ON_CLOSE.get(settings);
     }
 
     enum CloseAction {
@@ -105,9 +118,9 @@ public final class MockEngineSupport {
      * Returns the CloseAction to execute on the actual engine. Note this method changes the state on
      * the first call and treats subsequent calls as if the engine passed is already closed.
      */
-    public CloseAction flushOrClose(Engine engine, CloseAction originalAction) throws IOException {
+    public CloseAction flushOrClose(CloseAction originalAction) throws IOException {
         if (closing.compareAndSet(false, true)) { // only do the random thing if we are the first call to this since super.flushOnClose() calls #close() again and then we might end up with a stackoverflow.
-            if (mockContext.flushOnClose > mockContext.random.nextDouble()) {
+            if (mockContext.random.nextBoolean()) {
                 return CloseAction.FLUSH_AND_CLOSE;
             } else {
                 return CloseAction.CLOSE;
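Two behavior notes on this hunk: the flush-on-close probability is no longer read from FLUSH_ON_CLOSE_RATIO (which defaulted to 0.5) but is now a fair coin flip, and deterministic disabling goes through the new DISABLE_FLUSH_ON_CLOSE setting instead. The closing flag means only the first caller takes the random branch; a condensed sketch of that guard (the else branch returning originalAction is an assumption based on the method contract, since the hunk is truncated here):

    if (closing.compareAndSet(false, true)) {
        // first close attempt: randomly choose between flushing and a plain close
        return mockContext.random.nextBoolean() ? CloseAction.FLUSH_AND_CLOSE : CloseAction.CLOSE;
    } else {
        // re-entrant call, e.g. super.flushAndClose() invoking close() again
        return originalAction; // assumed: fall back to the caller's requested action
    }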
@@ -49,9 +49,9 @@ final class MockInternalEngine extends InternalEngine {
 
     @Override
     public void close() throws IOException {
-        switch (support().flushOrClose(this, MockEngineSupport.CloseAction.CLOSE)) {
+        switch (support().flushOrClose(MockEngineSupport.CloseAction.CLOSE)) {
             case FLUSH_AND_CLOSE:
-                super.flushAndClose();
+                flushAndCloseInternal();
                 break;
             case CLOSE:
                 super.close();
@@ -62,16 +62,24 @@ final class MockInternalEngine extends InternalEngine {
     @Override
     public void flushAndClose() throws IOException {
         if (randomizeFlushOnClose) {
-            switch (support().flushOrClose(this, MockEngineSupport.CloseAction.FLUSH_AND_CLOSE)) {
+            switch (support().flushOrClose(MockEngineSupport.CloseAction.FLUSH_AND_CLOSE)) {
                 case FLUSH_AND_CLOSE:
-                    super.flushAndClose();
+                    flushAndCloseInternal();
                     break;
                 case CLOSE:
                     super.close();
                     break;
             }
         } else {
-            super.flushAndClose();
+            flushAndCloseInternal();
         }
     }
+
+    private void flushAndCloseInternal() throws IOException {
+        if (support().isFlushOnCloseDisabled() == false) {
+            super.flushAndClose();
+        } else {
+            super.close();
+        }
+    }
 
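flushAndCloseInternal() centralizes the DISABLE_FLUSH_ON_CLOSE check so both close paths honor it. A hypothetical test-side usage, enabling the new boolean setting at index creation to force translog recovery on close (the index name and integration-test context are illustrative, not from this commit):

    client().admin().indices().prepareCreate("test")
        .setSettings(settingsBuilder().put("index.mock.disable_flush_on_close", true))
        .get();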