Remove remaining line length violations for o.e.index (#35652)

This commit removes line length violations in the classes under
org.elasticsearch.index.
Jim Ferenczi 2018-11-20 08:09:14 +01:00 committed by GitHub
parent 29ef442841
commit a5f5ceb869
61 changed files with 1520 additions and 837 deletions
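The suppressions removed in the first file below exist because the build enforces a maximum line length through Checkstyle's LineLength check, and files that still violated it were whitelisted in a suppressions file. A minimal sketch of such a configuration is shown here; the 140-character limit and the enclosing module layout are assumptions for illustration, not taken from this diff:

<module name="Checker">
    <module name="TreeWalker">
        <!-- Fail the build when a source line exceeds the configured maximum length. -->
        <module name="LineLength">
            <property name="max" value="140"/>
        </module>
    </module>
</module>

Once a class is wrapped to fit within the limit, its <suppress files="..." checks="LineLength"/> entry can be deleted, which is what the first hunk does.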

View File

@ -45,38 +45,6 @@
<!-- Hopefully temporary suppression of LineLength on files that don't pass it. We should remove these when the
files start to pass. -->
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]CompositeIndexEventListener.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexSettings.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]MergePolicyConfig.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]SearchSlowLog.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]AnalysisRegistry.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]CustomAnalyzerProvider.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]ShingleTokenFilterFactory.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]cache[/\\]bitset[/\\]BitsetFilterCache.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]codec[/\\]PerFieldMappingPostingFormatCodec.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]ElasticsearchConcurrentMergeScheduler.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]Engine.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]InternalEngine.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]LiveVersionMap.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]get[/\\]ShardGetService.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]merge[/\\]MergeStats.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryBuilders.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryValidationException.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]search[/\\]MultiMatchQuery.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]shard[/\\]IndexEventListener.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]shard[/\\]IndexSearcherWrapper.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]shard[/\\]IndexShard.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]shard[/\\]IndexingStats.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]shard[/\\]ShardPath.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]shard[/\\]ShardStateMetaData.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]shard[/\\]StoreRecovery.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]Store.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]termvectors[/\\]TermVectorsService.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]translog[/\\]BaseTranslogReader.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]translog[/\\]Translog.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]translog[/\\]TranslogReader.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]translog[/\\]TranslogSnapshot.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]translog[/\\]TranslogWriter.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]GcNames.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]HotThreads.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]Node.java" checks="LineLength" />
@ -133,34 +101,6 @@
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]env[/\\]NodeEnvironmentTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]explain[/\\]ExplainActionIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]get[/\\]GetActionIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexingSlowLogTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]MergePolicySettingsTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]SearchSlowLogTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]PreBuiltAnalyzerTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]InternalEngineMergeIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]InternalEngineTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]BoolQueryBuilderTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]BoostingQueryBuilderTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]GeoDistanceQueryBuilderTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]MoreLikeThisQueryBuilderTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]MultiMatchQueryBuilderTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]SpanNotQueryBuilderTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]functionscore[/\\]FunctionScoreTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]search[/\\]geo[/\\]GeoUtilsTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]search[/\\]nested[/\\]AbstractNumberNestedSortingTestCase.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]search[/\\]nested[/\\]DoubleNestedSortingTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]search[/\\]nested[/\\]FloatNestedSortingTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]search[/\\]nested[/\\]LongNestedSortingTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]search[/\\]nested[/\\]NestedSortingTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]shard[/\\]IndexShardIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]shard[/\\]IndexShardTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]shard[/\\]ShardPathTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]similarity[/\\]SimilarityTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]CorruptedFileIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]CorruptedTranslogIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]IndexStoreTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]StoreTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]translog[/\\]TranslogTests.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indexing[/\\]IndexActionIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indexlifecycle[/\\]IndexLifecycleActionIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]jvm[/\\]JvmGcMonitorServiceSettingsTests.java" checks="LineLength" />
@ -224,5 +164,4 @@
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]validate[/\\]SimpleValidateQueryIT.java" checks="LineLength" />
<suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]versioning[/\\]SimpleVersioningIT.java" checks="LineLength" />
<suppress files="modules[/\\]lang-painless[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]ContextExampleTests.java" checks="LineLength" />
<suppress files="modules[/\\]reindex[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]reindex[/\\]TransportUpdateByQueryAction.java" checks="LineLength" />
</suppressions>

View File

@ -90,8 +90,8 @@ public class TransportUpdateByQueryAction extends HandledTransportAction<UpdateB
@Override
protected boolean needsSourceDocumentVersions() {
/*
* We always need the version of the source document so we can report a version conflict if we try to delete it and it has been
* changed.
* We always need the version of the source document so we can report a version conflict if we try to delete it and it has
* been changed.
*/
return true;
}

View File

@ -60,7 +60,8 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.shardRoutingChanged(indexShard, oldRouting, newRouting);
} catch (Exception e) {
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke shard touring changed callback", indexShard.shardId().getId()), e);
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke shard touring changed callback",
indexShard.shardId().getId()), e);
}
}
}
@ -71,7 +72,8 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.afterIndexShardCreated(indexShard);
} catch (Exception e) {
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard created callback", indexShard.shardId().getId()), e);
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard created callback",
indexShard.shardId().getId()), e);
throw e;
}
}
@ -83,7 +85,8 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.afterIndexShardStarted(indexShard);
} catch (Exception e) {
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard started callback", indexShard.shardId().getId()), e);
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard started callback",
indexShard.shardId().getId()), e);
throw e;
}
}
@ -96,7 +99,8 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.beforeIndexShardClosed(shardId, indexShard, indexSettings);
} catch (Exception e) {
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard closed callback", shardId.getId()), e);
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard closed callback",
shardId.getId()), e);
throw e;
}
}
@ -109,7 +113,8 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.afterIndexShardClosed(shardId, indexShard, indexSettings);
} catch (Exception e) {
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard closed callback", shardId.getId()), e);
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard closed callback",
shardId.getId()), e);
throw e;
}
}
@ -121,19 +126,22 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.onShardInactive(indexShard);
} catch (Exception e) {
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke on shard inactive callback", indexShard.shardId().getId()), e);
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke on shard inactive callback",
indexShard.shardId().getId()), e);
throw e;
}
}
}
@Override
public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState currentState, @Nullable String reason) {
public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState currentState,
@Nullable String reason) {
for (IndexEventListener listener : listeners) {
try {
listener.indexShardStateChanged(indexShard, previousState, indexShard.state(), reason);
} catch (Exception e) {
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke index shard state changed callback", indexShard.shardId().getId()), e);
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke index shard state changed callback",
indexShard.shardId().getId()), e);
throw e;
}
}
@ -169,7 +177,8 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.beforeIndexShardCreated(shardId, indexSettings);
} catch (Exception e) {
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard created callback", shardId), e);
logger.warn(() ->
new ParameterizedMessage("[{}] failed to invoke before shard created callback", shardId), e);
throw e;
}
}
@ -206,7 +215,8 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.beforeIndexShardDeleted(shardId, indexSettings);
} catch (Exception e) {
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard deleted callback", shardId.getId()), e);
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard deleted callback",
shardId.getId()), e);
throw e;
}
}
@ -219,7 +229,8 @@ final class CompositeIndexEventListener implements IndexEventListener {
try {
listener.afterIndexShardDeleted(shardId, indexSettings);
} catch (Exception e) {
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard deleted callback", shardId.getId()), e);
logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard deleted callback",
shardId.getId()), e);
throw e;
}
}

View File

@ -71,16 +71,18 @@ public final class IndexSettings {
(value) -> Translog.Durability.valueOf(value.toUpperCase(Locale.ROOT)), Property.Dynamic, Property.IndexScope);
public static final Setting<Boolean> INDEX_WARMER_ENABLED_SETTING =
Setting.boolSetting("index.warmer.enabled", true, Property.Dynamic, Property.IndexScope);
public static final Setting<String> INDEX_CHECK_ON_STARTUP = new Setting<>("index.shard.check_on_startup", "false", (s) -> {
switch(s) {
case "false":
case "true":
case "checksum":
return s;
default:
throw new IllegalArgumentException("unknown value for [index.shard.check_on_startup] must be one of [true, false, checksum] but was: " + s);
}
}, Property.IndexScope);
public static final Setting<String> INDEX_CHECK_ON_STARTUP =
new Setting<>("index.shard.check_on_startup", "false", (s) -> {
switch (s) {
case "false":
case "true":
case "checksum":
return s;
default:
throw new IllegalArgumentException("unknown value for [index.shard.check_on_startup] must be one of " +
"[true, false, checksum] but was: " + s);
}
}, Property.IndexScope);
/**
* Index setting describing the maximum value of from + size on a query.
@ -125,7 +127,8 @@ public final class IndexSettings {
* indexing with offsets or term vectors is recommended.
*/
public static final Setting<Integer> MAX_ANALYZED_OFFSET_SETTING =
Setting.intSetting("index.highlight.max_analyzed_offset", 1000000, 1, Property.Dynamic, Property.IndexScope);
Setting.intSetting("index.highlight.max_analyzed_offset", 1000000, 1,
Property.Dynamic, Property.IndexScope);
/**
@ -168,7 +171,8 @@ public final class IndexSettings {
* because they both do the same thing: control the size of the heap of hits.
*/
public static final Setting<Integer> MAX_RESCORE_WINDOW_SETTING =
Setting.intSetting("index.max_rescore_window", MAX_RESULT_WINDOW_SETTING, 1, Property.Dynamic, Property.IndexScope);
Setting.intSetting("index.max_rescore_window", MAX_RESULT_WINDOW_SETTING, 1,
Property.Dynamic, Property.IndexScope);
/**
* Index setting describing the maximum number of filters clauses that can be used
* in an adjacency_matrix aggregation. The max number of buckets produced by
@ -197,8 +201,8 @@ public final class IndexSettings {
* the chance of ops based recoveries.
**/
public static final Setting<TimeValue> INDEX_TRANSLOG_RETENTION_AGE_SETTING =
Setting.timeSetting("index.translog.retention.age", TimeValue.timeValueHours(12), TimeValue.timeValueMillis(-1), Property.Dynamic,
Property.IndexScope);
Setting.timeSetting("index.translog.retention.age", TimeValue.timeValueHours(12), TimeValue.timeValueMillis(-1),
Property.Dynamic, Property.IndexScope);
/**
* Controls how many translog files that are no longer needed for persistence reasons
@ -234,8 +238,8 @@ public final class IndexSettings {
*/
public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60);
public static final Setting<TimeValue> INDEX_GC_DELETES_SETTING =
Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), Property.Dynamic,
Property.IndexScope);
Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS),
Property.Dynamic, Property.IndexScope);
/**
* Specifies if the index should use soft-delete instead of hard-delete for update/delete operations.
@ -249,13 +253,14 @@ public final class IndexSettings {
* If soft-deletes is enabled, an engine by default will retain all operations up to the global checkpoint.
**/
public static final Setting<Long> INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING =
Setting.longSetting("index.soft_deletes.retention.operations", 0, 0, Property.IndexScope, Property.Dynamic);
Setting.longSetting("index.soft_deletes.retention.operations", 0, 0,
Property.IndexScope, Property.Dynamic);
/**
* The maximum number of refresh listeners allows on this shard.
*/
public static final Setting<Integer> MAX_REFRESH_LISTENERS_PER_SHARD = Setting.intSetting("index.max_refresh_listeners", 1000, 0,
Property.Dynamic, Property.IndexScope);
public static final Setting<Integer> MAX_REFRESH_LISTENERS_PER_SHARD = Setting.intSetting("index.max_refresh_listeners",
1000, 0, Property.Dynamic, Property.IndexScope);
/**
* The maximum number of slices allowed in a scroll request
@ -447,16 +452,23 @@ public final class IndexSettings {
defaultPipeline = scopedSettings.get(DEFAULT_PIPELINE);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, mergePolicyConfig::setDeletesPctAllowed);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, mergePolicyConfig::setExpungeDeletesAllowed);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, mergePolicyConfig::setFloorSegmentSetting);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, mergePolicyConfig::setMaxMergesAtOnce);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING, mergePolicyConfig::setMaxMergesAtOnceExplicit);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, mergePolicyConfig::setMaxMergedSegment);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, mergePolicyConfig::setSegmentsPerTier);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING,
mergePolicyConfig::setDeletesPctAllowed);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING,
mergePolicyConfig::setExpungeDeletesAllowed);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING,
mergePolicyConfig::setFloorSegmentSetting);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING,
mergePolicyConfig::setMaxMergesAtOnce);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING,
mergePolicyConfig::setMaxMergesAtOnceExplicit);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING,
mergePolicyConfig::setMaxMergedSegment);
scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING,
mergePolicyConfig::setSegmentsPerTier);
scopedSettings.addSettingsUpdateConsumer(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING,
mergeSchedulerConfig::setMaxThreadAndMergeCount);
scopedSettings.addSettingsUpdateConsumer(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING,
MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING, mergeSchedulerConfig::setMaxThreadAndMergeCount);
scopedSettings.addSettingsUpdateConsumer(MergeSchedulerConfig.AUTO_THROTTLE_SETTING, mergeSchedulerConfig::setAutoThrottle);
scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_DURABILITY_SETTING, this::setTranslogDurability);
scopedSettings.addSettingsUpdateConsumer(MAX_RESULT_WINDOW_SETTING, this::setMaxResultWindow);
@ -590,14 +602,16 @@ public final class IndexSettings {
}
/**
* Updates the settings and index metadata and notifies all registered settings consumers with the new settings iff at least one setting has changed.
* Updates the settings and index metadata and notifies all registered settings consumers with the new settings iff at least one
* setting has changed.
*
* @return <code>true</code> iff any setting has been updated otherwise <code>false</code>.
*/
public synchronized boolean updateIndexMetaData(IndexMetaData indexMetaData) {
final Settings newSettings = indexMetaData.getSettings();
if (version.equals(Version.indexCreated(newSettings)) == false) {
throw new IllegalArgumentException("version mismatch on settings update expected: " + version + " but was: " + Version.indexCreated(newSettings));
throw new IllegalArgumentException("version mismatch on settings update expected: " + version + " but was: " +
Version.indexCreated(newSettings));
}
final String newUUID = newSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);
if (newUUID.equals(getUUID()) == false) {
@ -674,7 +688,8 @@ public final class IndexSettings {
public ByteSizeValue getTranslogRetentionSize() { return translogRetentionSize; }
/**
* Returns the transaction log retention age which controls the maximum age (time from creation) that translog files will be kept around
* Returns the transaction log retention age which controls the maximum age (time from creation) that translog files will be kept
* around
*/
public TimeValue getTranslogRetentionAge() { return translogRetentionAge; }

View File

@ -129,8 +129,8 @@ public final class MergePolicyConfig {
public static final double DEFAULT_RECLAIM_DELETES_WEIGHT = 2.0d;
public static final double DEFAULT_DELETES_PCT_ALLOWED = 33.0d;
public static final Setting<Double> INDEX_COMPOUND_FORMAT_SETTING =
new Setting<>("index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), MergePolicyConfig::parseNoCFSRatio,
Property.Dynamic, Property.IndexScope);
new Setting<>("index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO),
MergePolicyConfig::parseNoCFSRatio, Property.Dynamic, Property.IndexScope);
public static final Setting<Double> INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING =
Setting.doubleSetting("index.merge.policy.expunge_deletes_allowed", DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d,
@ -156,8 +156,8 @@ public final class MergePolicyConfig {
public static final Setting<Double> INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING =
Setting.doubleSetting("index.merge.policy.deletes_pct_allowed", DEFAULT_DELETES_PCT_ALLOWED, 20.0d, 50.0d,
Property.Dynamic, Property.IndexScope);
public static final String INDEX_MERGE_ENABLED = "index.merge.enabled"; // don't convert to Setting<> and register... we only set this in tests and register via a plugin
// don't convert to Setting<> and register... we only set this in tests and register via a plugin
public static final String INDEX_MERGE_ENABLED = "index.merge.enabled";
MergePolicyConfig(Logger logger, IndexSettings indexSettings) {
this.logger = logger;
@ -165,14 +165,16 @@ public final class MergePolicyConfig {
ByteSizeValue floorSegment = indexSettings.getValue(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING);
int maxMergeAtOnce = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING);
int maxMergeAtOnceExplicit = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING);
// TODO is this really a good default number for max_merge_segment, what happens for large indices, won't they end up with many segments?
// TODO is this really a good default number for max_merge_segment, what happens for large indices,
// won't they end up with many segments?
ByteSizeValue maxMergedSegment = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING);
double segmentsPerTier = indexSettings.getValue(INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING);
double reclaimDeletesWeight = indexSettings.getValue(INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING);
double deletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING);
this.mergesEnabled = indexSettings.getSettings().getAsBoolean(INDEX_MERGE_ENABLED, true);
if (mergesEnabled == false) {
logger.warn("[{}] is set to false, this should only be used in tests and can cause serious problems in production environments", INDEX_MERGE_ENABLED);
logger.warn("[{}] is set to false, this should only be used in tests and can cause serious problems in production" +
" environments", INDEX_MERGE_ENABLED);
}
maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier);
mergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING));
@ -184,8 +186,11 @@ public final class MergePolicyConfig {
mergePolicy.setSegmentsPerTier(segmentsPerTier);
mergePolicy.setDeletesPctAllowed(deletesPctAllowed);
if (logger.isTraceEnabled()) {
logger.trace("using [tiered] merge mergePolicy with expunge_deletes_allowed[{}], floor_segment[{}], max_merge_at_once[{}], max_merge_at_once_explicit[{}], max_merged_segment[{}], segments_per_tier[{}], deletes_pct_allowed[{}]",
forceMergeDeletesPctAllowed, floorSegment, maxMergeAtOnce, maxMergeAtOnceExplicit, maxMergedSegment, segmentsPerTier, deletesPctAllowed);
logger.trace("using [tiered] merge mergePolicy with expunge_deletes_allowed[{}], floor_segment[{}]," +
" max_merge_at_once[{}], max_merge_at_once_explicit[{}], max_merged_segment[{}], segments_per_tier[{}]," +
" deletes_pct_allowed[{}]",
forceMergeDeletesPctAllowed, floorSegment, maxMergeAtOnce, maxMergeAtOnceExplicit, maxMergedSegment, segmentsPerTier,
deletesPctAllowed);
}
}
@ -229,7 +234,9 @@ public final class MergePolicyConfig {
if (newMaxMergeAtOnce <= 1) {
newMaxMergeAtOnce = 2;
}
logger.debug("changing max_merge_at_once from [{}] to [{}] because segments_per_tier [{}] has to be higher or equal to it", maxMergeAtOnce, newMaxMergeAtOnce, segmentsPerTier);
logger.debug("changing max_merge_at_once from [{}] to [{}] because segments_per_tier [{}] has to be higher or " +
"equal to it",
maxMergeAtOnce, newMaxMergeAtOnce, segmentsPerTier);
maxMergeAtOnce = newMaxMergeAtOnce;
}
return maxMergeAtOnce;
@ -253,7 +260,8 @@ public final class MergePolicyConfig {
}
return value;
} catch (NumberFormatException ex) {
throw new IllegalArgumentException("Expected a boolean or a value in the interval [0..1] but was: [" + noCFSRatio + "]", ex);
throw new IllegalArgumentException("Expected a boolean or a value in the interval [0..1] but was: " +
"[" + noCFSRatio + "]", ex);
}
}
}

View File

@ -86,22 +86,30 @@ public final class SearchSlowLog implements SearchOperationListener {
this.queryLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query");
this.fetchLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch");
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING, this::setQueryWarnThreshold);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING,
this::setQueryWarnThreshold);
this.queryWarnThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING).nanos();
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING, this::setQueryInfoThreshold);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING,
this::setQueryInfoThreshold);
this.queryInfoThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING).nanos();
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING, this::setQueryDebugThreshold);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING,
this::setQueryDebugThreshold);
this.queryDebugThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING).nanos();
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING, this::setQueryTraceThreshold);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING,
this::setQueryTraceThreshold);
this.queryTraceThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING).nanos();
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING, this::setFetchWarnThreshold);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING,
this::setFetchWarnThreshold);
this.fetchWarnThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING).nanos();
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING, this::setFetchInfoThreshold);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING,
this::setFetchInfoThreshold);
this.fetchInfoThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING).nanos();
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING, this::setFetchDebugThreshold);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING,
this::setFetchDebugThreshold);
this.fetchDebugThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING).nanos();
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING, this::setFetchTraceThreshold);
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING,
this::setFetchTraceThreshold);
this.fetchTraceThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING).nanos();
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_LEVEL, this::setLevel);
@ -170,7 +178,8 @@ public final class SearchSlowLog implements SearchOperationListener {
Strings.collectionToDelimitedString(context.groupStats(), ",", "", "", sb);
sb.append("], ");
}
sb.append("search_type[").append(context.searchType()).append("], total_shards[").append(context.numberOfShards()).append("], ");
sb.append("search_type[").append(context.searchType()).append("], total_shards[")
.append(context.numberOfShards()).append("], ");
if (context.request().source() != null) {
sb.append("source[").append(context.request().source().toString(FORMAT_PARAMS)).append("], ");
} else {

View File

@ -169,7 +169,8 @@ public final class AnalysisRegistry implements Closeable {
public Map<String, CharFilterFactory> buildCharFilterFactories(IndexSettings indexSettings) throws IOException {
final Map<String, Settings> charFiltersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_CHAR_FILTER);
return buildMapping(Component.CHAR_FILTER, indexSettings, charFiltersSettings, charFilters, prebuiltAnalysis.preConfiguredCharFilterFactories);
return buildMapping(Component.CHAR_FILTER, indexSettings, charFiltersSettings, charFilters,
prebuiltAnalysis.preConfiguredCharFilterFactories);
}
public Map<String, AnalyzerProvider<?>> buildAnalyzerFactories(IndexSettings indexSettings) throws IOException {
@ -287,7 +288,8 @@ public final class AnalysisRegistry implements Closeable {
if (currentSettings.get("tokenizer") != null) {
factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings, environment);
} else {
throw new IllegalArgumentException(component + " [" + name + "] must specify either an analyzer type, or a tokenizer");
throw new IllegalArgumentException(component + " [" + name + "] " +
"must specify either an analyzer type, or a tokenizer");
}
} else if (typeName.equals("custom")) {
factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings, environment);
@ -425,14 +427,15 @@ public final class AnalysisRegistry implements Closeable {
tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories);
}
for (Map.Entry<String, AnalyzerProvider<?>> entry : normalizerProviders.entrySet()) {
processNormalizerFactory(entry.getKey(), entry.getValue(), normalizers,
"keyword", tokenizerFactoryFactories.get("keyword"), tokenFilterFactoryFactories, charFilterFactoryFactories);
processNormalizerFactory(entry.getKey(), entry.getValue(), normalizers, "keyword",
tokenizerFactoryFactories.get("keyword"), tokenFilterFactoryFactories, charFilterFactoryFactories);
processNormalizerFactory(entry.getKey(), entry.getValue(), whitespaceNormalizers,
"whitespace", () -> new WhitespaceTokenizer(), tokenFilterFactoryFactories, charFilterFactoryFactories);
}
if (!analyzers.containsKey("default")) {
processAnalyzerFactory(indexSettings, "default", new StandardAnalyzerProvider(indexSettings, null, "default", Settings.Builder.EMPTY_SETTINGS),
processAnalyzerFactory(indexSettings, "default", new StandardAnalyzerProvider(indexSettings, null,
"default", Settings.Builder.EMPTY_SETTINGS),
analyzers, tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories);
}
if (!analyzers.containsKey("default_search")) {
@ -447,7 +450,8 @@ public final class AnalysisRegistry implements Closeable {
throw new IllegalArgumentException("no default analyzer configured");
}
if (analyzers.containsKey("default_index")) {
throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use [index.analysis.analyzer.default] instead for index [" + index.getName() + "]");
throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use " +
"[index.analysis.analyzer.default] instead for index [" + index.getName() + "]");
}
NamedAnalyzer defaultSearchAnalyzer = analyzers.getOrDefault("default_search", defaultAnalyzer);
NamedAnalyzer defaultSearchQuoteAnalyzer = analyzers.getOrDefault("default_search_quote", defaultSearchAnalyzer);

View File

@ -55,7 +55,8 @@ public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<Custom
TokenizerFactory tokenizer = tokenizers.get(tokenizerName);
if (tokenizer == null) {
throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find tokenizer under name [" + tokenizerName + "]");
throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find tokenizer under name " +
"[" + tokenizerName + "]");
}
List<String> charFilterNames = analyzerSettings.getAsList("char_filter");
@ -63,7 +64,8 @@ public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<Custom
for (String charFilterName : charFilterNames) {
CharFilterFactory charFilter = charFilters.get(charFilterName);
if (charFilter == null) {
throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find char_filter under name [" + charFilterName + "]");
throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find char_filter under name " +
"[" + charFilterName + "]");
}
charFiltersList.add(charFilter);
}
@ -79,7 +81,8 @@ public class CustomAnalyzerProvider extends AbstractIndexAnalyzerProvider<Custom
for (String tokenFilterName : tokenFilterNames) {
TokenFilterFactory tokenFilter = tokenFilters.get(tokenFilterName);
if (tokenFilter == null) {
throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find filter under name [" + tokenFilterName + "]");
throw new IllegalArgumentException("Custom Analyzer [" + name() + "] failed to find filter under name " +
"[" + tokenFilterName + "]");
}
tokenFilter = tokenFilter.getChainAwareTokenFilterFactory(tokenizer, charFiltersList, tokenFilterList, tokenFilters::get);
tokenFilterList.add(tokenFilter);

View File

@ -46,15 +46,16 @@ public class ShingleTokenFilterFactory extends AbstractTokenFilterFactory {
+ " must be less than or equal to: [" + maxAllowedShingleDiff + "] but was [" + shingleDiff + "]. This limit"
+ " can be set by changing the [" + IndexSettings.MAX_SHINGLE_DIFF_SETTING.getKey() + "] index level setting.");
} else {
deprecationLogger.deprecated("Deprecated big difference between maxShingleSize and minShingleSize in Shingle TokenFilter,"
+ "expected difference must be less than or equal to: [" + maxAllowedShingleDiff + "]");
deprecationLogger.deprecated("Deprecated big difference between maxShingleSize and minShingleSize" +
" in Shingle TokenFilter, expected difference must be less than or equal to: [" + maxAllowedShingleDiff + "]");
}
}
Boolean outputUnigramsIfNoShingles = settings.getAsBoolean("output_unigrams_if_no_shingles", false);
String tokenSeparator = settings.get("token_separator", ShingleFilter.DEFAULT_TOKEN_SEPARATOR);
String fillerToken = settings.get("filler_token", ShingleFilter.DEFAULT_FILLER_TOKEN);
factory = new Factory("shingle", minShingleSize, maxShingleSize, outputUnigrams, outputUnigramsIfNoShingles, tokenSeparator, fillerToken);
factory = new Factory("shingle", minShingleSize, maxShingleSize, outputUnigrams, outputUnigramsIfNoShingles,
tokenSeparator, fillerToken);
}
@ -83,10 +84,12 @@ public class ShingleTokenFilterFactory extends AbstractTokenFilterFactory {
private final String name;
public Factory(String name) {
this(name, ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, true, false, ShingleFilter.DEFAULT_TOKEN_SEPARATOR, ShingleFilter.DEFAULT_FILLER_TOKEN);
this(name, ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, true,
false, ShingleFilter.DEFAULT_TOKEN_SEPARATOR, ShingleFilter.DEFAULT_FILLER_TOKEN);
}
Factory(String name, int minShingleSize, int maxShingleSize, boolean outputUnigrams, boolean outputUnigramsIfNoShingles, String tokenSeparator, String fillerToken) {
Factory(String name, int minShingleSize, int maxShingleSize, boolean outputUnigrams, boolean outputUnigramsIfNoShingles,
String tokenSeparator, String fillerToken) {
this.maxShingleSize = maxShingleSize;
this.outputUnigrams = outputUnigrams;
this.outputUnigramsIfNoShingles = outputUnigramsIfNoShingles;

View File

@ -71,7 +71,8 @@ import java.util.concurrent.Executor;
* and require that it should always be around should use this cache, otherwise the
* {@link org.elasticsearch.index.cache.query.QueryCache} should be used instead.
*/
public final class BitsetFilterCache extends AbstractIndexComponent implements IndexReader.ClosedListener, RemovalListener<IndexReader.CacheKey, Cache<Query, BitsetFilterCache.Value>>, Closeable {
public final class BitsetFilterCache extends AbstractIndexComponent
implements IndexReader.ClosedListener, RemovalListener<IndexReader.CacheKey, Cache<Query, BitsetFilterCache.Value>>, Closeable {
public static final Setting<Boolean> INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING =
Setting.boolSetting("index.load_fixed_bitset_filters_eagerly", true, Property.IndexScope);
@ -261,10 +262,12 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements I
final long start = System.nanoTime();
getAndLoadIfNotPresent(filterToWarm, ctx);
if (indexShard.warmerService().logger().isTraceEnabled()) {
indexShard.warmerService().logger().trace("warmed bitset for [{}], took [{}]", filterToWarm, TimeValue.timeValueNanos(System.nanoTime() - start));
indexShard.warmerService().logger().trace("warmed bitset for [{}], took [{}]",
filterToWarm, TimeValue.timeValueNanos(System.nanoTime() - start));
}
} catch (Exception e) {
indexShard.warmerService().logger().warn(() -> new ParameterizedMessage("failed to load bitset for [{}]", filterToWarm), e);
indexShard.warmerService().logger().warn(() -> new ParameterizedMessage("failed to load " +
"bitset for [{}]", filterToWarm), e);
} finally {
latch.countDown();
}

View File

@ -42,7 +42,8 @@ public class PerFieldMappingPostingFormatCodec extends Lucene80Codec {
private final MapperService mapperService;
static {
assert Codec.forName(Lucene.LATEST_CODEC).getClass().isAssignableFrom(PerFieldMappingPostingFormatCodec.class) : "PerFieldMappingPostingFormatCodec must subclass the latest lucene codec: " + Lucene.LATEST_CODEC;
assert Codec.forName(Lucene.LATEST_CODEC).getClass().isAssignableFrom(PerFieldMappingPostingFormatCodec.class) :
"PerFieldMappingPostingFormatCodec must subclass the latest " + "lucene codec: " + Lucene.LATEST_CODEC;
}
public PerFieldMappingPostingFormatCodec(Lucene50StoredFieldsFormat.Mode compressionMode, MapperService mapperService, Logger logger) {

View File

@ -92,7 +92,9 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler {
onGoingMerges.add(onGoingMerge);
if (logger.isTraceEnabled()) {
logger.trace("merge [{}] starting..., merging [{}] segments, [{}] docs, [{}] size, into [{}] estimated_size", OneMergeHelper.getSegmentName(merge), merge.segments.size(), totalNumDocs, new ByteSizeValue(totalSizeInBytes), new ByteSizeValue(merge.estimatedMergeBytes));
logger.trace("merge [{}] starting..., merging [{}] segments, [{}] docs, [{}] size, into [{}] estimated_size",
OneMergeHelper.getSegmentName(merge), merge.segments.size(), totalNumDocs, new ByteSizeValue(totalSizeInBytes),
new ByteSizeValue(merge.estimatedMergeBytes));
}
try {
beforeMerge(onGoingMerge);
@ -123,7 +125,8 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler {
totalMergeThrottledTime.inc(throttledMS);
String message = String.format(Locale.ROOT,
"merge segment [%s] done: took [%s], [%,.1f MB], [%,d docs], [%s stopped], [%s throttled], [%,.1f MB written], [%,.1f MB/sec throttle]",
"merge segment [%s] done: took [%s], [%,.1f MB], [%,d docs], [%s stopped], " +
"[%s throttled], [%,.1f MB written], [%,.1f MB/sec throttle]",
OneMergeHelper.getSegmentName(merge),
TimeValue.timeValueMillis(tookMS),
totalSizeInBytes/1024f/1024f,
@ -167,7 +170,8 @@ class ElasticsearchConcurrentMergeScheduler extends ConcurrentMergeScheduler {
@Override
protected MergeThread getMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException {
MergeThread thread = super.getMergeThread(writer, merge);
thread.setName(EsExecutors.threadName(indexSettings, "[" + shardId.getIndexName() + "][" + shardId.id() + "]: " + thread.getName()));
thread.setName(EsExecutors.threadName(indexSettings, "[" + shardId.getIndexName() + "][" + shardId.id() + "]: " +
thread.getName()));
return thread;
}

View File

@ -128,14 +128,14 @@ public abstract class Engine implements Closeable {
protected final SetOnce<Exception> failedEngine = new SetOnce<>();
/*
* on {@code lastWriteNanos} we use System.nanoTime() to initialize this since:
* - we use the value for figuring out if the shard / engine is active so if we startup and no write has happened yet we still consider it active
* for the duration of the configured active to inactive period. If we initialize to 0 or Long.MAX_VALUE we either immediately or never mark it
* inactive if no writes at all happen to the shard.
* - we also use this to flush big-ass merges on an inactive engine / shard but if we we initialize 0 or Long.MAX_VALUE we either immediately or never
* commit merges even though we shouldn't from a user perspective (this can also have funky sideeffects in tests when we open indices with lots of segments
* and suddenly merges kick in.
* NOTE: don't use this value for anything accurate it's a best effort for freeing up diskspace after merges and on a shard level to reduce index buffer sizes on
* inactive shards.
* - we use the value for figuring out if the shard / engine is active so if we startup and no write has happened yet we still
* consider it active for the duration of the configured active to inactive period. If we initialize to 0 or Long.MAX_VALUE we
* either immediately or never mark it inactive if no writes at all happen to the shard.
* - we also use this to flush big-ass merges on an inactive engine / shard but if we we initialize 0 or Long.MAX_VALUE we either
* immediately or never commit merges even though we shouldn't from a user perspective (this can also have funky side effects in
* tests when we open indices with lots of segments and suddenly merges kick in.
* NOTE: don't use this value for anything accurate it's a best effort for freeing up diskspace after merges and on a shard level to
* reduce index buffer sizes on inactive shards.
*/
protected volatile long lastWriteNanos = System.nanoTime();
@ -156,7 +156,8 @@ public abstract class Engine implements Closeable {
this.shardId = engineConfig.getShardId();
this.allocationId = engineConfig.getAllocationId();
this.store = engineConfig.getStore();
this.logger = Loggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name
// we use the engine class directly here to make sure all subclasses have the same logger name
this.logger = Loggers.getLogger(Engine.class,
engineConfig.getShardId());
this.eventListener = engineConfig.getEventListener();
}
@ -291,7 +292,8 @@ public abstract class Engine implements Closeable {
assert startOfThrottleNS > 0 : "Bad state of startOfThrottleNS";
long throttleTimeNS = System.nanoTime() - startOfThrottleNS;
if (throttleTimeNS >= 0) {
// Paranoia (System.nanoTime() is supposed to be monotonic): time slip may have occurred but never want to add a negative number
// Paranoia (System.nanoTime() is supposed to be monotonic): time slip may have occurred but never want
// to add a negative number
throttleTimeMillisMetric.inc(TimeValue.nsecToMSec(throttleTimeNS));
}
}
@ -729,12 +731,14 @@ public abstract class Engine implements Closeable {
* Creates a new history snapshot for reading operations since {@code startingSeqNo} (inclusive).
* The returned snapshot can be retrieved from either Lucene index or translog files.
*/
public abstract Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException;
public abstract Translog.Snapshot readHistoryOperations(String source,
MapperService mapperService, long startingSeqNo) throws IOException;
/**
* Returns the estimated number of history operations whose seq# at least {@code startingSeqNo}(inclusive) in this engine.
*/
public abstract int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException;
public abstract int estimateNumberOfHistoryOperations(String source,
MapperService mapperService, long startingSeqNo) throws IOException;
/**
* Checks if this engine has every operations since {@code startingSeqNo}(inclusive) in its history (either Lucene or translog)
@ -838,9 +842,11 @@ public abstract class Engine implements Closeable {
boolean useCompoundFile = segmentCommitInfo.info.getUseCompoundFile();
if (useCompoundFile) {
try {
directory = engineConfig.getCodec().compoundFormat().getCompoundReader(segmentReader.directory(), segmentCommitInfo.info, IOContext.READ);
directory = engineConfig.getCodec().compoundFormat().getCompoundReader(segmentReader.directory(),
segmentCommitInfo.info, IOContext.READ);
} catch (IOException e) {
logger.warn(() -> new ParameterizedMessage("Error when opening compound reader for Directory [{}] and SegmentCommitInfo [{}]", segmentReader.directory(), segmentCommitInfo), e);
logger.warn(() -> new ParameterizedMessage("Error when opening compound reader for Directory [{}] and " +
"SegmentCommitInfo [{}]", segmentReader.directory(), segmentCommitInfo), e);
return ImmutableOpenMap.of();
}
@ -856,14 +862,17 @@ public abstract class Engine implements Closeable {
files = directory.listAll();
} catch (IOException e) {
final Directory finalDirectory = directory;
logger.warn(() -> new ParameterizedMessage("Couldn't list Compound Reader Directory [{}]", finalDirectory), e);
logger.warn(() ->
new ParameterizedMessage("Couldn't list Compound Reader Directory [{}]", finalDirectory), e);
return ImmutableOpenMap.of();
}
} else {
try {
files = segmentReader.getSegmentInfo().files().toArray(new String[]{});
} catch (IOException e) {
logger.warn(() -> new ParameterizedMessage("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", segmentReader, segmentReader.getSegmentInfo()), e);
logger.warn(() ->
new ParameterizedMessage("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]",
segmentReader, segmentReader.getSegmentInfo()), e);
return ImmutableOpenMap.of();
}
}
@ -876,10 +885,12 @@ public abstract class Engine implements Closeable {
length = directory.fileLength(file);
} catch (NoSuchFileException | FileNotFoundException e) {
final Directory finalDirectory = directory;
logger.warn(() -> new ParameterizedMessage("Tried to query fileLength but file is gone [{}] [{}]", finalDirectory, file), e);
logger.warn(() -> new ParameterizedMessage("Tried to query fileLength but file is gone [{}] [{}]",
finalDirectory, file), e);
} catch (IOException e) {
final Directory finalDirectory = directory;
logger.warn(() -> new ParameterizedMessage("Error when trying to query fileLength [{}] [{}]", finalDirectory, file), e);
logger.warn(() -> new ParameterizedMessage("Error when trying to query fileLength [{}] [{}]",
finalDirectory, file), e);
}
if (length == 0L) {
continue;
@ -892,7 +903,8 @@ public abstract class Engine implements Closeable {
directory.close();
} catch (IOException e) {
final Directory finalDirectory = directory;
logger.warn(() -> new ParameterizedMessage("Error when closing compound reader on Directory [{}]", finalDirectory), e);
logger.warn(() -> new ParameterizedMessage("Error when closing compound reader on Directory [{}]",
finalDirectory), e);
}
}
@ -1081,7 +1093,8 @@ public abstract class Engine implements Closeable {
/**
* Triggers a forced merge on this engine
*/
public abstract void forceMerge(boolean flush, int maxNumSegments, boolean onlyExpungeDeletes, boolean upgrade, boolean upgradeOnlyAncientSegments) throws EngineException, IOException;
public abstract void forceMerge(boolean flush, int maxNumSegments, boolean onlyExpungeDeletes,
boolean upgrade, boolean upgradeOnlyAncientSegments) throws EngineException, IOException;
/**
* Snapshots the most recent index and returns a handle to it. If needed will try and "commit" the
@ -1099,8 +1112,8 @@ public abstract class Engine implements Closeable {
/**
* If the specified throwable contains a fatal error in the throwable graph, such a fatal error will be thrown. Callers should ensure
* that there are no catch statements that would catch an error in the stack as the fatal error here should go uncaught and be handled
* by the uncaught exception handler that we install during bootstrap. If the specified throwable does indeed contain a fatal error, the
* specified message will attempt to be logged before throwing the fatal error. If the specified throwable does not contain a fatal
* by the uncaught exception handler that we install during bootstrap. If the specified throwable does indeed contain a fatal error,
* the specified message will attempt to be logged before throwing the fatal error. If the specified throwable does not contain a fatal
* error, this method is a no-op.
*
* @param maybeMessage the message to maybe log
@ -1129,7 +1142,9 @@ public abstract class Engine implements Closeable {
store.incRef();
try {
if (failedEngine.get() != null) {
logger.warn(() -> new ParameterizedMessage("tried to fail engine but engine is already failed. ignoring. [{}]", reason), failure);
logger.warn(() ->
new ParameterizedMessage("tried to fail engine but engine is already failed. ignoring. [{}]",
reason), failure);
return;
}
// this must happen before we close IW or Translog such that we can check this state to opt out of failing the engine
@ -1147,7 +1162,8 @@ public abstract class Engine implements Closeable {
// the shard is initializing
if (Lucene.isCorruptionException(failure)) {
try {
store.markStoreCorrupted(new IOException("failed engine (reason: [" + reason + "])", ExceptionsHelper.unwrapCorruption(failure)));
store.markStoreCorrupted(new IOException("failed engine (reason: [" + reason + "])",
ExceptionsHelper.unwrapCorruption(failure)));
} catch (IOException e) {
logger.warn("Couldn't mark store corrupted", e);
}
@ -1162,7 +1178,8 @@ public abstract class Engine implements Closeable {
store.decRef();
}
} else {
logger.debug(() -> new ParameterizedMessage("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason), failure);
logger.debug(() -> new ParameterizedMessage("tried to fail engine but could not acquire lock - engine should " +
"be failed by now [{}]", reason), failure);
}
}
@ -1415,7 +1432,8 @@ public abstract class Engine implements Closeable {
}
public Delete(String type, String id, Term uid, long primaryTerm) {
this(type, id, uid, SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime());
this(type, id, uid, SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL,
Origin.PRIMARY, System.nanoTime());
}
public Delete(Delete template, VersionType versionType) {
@ -1613,7 +1631,9 @@ public abstract class Engine implements Closeable {
try {
logger.debug("flushing shard on close - this might take some time to sync files to disk");
try {
flush(); // TODO we might force a flush in the future since we have the write lock already even though recoveries are running.
// TODO we might force a flush in the future since we have the write lock already even though recoveries
// are running.
flush();
} catch (AlreadyClosedException ex) {
logger.debug("engine already closed - skipping flushAndClose");
}
@ -1750,7 +1770,8 @@ public abstract class Engine implements Closeable {
}
/**
* Request that this engine throttle incoming indexing requests to one thread. Must be matched by a later call to {@link #deactivateThrottling()}.
* Request that this engine throttle incoming indexing requests to one thread.
* Must be matched by a later call to {@link #deactivateThrottling()}.
*/
public abstract void activateThrottling();
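The activate/deactivate pairing above can be sketched with a simple lock-based throttle; the class below is an illustrative stand-in, not the engine's actual implementation:

    import java.util.concurrent.locks.ReentrantLock;
    import java.util.function.Supplier;

    final class IndexingThrottle {
        private final ReentrantLock lock = new ReentrantLock();
        private volatile boolean throttled = false;

        void activate() { throttled = true; }     // must be matched by a later deactivate()
        void deactivate() { throttled = false; }

        // Wraps every indexing operation; while throttled, operations serialize on one lock.
        <T> T run(Supplier<T> operation) {
            if (throttled) {
                lock.lock();
                try {
                    return operation.get();
                } finally {
                    lock.unlock();
                }
            }
            return operation.get();
        }
    }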
@ -258,7 +258,8 @@ public class InternalEngine extends Engine {
// disable the MSU optimization during recovery. Here we prefer to maintain the consistency of LocalCheckpointTracker.
if (localCheckpoint < maxSeqNo && engineConfig.getIndexSettings().isSoftDeleteEnabled()) {
try (Searcher searcher = searcherSupplier.get()) {
Lucene.scanSeqNosInReader(searcher.getDirectoryReader(), localCheckpoint + 1, maxSeqNo, tracker::markSeqNoAsCompleted);
Lucene.scanSeqNosInReader(searcher.getDirectoryReader(), localCheckpoint + 1, maxSeqNo,
tracker::markSeqNoAsCompleted);
}
}
return tracker;
@ -445,7 +446,8 @@ public class InternalEngine extends Engine {
pendingTranslogRecovery.set(false); // we are good - now we can commit
if (opsRecovered > 0) {
logger.trace("flushing post recovery from translog. ops recovered [{}]. committed translog id [{}]. current id [{}]",
opsRecovered, translogGeneration == null ? null : translogGeneration.translogFileGeneration, translog.currentFileGeneration());
opsRecovered, translogGeneration == null ? null :
translogGeneration.translogFileGeneration, translog.currentFileGeneration());
commitIndexWriter(indexWriter, translog, null);
refreshLastCommittedSegmentInfos();
refresh("translog_recovery");
@ -453,11 +455,14 @@ public class InternalEngine extends Engine {
translog.trimUnreferencedReaders();
}
private Translog openTranslog(EngineConfig engineConfig, TranslogDeletionPolicy translogDeletionPolicy, LongSupplier globalCheckpointSupplier) throws IOException {
private Translog openTranslog(EngineConfig engineConfig, TranslogDeletionPolicy translogDeletionPolicy,
LongSupplier globalCheckpointSupplier) throws IOException {
final TranslogConfig translogConfig = engineConfig.getTranslogConfig();
final String translogUUID = loadTranslogUUIDFromLastCommit();
// We expect that this shard already exists, so it must already have an existing translog else something is badly wrong!
return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier, engineConfig.getPrimaryTermSupplier());
return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier,
engineConfig.getPrimaryTermSupplier());
}
// Package private for testing purposes only
@ -505,7 +510,8 @@ public class InternalEngine extends Engine {
@Override
public int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException {
if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) {
try (Translog.Snapshot snapshot = newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false)) {
try (Translog.Snapshot snapshot = newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo),
Long.MAX_VALUE, false)) {
return snapshot.totalOperations();
}
} else {
@ -624,7 +630,8 @@ public class InternalEngine extends Engine {
TranslogLeafReader reader = new TranslogLeafReader((Translog.Index) operation, engineConfig
.getIndexSettings().getIndexVersionCreated());
return new GetResult(new Searcher("realtime_get", new IndexSearcher(reader), reader::close),
new VersionsAndSeqNoResolver.DocIdAndVersion(0, ((Translog.Index) operation).version(), reader, 0));
new VersionsAndSeqNoResolver.DocIdAndVersion(0,
((Translog.Index) operation).version(), reader, 0));
}
} catch (IOException e) {
maybeFailEngine("realtime_get", e); // lets check if the translog has failed with a tragic event
@ -818,8 +825,8 @@ public class InternalEngine extends Engine {
* - is preserved in the transaction log
* - and is assigned before we start to index / replicate
* NOTE: it's not important for this timestamp to be consistent across nodes etc. it's just a number that is in the common
* case increasing and can be used in the failure case when we retry and resent documents to establish a happens before relationship.
* for instance:
* case increasing and can be used in the failure case when we retry and resend documents to establish a happens-before
* relationship. For instance:
* - doc A has autoGeneratedIdTimestamp = 10, isRetry = false
* - doc B has autoGeneratedIdTimestamp = 9, isRetry = false
*
@ -827,11 +834,12 @@ public class InternalEngine extends Engine {
* - now doc A' has autoGeneratedIdTimestamp = 10, isRetry = true
*
* if A' arrives on the shard first we update maxUnsafeAutoIdTimestamp to 10 and use update document. All subsequent
* documents that arrive (A and B) will also use updateDocument since their timestamps are less than maxUnsafeAutoIdTimestamp.
* While this is not strictly needed for doc B it is just much simpler to implement since it will just de-optimize some doc in the worst case.
* documents that arrive (A and B) will also use updateDocument since their timestamps are less than
* maxUnsafeAutoIdTimestamp. While this is not strictly needed for doc B it is just much simpler to implement since it
* will just de-optimize some doc in the worst case.
*
* if A arrives on the shard first we use addDocument since maxUnsafeAutoIdTimestamp is < 10. A` will then just be skipped or calls
* updateDocument.
* if A arrives on the shard first we use addDocument since maxUnsafeAutoIdTimestamp is < 10. A' will then either be skipped
* or will call updateDocument.
*/
final IndexingStrategy plan = indexingStrategyForOperation(index);
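The retry bookkeeping the comment above walks through can be modelled in a few lines: a retry raises a high-water mark, and any later arrival at or below that mark must take the slower update path. This is a simplified standalone sketch, not InternalEngine's planning code:

    import java.util.concurrent.atomic.AtomicLong;

    final class AutoIdAppendOnlyPlan {
        private final AtomicLong maxUnsafeAutoIdTimestamp = new AtomicLong(-1);

        enum LucenePlan { ADD_DOCUMENT, UPDATE_DOCUMENT }

        LucenePlan plan(long autoGeneratedIdTimestamp, boolean isRetry) {
            if (isRetry) {
                // remember the highest timestamp ever seen on a retry (doc A' in the example)
                maxUnsafeAutoIdTimestamp.accumulateAndGet(autoGeneratedIdTimestamp, Math::max);
                return LucenePlan.UPDATE_DOCUMENT;
            }
            // an original can only be appended if no retry with an equal or higher timestamp was seen
            return autoGeneratedIdTimestamp > maxUnsafeAutoIdTimestamp.get()
                ? LucenePlan.ADD_DOCUMENT
                : LucenePlan.UPDATE_DOCUMENT;
        }
    }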
@ -1103,14 +1111,16 @@ public class InternalEngine extends Engine {
}
public static IndexingStrategy optimizedAppendOnly(long seqNoForIndexing, long versionForIndexing) {
return new IndexingStrategy(true, false, true, false, seqNoForIndexing, versionForIndexing, null);
return new IndexingStrategy(true, false, true,
false, seqNoForIndexing, versionForIndexing, null);
}
public static IndexingStrategy skipDueToVersionConflict(
VersionConflictEngineException e, boolean currentNotFoundOrDeleted, long currentVersion, long term) {
final IndexResult result = new IndexResult(e, currentVersion, term);
return new IndexingStrategy(
currentNotFoundOrDeleted, false, false, false, SequenceNumbers.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, result);
currentNotFoundOrDeleted, false, false, false,
SequenceNumbers.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, result);
}
static IndexingStrategy processNormally(boolean currentNotFoundOrDeleted,
@ -1121,16 +1131,19 @@ public class InternalEngine extends Engine {
static IndexingStrategy overrideExistingAsIfNotThere(
long seqNoForIndexing, long versionForIndexing) {
return new IndexingStrategy(true, true, true, false, seqNoForIndexing, versionForIndexing, null);
return new IndexingStrategy(true, true, true,
false, seqNoForIndexing, versionForIndexing, null);
}
public static IndexingStrategy processButSkipLucene(boolean currentNotFoundOrDeleted, long seqNoForIndexing,
long versionForIndexing) {
return new IndexingStrategy(currentNotFoundOrDeleted, false, false, false, seqNoForIndexing, versionForIndexing, null);
return new IndexingStrategy(currentNotFoundOrDeleted, false, false,
false, seqNoForIndexing, versionForIndexing, null);
}
static IndexingStrategy processAsStaleOp(boolean addStaleOpToLucene, long seqNoForIndexing, long versionForIndexing) {
return new IndexingStrategy(false, false, false, addStaleOpToLucene, seqNoForIndexing, versionForIndexing, null);
return new IndexingStrategy(false, false, false,
addStaleOpToLucene, seqNoForIndexing, versionForIndexing, null);
}
}
@ -1143,13 +1156,15 @@ public class InternalEngine extends Engine {
final VersionValue versionValue = versionMap.getVersionForAssert(index.uid().bytes());
if (versionValue != null) {
if (versionValue.isDelete() == false || allowDeleted == false) {
throw new AssertionError("doc [" + index.type() + "][" + index.id() + "] exists in version map (version " + versionValue + ")");
throw new AssertionError("doc [" + index.type() + "][" + index.id() + "] exists in version map (version " +
versionValue + ")");
}
} else {
try (Searcher searcher = acquireSearcher("assert doc doesn't exist", SearcherScope.INTERNAL)) {
final long docsWithId = searcher.searcher().count(new TermQuery(index.uid()));
if (docsWithId > 0) {
throw new AssertionError("doc [" + index.type() + "][" + index.id() + "] exists [" + docsWithId + "] times in index");
throw new AssertionError("doc [" + index.type() + "][" + index.id() + "] exists [" + docsWithId +
"] times in index");
}
}
}
@ -1367,22 +1382,26 @@ public class InternalEngine extends Engine {
VersionConflictEngineException e, long currentVersion, long term, boolean currentlyDeleted) {
final long unassignedSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
final DeleteResult deleteResult = new DeleteResult(e, currentVersion, term, unassignedSeqNo, currentlyDeleted == false);
return new DeletionStrategy(false, false, currentlyDeleted, unassignedSeqNo, Versions.NOT_FOUND, deleteResult);
return new DeletionStrategy(false, false, currentlyDeleted, unassignedSeqNo,
Versions.NOT_FOUND, deleteResult);
}
static DeletionStrategy processNormally(boolean currentlyDeleted, long seqNoOfDeletion, long versionOfDeletion) {
return new DeletionStrategy(true, false, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null);
return new DeletionStrategy(true, false, currentlyDeleted, seqNoOfDeletion,
versionOfDeletion, null);
}
public static DeletionStrategy processButSkipLucene(boolean currentlyDeleted,
long seqNoOfDeletion, long versionOfDeletion) {
return new DeletionStrategy(false, false, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null);
return new DeletionStrategy(false, false, currentlyDeleted, seqNoOfDeletion,
versionOfDeletion, null);
}
static DeletionStrategy processAsStaleOp(boolean addStaleOpToLucene, boolean currentlyDeleted,
long seqNoOfDeletion, long versionOfDeletion) {
return new DeletionStrategy(false, addStaleOpToLucene, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null);
return new DeletionStrategy(false, addStaleOpToLucene, currentlyDeleted, seqNoOfDeletion,
versionOfDeletion, null);
}
}
@ -1390,7 +1409,8 @@ public class InternalEngine extends Engine {
public void maybePruneDeletes() {
// It's expensive to prune because we walk the deletes map acquiring dirtyLock for each uid so we only do it
// every 1/4 of gcDeletesInMillis:
if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().relativeTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 0.25) {
if (engineConfig.isEnableGcDeletes() &&
engineConfig.getThreadPool().relativeTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 0.25) {
pruneDeletedTombstones();
}
}
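The quarter-interval throttle described in the comment amounts to a timestamp comparison; a standalone sketch with hypothetical names:

    final class TombstonePruneThrottle {
        private final long gcDeletesInMillis;
        private volatile long lastPruneTimeMillis;

        TombstonePruneThrottle(long gcDeletesInMillis) {
            this.gcDeletesInMillis = gcDeletesInMillis;
        }

        // Returns true (and records the prune) only if at least a quarter of the GC interval has elapsed.
        boolean shouldPrune(long nowMillis) {
            if (nowMillis - lastPruneTimeMillis > gcDeletesInMillis * 0.25) {
                lastPruneTimeMillis = nowMillis;
                return true;
            }
            return false;
        }
    }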
@ -1421,8 +1441,9 @@ public class InternalEngine extends Engine {
try {
final ParsedDocument tombstone = engineConfig.getTombstoneDocSupplier().newNoopTombstoneDoc(noOp.reason());
tombstone.updateSeqID(noOp.seqNo(), noOp.primaryTerm());
// A noop tombstone does not require a _version but it's added to have a fully dense docvalues for the version field.
// 1L is selected to optimize the compression because it might probably be the most common value in version field.
// A noop tombstone does not require a _version but it's added to have fully dense docvalues for the version
// field. 1L is selected to optimize the compression because it is likely to be the most common value in the
// version field.
tombstone.version().setLongValue(1L);
assert tombstone.docs().size() == 1 : "Tombstone should have a single doc [" + tombstone + "]";
final ParseContext.Document doc = tombstone.docs().get(0);
@ -1585,7 +1606,8 @@ public class InternalEngine extends Engine {
@Override
public boolean shouldPeriodicallyFlush() {
ensureOpen();
final long translogGenerationOfLastCommit = Long.parseLong(lastCommittedSegmentInfos.userData.get(Translog.TRANSLOG_GENERATION_KEY));
final long translogGenerationOfLastCommit =
Long.parseLong(lastCommittedSegmentInfos.userData.get(Translog.TRANSLOG_GENERATION_KEY));
final long flushThreshold = config().getIndexSettings().getFlushThresholdSize().getBytes();
if (translog.sizeInBytesByMinGen(translogGenerationOfLastCommit) < flushThreshold) {
return false;
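The trigger shown here compares the translog bytes retained since the last commit's generation against the flush threshold. A hedged standalone sketch of that decision, where the LongUnaryOperator stands in for the translog's size-by-generation lookup (an assumption of this sketch):

    import java.util.function.LongUnaryOperator;

    final class PeriodicFlushCheck {
        private final long flushThresholdBytes;
        private final LongUnaryOperator translogSizeByMinGen; // generation -> bytes retained since that generation

        PeriodicFlushCheck(long flushThresholdBytes, LongUnaryOperator translogSizeByMinGen) {
            this.flushThresholdBytes = flushThresholdBytes;
            this.translogSizeByMinGen = translogSizeByMinGen;
        }

        boolean shouldPeriodicallyFlush(long translogGenerationOfLastCommit) {
            return translogSizeByMinGen.applyAsLong(translogGenerationOfLastCommit) >= flushThresholdBytes;
        }
    }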
@ -1811,7 +1833,8 @@ public class InternalEngine extends Engine {
* thread for optimize, and the 'optimizeLock' guarding this code, and (3) ConcurrentMergeScheduler
* syncs calls to findForcedMerges.
*/
assert indexWriter.getConfig().getMergePolicy() instanceof ElasticsearchMergePolicy : "MergePolicy is " + indexWriter.getConfig().getMergePolicy().getClass().getName();
assert indexWriter.getConfig().getMergePolicy() instanceof ElasticsearchMergePolicy : "MergePolicy is " +
indexWriter.getConfig().getMergePolicy().getClass().getName();
ElasticsearchMergePolicy mp = (ElasticsearchMergePolicy) indexWriter.getConfig().getMergePolicy();
optimizeLock.lock();
try {
@ -1859,7 +1882,8 @@ public class InternalEngine extends Engine {
throw e;
} finally {
try {
mp.setUpgradeInProgress(false, false); // reset it just to make sure we reset it in a case of an error
// reset it just to make sure we reset it in a case of an error
mp.setUpgradeInProgress(false, false);
} finally {
optimizeLock.unlock();
}
@ -1991,7 +2015,8 @@ public class InternalEngine extends Engine {
@Override
protected final void closeNoLock(String reason, CountDownLatch closedLatch) {
if (isClosed.compareAndSet(false, true)) {
assert rwl.isWriteLockedByCurrentThread() || failEngineLock.isHeldByCurrentThread() : "Either the write lock must be held or the engine must be currently be failing itself";
assert rwl.isWriteLockedByCurrentThread() || failEngineLock.isHeldByCurrentThread() :
"Either the write lock must be held or the engine must be currently be failing itself";
try {
this.versionMap.clear();
if (internalSearcherManager != null) {
@ -2123,7 +2148,8 @@ public class InternalEngine extends Engine {
}
if (warmer != null) {
try {
assert searcher.getIndexReader() instanceof ElasticsearchDirectoryReader : "this class needs an ElasticsearchDirectoryReader but got: " + searcher.getIndexReader().getClass();
assert searcher.getIndexReader() instanceof ElasticsearchDirectoryReader :
"this class needs an ElasticsearchDirectoryReader but got: " + searcher.getIndexReader().getClass();
warmer.warm(new Searcher("top_reader_warming", searcher, () -> {}));
} catch (Exception e) {
if (isEngineClosed.get() == false) {
@ -2195,11 +2221,13 @@ public class InternalEngine extends Engine {
int maxNumMerges = mergeScheduler.getMaxMergeCount();
if (numMergesInFlight.decrementAndGet() < maxNumMerges) {
if (isThrottling.getAndSet(false)) {
logger.info("stop throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges);
logger.info("stop throttling indexing: numMergesInFlight={}, maxNumMerges={}",
numMergesInFlight, maxNumMerges);
deactivateThrottling();
}
}
if (indexWriter.hasPendingMerges() == false && System.nanoTime() - lastWriteNanos >= engineConfig.getFlushMergesAfter().nanos()) {
if (indexWriter.hasPendingMerges() == false &&
System.nanoTime() - lastWriteNanos >= engineConfig.getFlushMergesAfter().nanos()) {
// NEVER do this on a merge thread since we acquire some locks blocking here and if we concurrently rollback the writer
// we deadlock on engine#close for instance.
engineConfig.getThreadPool().executor(ThreadPool.Names.FLUSH).execute(new AbstractRunnable() {
@ -2551,7 +2579,8 @@ public class InternalEngine extends Engine {
return super.softUpdateDocument(term, doc, softDeletes);
}
@Override
public long softUpdateDocuments(Term term, Iterable<? extends Iterable<? extends IndexableField>> docs, Field... softDeletes) throws IOException {
public long softUpdateDocuments(Term term, Iterable<? extends Iterable<? extends IndexableField>> docs,
Field... softDeletes) throws IOException {
assert softDeleteEnabled : "Call #softUpdateDocuments but soft-deletes is disabled";
return super.softUpdateDocuments(term, docs, softDeletes);
}
@ -40,8 +40,9 @@ final class LiveVersionMap implements ReferenceManager.RefreshListener, Accounta
private static final class VersionLookup {
/** Tracks bytes used by current map, i.e. what is freed on refresh. For deletes, which are also added to tombstones, we only account
* for the CHM entry here, and account for BytesRef/VersionValue against the tombstones, since refresh would not clear this RAM. */
/** Tracks bytes used by current map, i.e. what is freed on refresh. For deletes, which are also added to tombstones,
* we only account for the CHM entry here, and account for BytesRef/VersionValue against the tombstones, since refresh would not
* clear this RAM. */
final AtomicLong ramBytesUsed = new AtomicLong();
private static final VersionLookup EMPTY = new VersionLookup(Collections.emptyMap());
@ -123,7 +124,8 @@ final class LiveVersionMap implements ReferenceManager.RefreshListener, Accounta
}
Maps() {
this(new VersionLookup(ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency()), VersionLookup.EMPTY, false);
this(new VersionLookup(ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency()),
VersionLookup.EMPTY, false);
}
boolean isSafeAccessMode() {
@ -252,8 +254,8 @@ final class LiveVersionMap implements ReferenceManager.RefreshListener, Accounta
// means Lucene did not actually open a new reader because it detected no changes, it's possible old has some entries in it, which
// is fine: it means they were actually already included in the previously opened reader, so we can still safely drop them in that
// case. This is because we assign new maps (in beforeRefresh) slightly before Lucene actually flushes any segments for the
// reopen, and so any concurrent indexing requests can still sneak in a few additions to that current map that are in fact reflected
// in the previous reader. We don't touch tombstones here: they expire on their own index.gc_deletes timeframe:
// reopen, and so any concurrent indexing requests can still sneak in a few additions to that current map that are in fact
// reflected in the previous reader. We don't touch tombstones here: they expire on their own index.gc_deletes timeframe:
maps = maps.invalidateOldMap();
assert (unsafeKeysMap = unsafeKeysMap.invalidateOldMap()) != null;
@ -416,8 +418,8 @@ final class LiveVersionMap implements ReferenceManager.RefreshListener, Accounta
maps = new Maps();
tombstones.clear();
// NOTE: we can't zero this here, because a refresh thread could be calling InternalEngine.pruneDeletedTombstones at the same time,
// and this will lead to an assert trip. Presumably it's fine if our ramBytesUsedTombstones is non-zero after clear since the index
// is being closed:
// and this will lead to an assert trip. Presumably it's fine if our ramBytesUsedTombstones is non-zero after clear since the
// index is being closed:
//ramBytesUsedTombstones.set(0);
}
@ -455,7 +457,8 @@ final class LiveVersionMap implements ReferenceManager.RefreshListener, Accounta
return maps.current.map;
}
/** Iterates over all deleted versions, including new ones (not yet exposed via reader) and old ones (exposed via reader but not yet GC'd). */
/** Iterates over all deleted versions, including new ones (not yet exposed via reader) and old ones
* (exposed via reader but not yet GC'd). */
Map<BytesRef, DeleteVersionValue> getAllTombstones() {
return tombstones;
}
@ -471,7 +474,8 @@ final class LiveVersionMap implements ReferenceManager.RefreshListener, Accounta
}
boolean assertKeyedLockHeldByCurrentThread(BytesRef uid) {
assert keyedLock.isHeldByCurrentThread(uid) : "Thread [" + Thread.currentThread().getName() + "], uid [" + uid.utf8ToString() + "]";
assert keyedLock.isHeldByCurrentThread(uid) : "Thread [" + Thread.currentThread().getName() +
"], uid [" + uid.utf8ToString() + "]";
return true;
}
}
@ -61,17 +61,19 @@ public final class ShardGetService extends AbstractIndexShardComponent {
private final IndexShard indexShard;
public ShardGetService(IndexSettings indexSettings, IndexShard indexShard,
MapperService mapperService) {
MapperService mapperService) {
super(indexShard.shardId(), indexSettings);
this.mapperService = mapperService;
this.indexShard = indexShard;
}
public GetStats stats() {
return new GetStats(existsMetric.count(), TimeUnit.NANOSECONDS.toMillis(existsMetric.sum()), missingMetric.count(), TimeUnit.NANOSECONDS.toMillis(missingMetric.sum()), currentMetric.count());
return new GetStats(existsMetric.count(), TimeUnit.NANOSECONDS.toMillis(existsMetric.sum()),
missingMetric.count(), TimeUnit.NANOSECONDS.toMillis(missingMetric.sum()), currentMetric.count());
}
public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) {
public GetResult get(String type, String id, String[] gFields, boolean realtime, long version,
VersionType versionType, FetchSourceContext fetchSourceContext) {
return get(type, id, gFields, realtime, version, versionType, fetchSourceContext, false);
}
@ -105,7 +107,8 @@ public final class ShardGetService extends AbstractIndexShardComponent {
* <p>
* Note: Call <b>must</b> release engine searcher associated with engineGetResult!
*/
public GetResult get(Engine.GetResult engineGetResult, String id, String type, String[] fields, FetchSourceContext fetchSourceContext) {
public GetResult get(Engine.GetResult engineGetResult, String id, String type,
String[] fields, FetchSourceContext fetchSourceContext) {
if (!engineGetResult.exists()) {
return new GetResult(shardId.getIndexName(), type, id, -1, false, null, null);
}
@ -176,7 +179,8 @@ public final class ShardGetService extends AbstractIndexShardComponent {
}
}
private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext, Engine.GetResult get, MapperService mapperService) {
private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext,
Engine.GetResult get, MapperService mapperService) {
Map<String, DocumentField> fields = null;
BytesReference source = null;
DocIdAndVersion docIdAndVersion = get.docIdAndVersion();
@ -51,8 +51,9 @@ public class MergeStats implements Streamable, ToXContentFragment {
}
public void add(long totalMerges, long totalMergeTime, long totalNumDocs, long totalSizeInBytes, long currentMerges, long currentNumDocs, long currentSizeInBytes,
long stoppedTimeMillis, long throttledTimeMillis, double mbPerSecAutoThrottle) {
public void add(long totalMerges, long totalMergeTime, long totalNumDocs, long totalSizeInBytes,
long currentMerges, long currentNumDocs, long currentSizeInBytes,
long stoppedTimeMillis, long throttledTimeMillis, double mbPerSecAutoThrottle) {
this.total += totalMerges;
this.totalTimeInMillis += totalMergeTime;
this.totalNumDocs += totalNumDocs;
@ -401,7 +401,8 @@ public final class QueryBuilders {
* @param filterFunctionBuilders the filters and functions to execute
* @return the function score query
*/
public static FunctionScoreQueryBuilder functionScoreQuery(QueryBuilder queryBuilder, FunctionScoreQueryBuilder.FilterFunctionBuilder[] filterFunctionBuilders) {
public static FunctionScoreQueryBuilder functionScoreQuery(QueryBuilder queryBuilder,
FunctionScoreQueryBuilder.FilterFunctionBuilder[] filterFunctionBuilders) {
return new FunctionScoreQueryBuilder(queryBuilder, filterFunctionBuilders);
}
@ -37,7 +37,8 @@ public class QueryValidationException extends ValidationException {
* @param validationException an initial exception. Can be {@code null}, in which case a new exception is created.
* @return a {@link QueryValidationException} with added validation error message
*/
public static QueryValidationException addValidationError(String queryId, String validationError, QueryValidationException validationException) {
public static QueryValidationException addValidationError(String queryId, String validationError,
QueryValidationException validationException) {
if (validationException == null) {
validationException = new QueryValidationException();
}
@ -52,7 +53,8 @@ public class QueryValidationException extends ValidationException {
* @param validationException an initial exception. Can be {@code null}, in which case a new exception is created.
* @return a {@link QueryValidationException} with added validation error message
*/
public static QueryValidationException addValidationErrors(List<String> validationErrors, QueryValidationException validationException) {
public static QueryValidationException addValidationErrors(List<String> validationErrors,
QueryValidationException validationException) {
if (validationException == null) {
validationException = new QueryValidationException();
}
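Both helpers follow the same lazily-created, accumulating exception pattern: pass null on the first error and the same instance afterwards. A generic sketch of the pattern (not the Elasticsearch class itself):

    import java.util.ArrayList;
    import java.util.List;

    class ValidationErrors extends RuntimeException {
        private final List<String> errors = new ArrayList<>();

        // Usage: errors = ValidationErrors.add("first problem", null);
        //        errors = ValidationErrors.add("second problem", errors);
        static ValidationErrors add(String error, ValidationErrors existing) {
            ValidationErrors result = existing == null ? new ValidationErrors() : existing;
            result.errors.add(error);
            return result;
        }

        List<String> errors() {
            return errors;
        }
    }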
@ -57,16 +57,19 @@ public class MultiMatchQuery extends MatchQuery {
super(context);
}
private Query parseAndApply(Type type, String fieldName, Object value, String minimumShouldMatch, Float boostValue) throws IOException {
private Query parseAndApply(Type type, String fieldName, Object value,
String minimumShouldMatch, Float boostValue) throws IOException {
Query query = parse(type, fieldName, value);
query = Queries.maybeApplyMinimumShouldMatch(query, minimumShouldMatch);
if (query != null && boostValue != null && boostValue != AbstractQueryBuilder.DEFAULT_BOOST && query instanceof MatchNoDocsQuery == false) {
if (query != null && boostValue != null &&
boostValue != AbstractQueryBuilder.DEFAULT_BOOST && query instanceof MatchNoDocsQuery == false) {
query = new BoostQuery(query, boostValue);
}
return query;
}
public Query parse(MultiMatchQueryBuilder.Type type, Map<String, Float> fieldNames, Object value, String minimumShouldMatch) throws IOException {
public Query parse(MultiMatchQueryBuilder.Type type, Map<String, Float> fieldNames,
Object value, String minimumShouldMatch) throws IOException {
final Query result;
// reset query builder
queryBuilder = null;
@ -104,7 +107,8 @@ public class MultiMatchQuery extends MatchQuery {
this.tieBreaker = tieBreaker;
}
public List<Query> buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map<String, Float> fieldNames, Object value, String minimumShouldMatch) throws IOException{
public List<Query> buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map<String, Float> fieldNames,
Object value, String minimumShouldMatch) throws IOException{
List<Query> queries = new ArrayList<>();
for (String fieldName : fieldNames.keySet()) {
Float boostValue = fieldNames.get(fieldName);
@ -159,7 +163,8 @@ public class MultiMatchQuery extends MatchQuery {
}
@Override
public List<Query> buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map<String, Float> fieldNames, Object value, String minimumShouldMatch) throws IOException {
public List<Query> buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map<String, Float> fieldNames,
Object value, String minimumShouldMatch) throws IOException {
Map<Analyzer, List<FieldAndFieldType>> groups = new HashMap<>();
List<Query> queries = new ArrayList<>();
for (Map.Entry<String, Float> entry : fieldNames.entrySet()) {
@ -81,7 +81,8 @@ public interface IndexEventListener {
* @param currentState the new shard state
* @param reason the reason for the state change if there is one, null otherwise
*/
default void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState currentState, @Nullable String reason) {}
default void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState,
IndexShardState currentState, @Nullable String reason) {}
/**
* Called when a shard is marked as inactive
@ -68,7 +68,8 @@ public class IndexSearcherWrapper {
* This is invoked each time a {@link Engine.Searcher} is requested to do an operation. (for example search)
*/
public final Engine.Searcher wrap(Engine.Searcher engineSearcher) throws IOException {
final ElasticsearchDirectoryReader elasticsearchDirectoryReader = ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(engineSearcher.getDirectoryReader());
final ElasticsearchDirectoryReader elasticsearchDirectoryReader =
ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(engineSearcher.getDirectoryReader());
if (elasticsearchDirectoryReader == null) {
throw new IllegalStateException("Can't wrap non elasticsearch directory reader");
}
@ -76,8 +77,9 @@ public class IndexSearcherWrapper {
DirectoryReader reader = wrap(nonClosingReaderWrapper);
if (reader != nonClosingReaderWrapper) {
if (reader.getReaderCacheHelper() != elasticsearchDirectoryReader.getReaderCacheHelper()) {
throw new IllegalStateException("wrapped directory reader doesn't delegate IndexReader#getCoreCacheKey, wrappers must override this method and delegate" +
" to the original readers core cache key. Wrapped readers can't be used as cache keys since their are used only per request which would lead to subtle bugs");
throw new IllegalStateException("wrapped directory reader doesn't delegate IndexReader#getCoreCacheKey," +
" wrappers must override this method and delegate to the original readers core cache key. Wrapped readers can't be " +
"used as cache keys since their are used only per request which would lead to subtle bugs");
}
if (ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(reader) != elasticsearchDirectoryReader) {
// prevent that somebody wraps with a non-filter reader
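The exception above enforces that wrappers delegate the reader cache helper instead of exposing their own key. A sketch of a conforming wrapper, assuming Lucene's FilterDirectoryReader API:

    import java.io.IOException;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.FilterDirectoryReader;
    import org.apache.lucene.index.LeafReader;

    final class CacheKeyPreservingReader extends FilterDirectoryReader {

        CacheKeyPreservingReader(DirectoryReader in) throws IOException {
            super(in, new SubReaderWrapper() {
                @Override
                public LeafReader wrap(LeafReader reader) {
                    return reader; // no per-leaf filtering in this sketch
                }
            });
        }

        @Override
        protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
            return new CacheKeyPreservingReader(in);
        }

        @Override
        public CacheHelper getReaderCacheHelper() {
            return in.getReaderCacheHelper(); // delegate so cache entries stay keyed on the original reader
        }
    }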
@ -222,9 +222,11 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
// for primaries, we only allow to write when actually started (so the cluster has decided we started)
// in case we have a relocation of a primary, we also allow to write after phase 2 completed, where the shard may be
// in state RECOVERING or POST_RECOVERY.
// for replicas, replication is also allowed while recovering, since we index also during recovery to replicas and rely on version checks to make sure its consistent
// a relocated shard can also be target of a replication if the relocation target has not been marked as active yet and is syncing it's changes back to the relocation source
private static final EnumSet<IndexShardState> writeAllowedStates = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED);
// for replicas, replication is also allowed while recovering, since we also index during recovery to replicas and rely on
// version checks to make sure it's consistent. A relocated shard can also be the target of a replication if the relocation target
// has not been marked as active yet and is syncing its changes back to the relocation source
private static final EnumSet<IndexShardState> writeAllowedStates = EnumSet.of(IndexShardState.RECOVERING,
IndexShardState.POST_RECOVERY, IndexShardState.STARTED);
private final IndexSearcherWrapper searcherWrapper;
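The gate this set backs is a simple membership check before any write; a standalone sketch with illustrative names:

    import java.util.EnumSet;

    final class WriteGate {
        enum State { CREATED, RECOVERING, POST_RECOVERY, STARTED, CLOSED }

        private static final EnumSet<State> WRITE_ALLOWED =
            EnumSet.of(State.RECOVERING, State.POST_RECOVERY, State.STARTED);

        static void ensureWriteAllowed(State current) {
            if (WRITE_ALLOWED.contains(current) == false) {
                throw new IllegalStateException(
                    "operation only allowed when shard state is one of " + WRITE_ALLOWED + ", was " + current);
            }
        }
    }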
@ -412,10 +414,12 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
currentRouting = this.shardRouting;
if (!newRouting.shardId().equals(shardId())) {
throw new IllegalArgumentException("Trying to set a routing entry with shardId " + newRouting.shardId() + " on a shard with shardId " + shardId());
throw new IllegalArgumentException("Trying to set a routing entry with shardId " +
newRouting.shardId() + " on a shard with shardId " + shardId());
}
if ((currentRouting == null || newRouting.isSameAllocation(currentRouting)) == false) {
throw new IllegalArgumentException("Trying to set a routing entry with a different allocation. Current " + currentRouting + ", new " + newRouting);
throw new IllegalArgumentException("Trying to set a routing entry with a different allocation. Current " +
currentRouting + ", new " + newRouting);
}
if (currentRouting != null && currentRouting.primary() && newRouting.primary() == false) {
throw new IllegalArgumentException("illegal state: trying to move shard from primary mode to replica mode. Current "
@ -435,10 +439,11 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
} else if (currentRouting.primary() && currentRouting.relocating() && replicationTracker.isRelocated() &&
(newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) {
// if the shard is not in primary mode anymore (after primary relocation) we have to fail when any changes in shard routing occur (e.g. due to recovery
// failure / cancellation). The reason is that at the moment we cannot safely reactivate primary mode without risking two
// active primaries.
throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state());
// if the shard is not in primary mode anymore (after primary relocation) we have to fail when any changes in shard
// routing occur (e.g. due to recovery failure / cancellation). The reason is that at the moment we cannot safely
// reactivate primary mode without risking two active primaries.
throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " +
newRouting.state());
}
assert newRouting.active() == false || state == IndexShardState.STARTED || state == IndexShardState.CLOSED :
"routing is active, but local shard state isn't. routing: " + newRouting + ", local state: " + state;
@ -475,8 +480,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
"primary terms can only go up; current term [" + pendingPrimaryTerm + "], new term [" + newPrimaryTerm + "]";
/*
* Before this call returns, we are guaranteed that all future operations are delayed and so this happens before we
* increment the primary term. The latch is needed to ensure that we do not unblock operations before the primary term is
* incremented.
* increment the primary term. The latch is needed to ensure that we do not unblock operations before the primary
* term is incremented.
*/
// to prevent primary relocation handoff while resync is not completed
boolean resyncStarted = primaryReplicaResyncInProgress.compareAndSet(false, true);
@ -522,13 +527,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
public void onResponse(ResyncTask resyncTask) {
logger.info("primary-replica resync completed with {} operations",
resyncTask.getResyncedOperations());
boolean resyncCompleted = primaryReplicaResyncInProgress.compareAndSet(true, false);
boolean resyncCompleted =
primaryReplicaResyncInProgress.compareAndSet(true, false);
assert resyncCompleted : "primary-replica resync finished but was not started";
}
@Override
public void onFailure(Exception e) {
boolean resyncCompleted = primaryReplicaResyncInProgress.compareAndSet(true, false);
boolean resyncCompleted =
primaryReplicaResyncInProgress.compareAndSet(true, false);
assert resyncCompleted : "primary-replica resync finished but was not started";
if (state == IndexShardState.CLOSED) {
// ignore, shutting down
@ -594,7 +601,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
* @throws IllegalIndexShardStateException if the shard is not relocating due to concurrent cancellation
* @throws InterruptedException if blocking operations is interrupted
*/
public void relocated(final Consumer<ReplicationTracker.PrimaryContext> consumer) throws IllegalIndexShardStateException, InterruptedException {
public void relocated(final Consumer<ReplicationTracker.PrimaryContext> consumer)
throws IllegalIndexShardStateException, InterruptedException {
assert shardRouting.primary() : "only primaries can be marked as relocated: " + shardRouting;
try {
indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> {
@ -1191,7 +1199,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}
}
public IndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
public IndexShard postRecovery(String reason)
throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
synchronized (mutex) {
if (state == IndexShardState.CLOSED) {
throw new IndexShardClosedException(shardId);
@ -1462,7 +1471,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
public void readAllowed() throws IllegalIndexShardStateException {
IndexShardState state = this.state; // one time volatile read
if (readAllowedStates.contains(state) == false) {
throw new IllegalIndexShardStateException(shardId, state, "operations only allowed when shard state is one of " + readAllowedStates.toString());
throw new IllegalIndexShardStateException(shardId, state, "operations only allowed when shard state is one of " +
readAllowedStates.toString());
}
}
@ -1476,7 +1486,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
if (origin.isRecovery()) {
if (state != IndexShardState.RECOVERING) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when recovering, origin [" + origin + "]");
throw new IllegalIndexShardStateException(shardId, state,
"operation only allowed when recovering, origin [" + origin + "]");
}
} else {
if (origin == Engine.Operation.Origin.PRIMARY) {
@ -1488,13 +1499,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
assert getActiveOperationsCount() == 0 : "Ongoing writes [" + getActiveOperations() + "]";
}
if (writeAllowedStates.contains(state) == false) {
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStates + ", origin [" + origin + "]");
throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " +
writeAllowedStates + ", origin [" + origin + "]");
}
}
}
private boolean assertPrimaryMode() {
assert shardRouting.primary() && replicationTracker.isPrimaryMode() : "shard " + shardRouting + " is not a primary shard in primary mode";
assert shardRouting.primary() && replicationTracker.isPrimaryMode() : "shard " + shardRouting +
" is not a primary shard in primary mode";
return true;
}
@ -1571,9 +1584,11 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
return path;
}
public boolean recoverFromLocalShards(BiConsumer<String, MappingMetaData> mappingUpdateConsumer, List<IndexShard> localShards) throws IOException {
public boolean recoverFromLocalShards(BiConsumer<String, MappingMetaData> mappingUpdateConsumer,
List<IndexShard> localShards) throws IOException {
assert shardRouting.primary() : "recover from local shards only makes sense if the shard is a primary shard";
assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS : "invalid recovery type: " + recoveryState.getRecoverySource();
assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS : "invalid recovery type: " +
recoveryState.getRecoverySource();
final List<LocalShardSnapshot> snapshots = new ArrayList<>();
try {
for (IndexShard shard : localShards) {
@ -1601,7 +1616,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
public boolean restoreFromRepository(Repository repository) {
assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard";
assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT : "invalid recovery type: " + recoveryState.getRecoverySource();
assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT : "invalid recovery type: " +
recoveryState.getRecoverySource();
StoreRecovery storeRecovery = new StoreRecovery(shardId, logger);
return storeRecovery.recoverFromRepository(this, repository);
}
@ -1690,7 +1706,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
* if any operation between {@code fromSeqNo} and {@code toSeqNo} is missing.
* This parameter should be only enabled when the entire requesting range is below the global checkpoint.
*/
public Translog.Snapshot newChangesSnapshot(String source, long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException {
public Translog.Snapshot newChangesSnapshot(String source, long fromSeqNo,
long toSeqNo, boolean requiredFullRange) throws IOException {
return getEngine().newChangesSnapshot(source, mapperService, fromSeqNo, toSeqNo, requiredFullRange);
}
@ -1949,7 +1966,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
* @param primaryContext the sequence number context
*/
public void activateWithPrimaryContext(final ReplicationTracker.PrimaryContext primaryContext) {
assert shardRouting.primary() && shardRouting.isRelocationTarget() : "only primary relocation target can update allocation IDs from primary context: " + shardRouting;
assert shardRouting.primary() && shardRouting.isRelocationTarget() :
"only primary relocation target can update allocation IDs from primary context: " + shardRouting;
assert primaryContext.getCheckpointStates().containsKey(routingEntry().allocationId().getId()) &&
getLocalCheckpoint() == primaryContext.getCheckpointStates().get(routingEntry().allocationId().getId()).getLocalCheckpoint();
synchronized (mutex) {
@ -2090,7 +2108,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
recoveryListener.onRecoveryDone(recoveryState);
}
} catch (Exception e) {
recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true);
recoveryListener.onRecoveryFailure(recoveryState,
new RecoveryFailedException(recoveryState, null, e), true);
}
});
break;
@ -2100,7 +2119,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
recoveryTargetService.startRecovery(this, recoveryState.getSourceNode(), recoveryListener);
} catch (Exception e) {
failShard("corrupted preexisting index", e);
recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true);
recoveryListener.onRecoveryFailure(recoveryState,
new RecoveryFailedException(recoveryState, null, e), true);
}
break;
case SNAPSHOT:
@ -2113,7 +2133,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
recoveryListener.onRecoveryDone(recoveryState);
}
} catch (Exception e) {
recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true);
recoveryListener.onRecoveryFailure(recoveryState,
new RecoveryFailedException(recoveryState, null, e), true);
}
});
break;
@ -2407,7 +2428,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}
public int getActiveOperationsCount() {
return indexShardOperationPermits.getActiveOperationsCount(); // refCount is incremented on successful acquire and decremented on close
// refCount is incremented on successful acquire and decremented on close
return indexShardOperationPermits.getActiveOperationsCount();
}
/**
@ -49,7 +49,8 @@ public class IndexingStats implements Streamable, ToXContentFragment {
Stats() {}
public Stats(long indexCount, long indexTimeInMillis, long indexCurrent, long indexFailedCount, long deleteCount, long deleteTimeInMillis, long deleteCurrent, long noopUpdateCount, boolean isThrottled, long throttleTimeInMillis) {
public Stats(long indexCount, long indexTimeInMillis, long indexCurrent, long indexFailedCount, long deleteCount,
long deleteTimeInMillis, long deleteCurrent, long noopUpdateCount, boolean isThrottled, long throttleTimeInMillis) {
this.indexCount = indexCount;
this.indexTimeInMillis = indexTimeInMillis;
this.indexCurrent = indexCurrent;
@ -45,10 +45,14 @@ public final class ShardPath {
private final boolean isCustomDataPath;
public ShardPath(boolean isCustomDataPath, Path dataPath, Path shardStatePath, ShardId shardId) {
assert dataPath.getFileName().toString().equals(Integer.toString(shardId.id())) : "dataPath must end with the shard ID but didn't: " + dataPath.toString();
assert shardStatePath.getFileName().toString().equals(Integer.toString(shardId.id())) : "shardStatePath must end with the shard ID but didn't: " + dataPath.toString();
assert dataPath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) : "dataPath must end with index path id but didn't: " + dataPath.toString();
assert shardStatePath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) : "shardStatePath must end with index path id but didn't: " + dataPath.toString();
assert dataPath.getFileName().toString().equals(Integer.toString(shardId.id())) :
"dataPath must end with the shard ID but didn't: " + dataPath.toString();
assert shardStatePath.getFileName().toString().equals(Integer.toString(shardId.id())) :
"shardStatePath must end with the shard ID but didn't: " + dataPath.toString();
assert dataPath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) :
"dataPath must end with index path id but didn't: " + dataPath.toString();
assert shardStatePath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) :
"shardStatePath must end with index path id but didn't: " + dataPath.toString();
if (isCustomDataPath && dataPath.equals(shardStatePath)) {
throw new IllegalArgumentException("shard state path must be different to the data path when using custom data paths");
}
@ -111,7 +115,8 @@ public final class ShardPath {
* directories with a valid shard state exist the one with the highest version will be used.
* <b>Note:</b> this method resolves custom data locations for the shard.
*/
public static ShardPath loadShardPath(Logger logger, NodeEnvironment env, ShardId shardId, IndexSettings indexSettings) throws IOException {
public static ShardPath loadShardPath(Logger logger, NodeEnvironment env,
ShardId shardId, IndexSettings indexSettings) throws IOException {
final Path[] paths = env.availableShardPaths(shardId);
final int nodeLockId = env.getNodeLockId();
final Path sharedDataPath = env.sharedDataPath();
@ -165,7 +170,8 @@ public final class ShardPath {
* This method tries to delete left-over shards where the index name has been reused but the UUID is different
* to allow the new shard to be allocated.
*/
public static void deleteLeftoverShardDirectory(Logger logger, NodeEnvironment env, ShardLock lock, IndexSettings indexSettings) throws IOException {
public static void deleteLeftoverShardDirectory(Logger logger, NodeEnvironment env,
ShardLock lock, IndexSettings indexSettings) throws IOException {
final String indexUUID = indexSettings.getUUID();
final Path[] paths = env.availableShardPaths(lock.getShardId());
for (Path path : paths) {
@ -226,7 +232,8 @@ public final class ShardPath {
.filter((path) -> pathsToSpace.get(path).subtract(estShardSizeInBytes).compareTo(BigInteger.ZERO) > 0)
// Sort by the number of shards for this index
.sorted((p1, p2) -> {
int cmp = Long.compare(pathToShardCount.getOrDefault(p1, 0L), pathToShardCount.getOrDefault(p2, 0L));
int cmp = Long.compare(pathToShardCount.getOrDefault(p1, 0L),
pathToShardCount.getOrDefault(p2, 0L));
if (cmp == 0) {
// if the number of shards is equal, tie-break with the number of total shards
cmp = Integer.compare(dataPathToShardCount.getOrDefault(p1.path, 0),
@ -88,7 +88,8 @@ public final class ShardStateMetaData {
return "primary [" + primary + "], allocation [" + allocationId + "]";
}
public static final MetaDataStateFormat<ShardStateMetaData> FORMAT = new MetaDataStateFormat<ShardStateMetaData>(SHARD_STATE_FILE_PREFIX) {
public static final MetaDataStateFormat<ShardStateMetaData> FORMAT =
new MetaDataStateFormat<ShardStateMetaData>(SHARD_STATE_FILE_PREFIX) {
@Override
protected XContentBuilder newXContentBuilder(XContentType type, OutputStream stream) throws IOException {
@ -98,7 +98,8 @@ final class StoreRecovery {
return false;
}
boolean recoverFromLocalShards(BiConsumer<String, MappingMetaData> mappingUpdateConsumer, final IndexShard indexShard, final List<LocalShardSnapshot> shards) throws IOException {
boolean recoverFromLocalShards(BiConsumer<String, MappingMetaData> mappingUpdateConsumer,
final IndexShard indexShard, final List<LocalShardSnapshot> shards) throws IOException {
if (canRecover(indexShard)) {
RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType();
assert recoveryType == RecoverySource.Type.LOCAL_SHARDS: "expected local shards recovery type: " + recoveryType;
@ -133,7 +134,8 @@ final class StoreRecovery {
internalRecoverFromStore(indexShard);
// just trigger a merge to do housekeeping on the
// copied segments - we will also see them in stats etc.
indexShard.getEngine().forceMerge(false, -1, false, false, false);
indexShard.getEngine().forceMerge(false, -1, false,
false, false);
} catch (IOException ex) {
throw new IndexShardRecoveryException(indexShard.shardId(), "failed to recover from local shards", ex);
}
@ -304,7 +306,8 @@ final class StoreRecovery {
// to call post recovery.
final IndexShardState shardState = indexShard.state();
final RecoveryState recoveryState = indexShard.recoveryState();
assert shardState != IndexShardState.CREATED && shardState != IndexShardState.RECOVERING : "recovery process of " + shardId + " didn't get to post_recovery. shardState [" + shardState + "]";
assert shardState != IndexShardState.CREATED && shardState != IndexShardState.RECOVERING :
"recovery process of " + shardId + " didn't get to post_recovery. shardState [" + shardState + "]";
if (logger.isTraceEnabled()) {
RecoveryState.Index index = recoveryState.getIndex();
@ -316,11 +319,13 @@ final class StoreRecovery {
.append(new ByteSizeValue(index.recoveredBytes())).append("]\n");
sb.append(" : reusing_files [").append(index.reusedFileCount()).append("] with total_size [")
.append(new ByteSizeValue(index.reusedBytes())).append("]\n");
sb.append(" verify_index : took [").append(TimeValue.timeValueMillis(recoveryState.getVerifyIndex().time())).append("], check_index [")
.append(timeValueMillis(recoveryState.getVerifyIndex().checkIndexTime())).append("]\n");
sb.append(" verify_index : took [")
.append(TimeValue.timeValueMillis(recoveryState.getVerifyIndex().time())).append("], check_index [")
.append(timeValueMillis(recoveryState.getVerifyIndex().checkIndexTime())).append("]\n");
sb.append(" translog : number_of_operations [").append(recoveryState.getTranslog().recoveredOperations())
.append("], took [").append(TimeValue.timeValueMillis(recoveryState.getTranslog().time())).append("]");
logger.trace("recovery completed from [shard_store], took [{}]\n{}", timeValueMillis(recoveryState.getTimer().time()), sb);
logger.trace("recovery completed from [shard_store], took [{}]\n{}",
timeValueMillis(recoveryState.getTimer().time()), sb);
} else if (logger.isDebugEnabled()) {
logger.debug("recovery completed from [shard_store], took [{}]", timeValueMillis(recoveryState.getTimer().time()));
}
@ -371,7 +376,8 @@ final class StoreRecovery {
files += " (failure=" + ExceptionsHelper.detailedMessage(inner) + ")";
}
if (indexShouldExists) {
throw new IndexShardRecoveryException(shardId, "shard allocated for local recovery (post api), should exist, but doesn't, current files: " + files, e);
throw new IndexShardRecoveryException(shardId,
"shard allocated for local recovery (post api), should exist, but doesn't, current files: " + files, e);
}
}
if (si != null) {
@ -457,7 +463,8 @@ final class StoreRecovery {
snapshotShardId = new ShardId(indexName, IndexMetaData.INDEX_UUID_NA_VALUE, shardId.id());
}
final IndexId indexId = repository.getRepositoryData().resolveIndexId(indexName);
repository.restoreShard(indexShard, restoreSource.snapshot().getSnapshotId(), restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState());
repository.restoreShard(indexShard, restoreSource.snapshot().getSnapshotId(),
restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState());
final Store store = indexShard.store();
store.bootstrapNewHistory();
final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo();
@ -459,7 +459,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
* can be successfully opened. This includes reading the segment infos and possible
* corruption markers.
*/
public static boolean canOpenIndex(Logger logger, Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker) throws IOException {
public static boolean canOpenIndex(Logger logger, Path indexLocation,
ShardId shardId, NodeEnvironment.ShardLocker shardLocker) throws IOException {
try {
tryOpenIndex(indexLocation, shardId, shardLocker, logger);
} catch (Exception ex) {
@ -474,7 +475,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
* segment infos and possible corruption markers. If the index can not
* be opened, an exception is thrown
*/
public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker, Logger logger) throws IOException, ShardLockObtainFailedException {
public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker,
Logger logger) throws IOException, ShardLockObtainFailedException {
try (ShardLock lock = shardLocker.lock(shardId, TimeUnit.SECONDS.toMillis(5));
Directory dir = new SimpleFSDirectory(indexLocation)) {
failIfCorrupted(dir, shardId);
@ -489,7 +491,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
* Note: Checksums are calculated by default since version 4.8.0. This method only adds the
* verification against the checksum in the given metadata and does not add any significant overhead.
*/
public IndexOutput createVerifyingOutput(String fileName, final StoreFileMetaData metadata, final IOContext context) throws IOException {
public IndexOutput createVerifyingOutput(String fileName, final StoreFileMetaData metadata,
final IOContext context) throws IOException {
IndexOutput output = directory().createOutput(fileName, context);
boolean success = false;
try {
@ -537,7 +540,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
public static void checkIntegrity(final StoreFileMetaData md, final Directory directory) throws IOException {
try (IndexInput input = directory.openInput(md.name(), IOContext.READONCE)) {
if (input.length() != md.length()) { // first check the length no matter how old this file is
throw new CorruptIndexException("expected length=" + md.length() + " != actual length: " + input.length() + " : file truncated?", input);
throw new CorruptIndexException("expected length=" + md.length() + " != actual length: " + input.length() +
" : file truncated?", input);
}
// throw exception if the file is corrupt
String checksum = Store.digestToString(CodecUtil.checksumEntireFile(input));
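The check is cheap-first: compare lengths before paying for a whole-file checksum. A JDK-only sketch of the same idea, with CRC32 standing in for Lucene's codec checksum (an assumption of this sketch):

    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.zip.CRC32;

    final class FileIntegrity {
        static void check(Path file, long expectedLength, long expectedChecksum) throws IOException {
            long actualLength = Files.size(file);
            if (actualLength != expectedLength) { // first check the length no matter how old this file is
                throw new IOException("expected length=" + expectedLength
                    + " != actual length: " + actualLength + " : file truncated?");
            }
            CRC32 crc = new CRC32();
            byte[] buffer = new byte[8192];
            try (InputStream in = Files.newInputStream(file)) {
                int read;
                while ((read = in.read(buffer)) != -1) {
                    crc.update(buffer, 0, read);
                }
            }
            if (crc.getValue() != expectedChecksum) {
                throw new IOException("checksum mismatch for " + file);
            }
        }
    }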
@ -650,7 +654,9 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
for (String existingFile : directory.listAll()) {
if (Store.isAutogenerated(existingFile) || sourceMetaData.contains(existingFile)) {
continue; // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete checksum)
// don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete
// checksum)
continue;
}
try {
directory.deleteFile(reason, existingFile);
@ -660,7 +666,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
|| existingFile.equals(IndexFileNames.OLD_SEGMENTS_GEN)
|| existingFile.startsWith(CORRUPTED)) {
// TODO do we need to also fail this if we can't delete the pending commit file?
// if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit point around?
// if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit
// point around?
throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex);
}
logger.debug(() -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex);
@ -685,13 +692,14 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
StoreFileMetaData remote = sourceMetaData.get(meta.name());
// if we have different files then they must have no checksums; otherwise something went wrong during recovery.
// we have that problem when we have an empty index is only a segments_1 file so we can't tell if it's a Lucene 4.8 file
// and therefore no checksum is included. That isn't a problem since we simply copy it over anyway but those files come out as
// different in the diff. That's why we have to double check here again if the rest of it matches.
// and therefore no checksum is included. That isn't a problem since we simply copy it over anyway but those files
// come out as different in the diff. That's why we have to double check here again if the rest of it matches.
// all is fine this file is just part of a commit or a segment that is different
if (local.isSame(remote) == false) {
logger.debug("Files are different on the recovery target: {} ", recoveryDiff);
throw new IllegalStateException("local version: " + local + " is different from remote version after recovery: " + remote, null);
throw new IllegalStateException("local version: " + local + " is different from remote version after recovery: " +
remote, null);
}
}
} else {
@ -851,7 +859,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
final SegmentInfos segmentCommitInfos = Store.readSegmentsInfo(commit, directory);
numDocs = Lucene.getNumDocs(segmentCommitInfos);
commitUserDataBuilder.putAll(segmentCommitInfos.getUserData());
Version maxVersion = segmentCommitInfos.getMinSegmentLuceneVersion(); // we don't know which version was used to write so we take the max version.
// we don't know which version was used to write so we take the max version.
Version maxVersion = segmentCommitInfos.getMinSegmentLuceneVersion();
for (SegmentCommitInfo info : segmentCommitInfos) {
final Version version = info.info.getVersion();
if (version == null) {
@ -862,7 +871,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
maxVersion = version;
}
for (String file : info.files()) {
checksumFromLuceneFile(directory, file, builder, logger, version, SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file)));
checksumFromLuceneFile(directory, file, builder, logger, version,
SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file)));
}
}
if (maxVersion == null) {
@ -878,7 +888,9 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
// Lucene checks the checksum after it tries to lookup the codec etc.
// in that case we might get only IAE or similar exceptions while we are really corrupt...
// TODO we should check the checksum in lucene if we hit an exception
logger.warn(() -> new ParameterizedMessage("failed to build store metadata. checking segment info integrity (with commit [{}])", commit == null ? "no" : "yes"), ex);
logger.warn(() ->
new ParameterizedMessage("failed to build store metadata. checking segment info integrity " +
"(with commit [{}])", commit == null ? "no" : "yes"), ex);
Lucene.checkSegmentInfoIntegrity(directory);
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException cex) {
cex.addSuppressed(ex);
@ -902,10 +914,12 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
length = in.length();
if (length < CodecUtil.footerLength()) {
// truncated files trigger IAE if we seek negative... these files are really corrupted though
throw new CorruptIndexException("Can't retrieve checksum from file: " + file + " file length must be >= " + CodecUtil.footerLength() + " but was: " + in.length(), in);
throw new CorruptIndexException("Can't retrieve checksum from file: " + file + " file length must be >= " +
CodecUtil.footerLength() + " but was: " + in.length(), in);
}
if (readFileAsHash) {
final VerifyingIndexInput verifyingIndexInput = new VerifyingIndexInput(in); // additional safety we checksum the entire file we read the hash for...
// additional safety we checksum the entire file we read the hash for...
final VerifyingIndexInput verifyingIndexInput = new VerifyingIndexInput(in);
hashFile(fileHash, new InputStreamIndexInput(verifyingIndexInput, length), length);
checksum = digestToString(verifyingIndexInput.verify());
} else {
@ -964,19 +978,21 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
* <ul>
* <li>all files in this segment have the same checksum</li>
* <li>all files in this segment have the same length</li>
* <li>the segments {@code .si} files hashes are byte-identical Note: This is a using a perfect hash function, The metadata transfers the {@code .si} file content as it's hash</li>
* <li>the segments {@code .si} files hashes are byte-identical Note: This is a using a perfect hash function,
* The metadata transfers the {@code .si} file content as it's hash</li>
* </ul>
* <p>
* The {@code .si} file contains a lot of diagnostics including a timestamp etc. in the future there might be
* unique segment identifiers in there hardening this method further.
* <p>
* The per-commit files handles very similar. A commit is composed of the {@code segments_N} files as well as generational files like
* deletes ({@code _x_y.del}) or field-info ({@code _x_y.fnm}) files. On a per-commit level files for a commit are treated
* The per-commit files handles very similar. A commit is composed of the {@code segments_N} files as well as generational files
* like deletes ({@code _x_y.del}) or field-info ({@code _x_y.fnm}) files. On a per-commit level files for a commit are treated
* as identical iff:
* <ul>
* <li>all files belonging to this commit have the same checksum</li>
* <li>all files belonging to this commit have the same length</li>
* <li>the segments file {@code segments_N} files hashes are byte-identical Note: This is a using a perfect hash function, The metadata transfers the {@code segments_N} file content as it's hash</li>
* <li>the segments file {@code segments_N} files hashes are byte-identical Note: This is a using a perfect hash function,
* The metadata transfers the {@code segments_N} file content as it's hash</li>
* </ul>
* <p>
* NOTE: this diff will not contain the {@code segments.gen} file. This file is omitted on recovery.
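As a rough illustration of the identity rules described in this javadoc (not the actual StoreFileMetaData implementation), the sketch below compares two hypothetical metadata records: same length, same checksum, and a byte-identical content hash for the {@code .si} / {@code segments_N} files whose content is transferred as their hash.
import java.util.Arrays;
final class RecoveryDiffSketch {
    /** Hypothetical, simplified stand-in for StoreFileMetaData; the real class carries more state. */
    static final class FileMeta {
        final String name;
        final long length;
        final String checksum;
        final byte[] contentHash; // only meaningful for .si and segments_N files in this sketch
        FileMeta(String name, long length, String checksum, byte[] contentHash) {
            this.name = name;
            this.length = length;
            this.checksum = checksum;
            this.contentHash = contentHash;
        }
    }
    /** Two files count as identical iff length and checksum match, plus the hash for .si/segments_N files. */
    static boolean isSame(FileMeta local, FileMeta remote) {
        if (local.length != remote.length || local.checksum.equals(remote.checksum) == false) {
            return false;
        }
        if (local.name.endsWith(".si") || local.name.startsWith("segments_")) {
            return Arrays.equals(local.contentHash, remote.contentHash);
        }
        return true;
    }
}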
@ -994,7 +1010,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
}
final String segmentId = IndexFileNames.parseSegmentName(meta.name());
final String extension = IndexFileNames.getExtension(meta.name());
if (IndexFileNames.SEGMENTS.equals(segmentId) || DEL_FILE_EXTENSION.equals(extension) || LIV_FILE_EXTENSION.equals(extension)) {
if (IndexFileNames.SEGMENTS.equals(segmentId) ||
DEL_FILE_EXTENSION.equals(extension) || LIV_FILE_EXTENSION.equals(extension)) {
// only treat del files as per-commit files fnm files are generational but only for upgradable DV
perCommitStoreFiles.add(meta);
} else {
@ -1029,9 +1046,11 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
different.addAll(identicalFiles);
}
}
RecoveryDiff recoveryDiff = new RecoveryDiff(Collections.unmodifiableList(identical), Collections.unmodifiableList(different), Collections.unmodifiableList(missing));
RecoveryDiff recoveryDiff = new RecoveryDiff(Collections.unmodifiableList(identical),
Collections.unmodifiableList(different), Collections.unmodifiableList(missing));
assert recoveryDiff.size() == this.metadata.size() - (metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) ? 1 : 0)
: "some files are missing recoveryDiff size: [" + recoveryDiff.size() + "] metadata size: [" + this.metadata.size() + "] contains segments.gen: [" + metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) + "]";
: "some files are missing recoveryDiff size: [" + recoveryDiff.size() + "] metadata size: [" +
this.metadata.size() + "] contains segments.gen: [" + metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) + "]";
return recoveryDiff;
}
@ -1184,8 +1203,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
}
}
throw new CorruptIndexException("verification failed (hardware problem?) : expected=" + metadata.checksum() +
" actual=" + actualChecksum + " footer=" + footerDigest +" writtenLength=" + writtenBytes + " expectedLength=" + metadata.length() +
" (resource=" + metadata.toString() + ")", "VerifyingIndexOutput(" + metadata.name() + ")");
" actual=" + actualChecksum + " footer=" + footerDigest +" writtenLength=" + writtenBytes + " expectedLength=" +
metadata.length() + " (resource=" + metadata.toString() + ")", "VerifyingIndexOutput(" + metadata.name() + ")");
}
@Override
@ -1203,7 +1222,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
}
} else {
verify(); // fail if we write more than expected
throw new AssertionError("write past EOF expected length: " + metadata.length() + " writtenBytes: " + writtenBytes);
throw new AssertionError("write past EOF expected length: " + metadata.length() +
" writtenBytes: " + writtenBytes);
}
}
out.writeByte(b);


@ -80,7 +80,8 @@ public class TermVectorsService {
static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequest request, LongSupplier nanoTimeSupplier) {
final long startTime = nanoTimeSupplier.getAsLong();
final TermVectorsResponse termVectorsResponse = new TermVectorsResponse(indexShard.shardId().getIndex().getName(), request.type(), request.id());
final TermVectorsResponse termVectorsResponse = new TermVectorsResponse(indexShard.shardId().getIndex().getName(),
request.type(), request.id());
final Term uidTerm = indexShard.mapperService().createUidTerm(request.type(), request.id());
if (uidTerm == null) {
termVectorsResponse.setExists(false);
@ -96,7 +97,8 @@ public class TermVectorsService {
handleFieldWildcards(indexShard, request);
}
try (Engine.GetResult get = indexShard.get(new Engine.Get(request.realtime(), false, request.type(), request.id(), uidTerm)
try (Engine.GetResult get = indexShard.get(new Engine.Get(request.realtime(), false, request.type(),
request.id(), uidTerm)
.version(request.version()).versionType(request.versionType()));
Engine.Searcher searcher = indexShard.acquireSearcher("term_vector")) {
Fields topLevelFields = fields(get.searcher() != null ? get.searcher().reader() : searcher.reader());
@ -192,7 +194,8 @@ public class TermVectorsService {
return true;
}
private static Fields addGeneratedTermVectors(IndexShard indexShard, Engine.GetResult get, Fields termVectorsByField, TermVectorsRequest request, Set<String> selectedFields) throws IOException {
private static Fields addGeneratedTermVectors(IndexShard indexShard, Engine.GetResult get, Fields termVectorsByField,
TermVectorsRequest request, Set<String> selectedFields) throws IOException {
/* only keep valid fields */
Set<String> validFields = new HashSet<>();
for (String field : selectedFields) {
@ -217,7 +220,8 @@ public class TermVectorsService {
getFields[getFields.length - 1] = SourceFieldMapper.NAME;
GetResult getResult = indexShard.getService().get(
get, request.id(), request.type(), getFields, null);
Fields generatedTermVectors = generateTermVectors(indexShard, getResult.sourceAsMap(), getResult.getFields().values(), request.offsets(), request.perFieldAnalyzer(), validFields);
Fields generatedTermVectors = generateTermVectors(indexShard, getResult.sourceAsMap(), getResult.getFields().values(),
request.offsets(), request.perFieldAnalyzer(), validFields);
/* merge with existing Fields */
if (termVectorsByField == null) {
@ -257,7 +261,12 @@ public class TermVectorsService {
return selectedFields;
}
private static Fields generateTermVectors(IndexShard indexShard, Map<String, Object> source, Collection<DocumentField> getFields, boolean withOffsets, @Nullable Map<String, String> perFieldAnalyzer, Set<String> fields) throws IOException {
private static Fields generateTermVectors(IndexShard indexShard,
Map<String, Object> source,
Collection<DocumentField> getFields,
boolean withOffsets,
@Nullable Map<String, String> perFieldAnalyzer,
Set<String> fields) throws IOException {
Map<String, Collection<Object>> values = new HashMap<>();
for (DocumentField getField : getFields) {
String field = getField.getName();
@ -319,8 +328,9 @@ public class TermVectorsService {
String[] values = doc.getValues(field.name());
documentFields.add(new DocumentField(field.name(), Arrays.asList((Object[]) values)));
}
return generateTermVectors(indexShard, XContentHelper.convertToMap(parsedDocument.source(), true, request.xContentType()).v2(),
documentFields, request.offsets(), request.perFieldAnalyzer(), seenFields);
return generateTermVectors(indexShard,
XContentHelper.convertToMap(parsedDocument.source(), true, request.xContentType()).v2(), documentFields,
request.offsets(), request.perFieldAnalyzer(), seenFields);
}
private static ParsedDocument parseDocument(IndexShard indexShard, String index, String type, BytesReference doc,


@ -38,7 +38,8 @@ public abstract class BaseTranslogReader implements Comparable<BaseTranslogReade
protected final TranslogHeader header;
public BaseTranslogReader(long generation, FileChannel channel, Path path, TranslogHeader header) {
assert Translog.parseIdFromFileName(path) == generation : "generation mismatch. Path: " + Translog.parseIdFromFileName(path) + " but generation: " + generation;
assert Translog.parseIdFromFileName(path) == generation : "generation mismatch. Path: " +
Translog.parseIdFromFileName(path) + " but generation: " + generation;
this.generation = generation;
this.path = path;
@ -70,7 +71,8 @@ public abstract class BaseTranslogReader implements Comparable<BaseTranslogReade
/** read the size of the op (i.e., number of bytes, including the op size) written at the given position */
protected final int readSize(ByteBuffer reusableBuffer, long position) throws IOException {
// read op size from disk
assert reusableBuffer.capacity() >= 4 : "reusable buffer must have capacity >=4 when reading opSize. got [" + reusableBuffer.capacity() + "]";
assert reusableBuffer.capacity() >= 4 : "reusable buffer must have capacity >=4 when reading opSize. got [" +
reusableBuffer.capacity() + "]";
reusableBuffer.clear();
reusableBuffer.limit(4);
readBytes(reusableBuffer, position);
@ -94,7 +96,8 @@ public abstract class BaseTranslogReader implements Comparable<BaseTranslogReade
* reads an operation at the given position and returns it. The buffer length is equal to the number
* of bytes reads.
*/
protected final BufferedChecksumStreamInput checksummedStream(ByteBuffer reusableBuffer, long position, int opSize, BufferedChecksumStreamInput reuse) throws IOException {
protected final BufferedChecksumStreamInput checksummedStream(ByteBuffer reusableBuffer, long position, int opSize,
BufferedChecksumStreamInput reuse) throws IOException {
final ByteBuffer buffer;
if (reusableBuffer.capacity() >= opSize) {
buffer = reusableBuffer;
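A generic sketch of the length-prefixed read that these two helpers perform (a 4-byte size at a given position, followed by the operation bytes); buffer reuse and checksumming are left out, and the exact translog framing is assumed rather than copied here.
import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
final class LengthPrefixedReadSketch {
    /** Reads one length-prefixed record at the given position: a 4-byte big-endian size, then that many bytes. */
    static byte[] readRecord(FileChannel channel, long position) throws IOException {
        ByteBuffer sizeBuffer = ByteBuffer.allocate(4);
        readFully(channel, sizeBuffer, position);
        sizeBuffer.flip();
        int size = sizeBuffer.getInt();
        ByteBuffer payload = ByteBuffer.allocate(size);
        readFully(channel, payload, position + 4);
        return payload.array();
    }
    private static void readFully(FileChannel channel, ByteBuffer buffer, long position) throws IOException {
        while (buffer.hasRemaining()) {
            int read = channel.read(buffer, position + buffer.position());
            if (read == -1) {
                throw new EOFException("read requested past EOF. pos [" + position + "]");
            }
        }
    }
}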


@ -75,8 +75,9 @@ import java.util.stream.Stream;
* In Elasticsearch there is one Translog instance per {@link org.elasticsearch.index.engine.InternalEngine}. The engine
* records the current translog generation {@link Translog#getGeneration()} in it's commit metadata using {@link #TRANSLOG_GENERATION_KEY}
* to reference the generation that contains all operations that have not yet successfully been committed to the engines lucene index.
* Additionally, since Elasticsearch 2.0 the engine also records a {@link #TRANSLOG_UUID_KEY} with each commit to ensure a strong association
* between the lucene index an the transaction log file. This UUID is used to prevent accidental recovery from a transaction log that belongs to a
* Additionally, since Elasticsearch 2.0 the engine also records a {@link #TRANSLOG_UUID_KEY} with each commit to ensure a strong
* association between the lucene index an the transaction log file. This UUID is used to prevent accidental recovery from a transaction
* log that belongs to a
* different engine.
* <p>
* Each Translog has only one translog file open for writes at any time referenced by a translog generation ID. This ID is written to a
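A minimal sketch of the association described above: because the engine records the translog generation and UUID in the Lucene commit user data, both can be read back from the latest commit point. This is illustrative only; the real recovery logic lives in the engine and the Translog constructor.
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.store.Directory;
final class TranslogCommitDataSketch {
    /** Reads the translog generation and UUID that were baked into the most recent Lucene commit. */
    static String[] readTranslogKeys(Directory dir) throws IOException {
        List<IndexCommit> commits = DirectoryReader.listCommits(dir); // oldest commit first
        IndexCommit latest = commits.get(commits.size() - 1);
        Map<String, String> userData = latest.getUserData();
        String generation = userData.get("translog_generation"); // TRANSLOG_GENERATION_KEY
        String uuid = userData.get("translog_uuid");              // TRANSLOG_UUID_KEY
        return new String[] { generation, uuid };
    }
}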
@ -96,10 +97,13 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
/*
* TODO
* - we might need something like a deletion policy to hold on to more than one translog eventually (I think sequence IDs needs this) but we can refactor as we go
* - use a simple BufferedOutputStream to write stuff and fold BufferedTranslogWriter into it's super class... the tricky bit is we need to be able to do random access reads even from the buffer
* - we might need something like a deletion policy to hold on to more than one translog eventually (I think sequence IDs needs this)
* but we can refactor as we go
* - use a simple BufferedOutputStream to write stuff and fold BufferedTranslogWriter into it's super class... the tricky bit is we
* need to be able to do random access reads even from the buffer
* - we need random exception on the FileSystem API tests for all this.
* - we need to page align the last write before we sync, we can take advantage of ensureSynced for this since we might have already fsynced far enough
* - we need to page align the last write before we sync, we can take advantage of ensureSynced for this since we might have already
* fsynced far enough
*/
public static final String TRANSLOG_GENERATION_KEY = "translog_generation";
public static final String TRANSLOG_UUID_KEY = "translog_uuid";
@ -170,12 +174,15 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
// we hit this before and then blindly deleted the new generation even though we managed to bake it in and then hit this:
// https://discuss.elastic.co/t/cannot-recover-index-because-of-missing-tanslog-files/38336 as an example
//
// For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that file exists
// if not we don't even try to clean it up and wait until we fail creating it
assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogHeader.headerSizeInBytes(translogUUID) : "unexpected translog file: [" + nextTranslogFile + "]";
// For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that
// file exists. If not we don't even try to clean it up and wait until we fail creating it
assert Files.exists(nextTranslogFile) == false ||
Files.size(nextTranslogFile) <= TranslogHeader.headerSizeInBytes(translogUUID) :
"unexpected translog file: [" + nextTranslogFile + "]";
if (Files.exists(currentCheckpointFile) // current checkpoint is already copied
&& Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning
logger.warn("deleted previously created, but not yet committed, next generation [{}]. This can happen due to a tragic exception when creating a new generation", nextTranslogFile.getFileName());
logger.warn("deleted previously created, but not yet committed, next generation [{}]. This can happen due to a" +
" tragic exception when creating a new generation", nextTranslogFile.getFileName());
}
this.readers.addAll(recoverFromFiles(checkpoint));
if (readers.isEmpty()) {
@ -206,7 +213,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
private ArrayList<TranslogReader> recoverFromFiles(Checkpoint checkpoint) throws IOException {
boolean success = false;
ArrayList<TranslogReader> foundTranslogs = new ArrayList<>();
final Path tempFile = Files.createTempFile(location, TRANSLOG_FILE_PREFIX, TRANSLOG_FILE_SUFFIX); // a temp file to copy checkpoint to - note it must be in on the same FS otherwise atomic move won't work
// a temp file to copy checkpoint to - note it must be in on the same FS otherwise atomic move won't work
final Path tempFile = Files.createTempFile(location, TRANSLOG_FILE_PREFIX, TRANSLOG_FILE_SUFFIX);
boolean tempFileRenamed = false;
try (ReleasableLock lock = writeLock.acquire()) {
logger.debug("open uncommitted translog checkpoint {}", checkpoint);
@ -232,7 +240,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
throw new IllegalStateException("translog file doesn't exist with generation: " + i + " recovering from: " +
minGenerationToRecoverFrom + " checkpoint: " + checkpoint.generation + " - translog ids must be consecutive");
}
final TranslogReader reader = openReader(committedTranslogFile, Checkpoint.read(location.resolve(getCommitCheckpointFileName(i))));
final TranslogReader reader = openReader(committedTranslogFile,
Checkpoint.read(location.resolve(getCommitCheckpointFileName(i))));
assert reader.getPrimaryTerm() <= primaryTermSupplier.getAsLong() :
"Primary terms go backwards; current term [" + primaryTermSupplier.getAsLong() + "]" +
"translog path [ " + committedTranslogFile + ", existing term [" + reader.getPrimaryTerm() + "]";
@ -250,7 +259,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
if (Files.exists(commitCheckpoint)) {
Checkpoint checkpointFromDisk = Checkpoint.read(commitCheckpoint);
if (checkpoint.equals(checkpointFromDisk) == false) {
throw new IllegalStateException("Checkpoint file " + commitCheckpoint.getFileName() + " already exists but has corrupted content expected: " + checkpoint + " but got: " + checkpointFromDisk);
throw new IllegalStateException("Checkpoint file " + commitCheckpoint.getFileName() +
" already exists but has corrupted content expected: " + checkpoint + " but got: " + checkpointFromDisk);
}
} else {
// we first copy this into the temp-file and then fsync it followed by an atomic move into the target file
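The comment above refers to a common publish pattern: write to a temporary file on the same filesystem, fsync it, then atomically rename it into place. A generic, self-contained sketch of that pattern (not the Translog code itself):
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;
final class AtomicPublishSketch {
    /** Writes data next to the target, fsyncs it, then atomically moves it into place. */
    static void publish(Path target, byte[] data) throws IOException {
        // the temp file must live on the same filesystem as the target, otherwise ATOMIC_MOVE fails
        Path tempFile = Files.createTempFile(target.getParent(), "pending-", ".tmp");
        try {
            Files.write(tempFile, data);
            try (FileChannel channel = FileChannel.open(tempFile, StandardOpenOption.WRITE)) {
                channel.force(true); // fsync before the file becomes visible under the target name
            }
            Files.move(tempFile, target, StandardCopyOption.ATOMIC_MOVE);
        } finally {
            Files.deleteIfExists(tempFile); // no-op when the move succeeded
        }
    }
}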
@ -281,7 +291,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
TranslogReader openReader(Path path, Checkpoint checkpoint) throws IOException {
FileChannel channel = FileChannel.open(path, StandardOpenOption.READ);
try {
assert Translog.parseIdFromFileName(path) == checkpoint.generation : "expected generation: " + Translog.parseIdFromFileName(path) + " but got: " + checkpoint.generation;
assert Translog.parseIdFromFileName(path) == checkpoint.generation : "expected generation: " +
Translog.parseIdFromFileName(path) + " but got: " + checkpoint.generation;
TranslogReader reader = TranslogReader.open(channel, path, checkpoint, translogUUID);
channel = null;
return reader;
@ -302,7 +313,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
try {
return Long.parseLong(matcher.group(1));
} catch (NumberFormatException e) {
throw new IllegalStateException("number formatting issue in a file that passed PARSE_STRICT_ID_PATTERN: " + fileName + "]", e);
throw new IllegalStateException("number formatting issue in a file that passed PARSE_STRICT_ID_PATTERN: " +
fileName + "]", e);
}
}
throw new IllegalArgumentException("can't parse id from file: " + fileName);
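For context, the same strict-parse idea in isolation; the file-name pattern used here ("translog-<generation>.tlog") is an assumption for illustration and is not copied from PARSE_STRICT_ID_PATTERN.
import java.util.regex.Matcher;
import java.util.regex.Pattern;
final class TranslogFileNameSketch {
    // assumed naming scheme for this sketch: translog-<generation>.tlog
    private static final Pattern GENERATION_PATTERN = Pattern.compile("^translog-(\\d+)\\.tlog$");
    /** Extracts the generation from a translog file name, mirroring the strict parsing shown above. */
    static long parseGeneration(String fileName) {
        Matcher matcher = GENERATION_PATTERN.matcher(fileName);
        if (matcher.matches()) {
            try {
                return Long.parseLong(matcher.group(1));
            } catch (NumberFormatException e) {
                throw new IllegalStateException("number formatting issue in a file that matched the pattern: " + fileName, e);
            }
        }
        throw new IllegalArgumentException("can't parse id from file: " + fileName);
    }
}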
@ -835,7 +847,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
// acquire lock to make the two numbers roughly consistent (no file change half way)
try (ReleasableLock lock = readLock.acquire()) {
final long uncommittedGen = deletionPolicy.getTranslogGenerationOfLastCommit();
return new TranslogStats(totalOperations(), sizeInBytes(), totalOperationsByMinGen(uncommittedGen), sizeInBytesByMinGen(uncommittedGen), earliestLastModifiedAge());
return new TranslogStats(totalOperations(), sizeInBytes(), totalOperationsByMinGen(uncommittedGen),
sizeInBytesByMinGen(uncommittedGen), earliestLastModifiedAge());
}
}
@ -1741,7 +1754,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
try (ReleasableLock lock = writeLock.acquire()) {
if (generation != null) {
if (generation.translogUUID.equals(translogUUID) == false) {
throw new IllegalArgumentException("commit belongs to a different translog: " + generation.translogUUID + " vs. " + translogUUID);
throw new IllegalArgumentException("commit belongs to a different translog: " +
generation.translogUUID + " vs. " + translogUUID);
}
return generation.translogFileGeneration == currentFileGeneration();
}
@ -1858,12 +1872,14 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
ChannelFactory channelFactory, long primaryTerm) throws IOException {
IOUtils.rm(location);
Files.createDirectories(location);
final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, 1, initialGlobalCheckpoint, 1);
final Checkpoint checkpoint =
Checkpoint.emptyTranslogCheckpoint(0, 1, initialGlobalCheckpoint, 1);
final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME);
Checkpoint.write(channelFactory, checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
IOUtils.fsync(checkpointFile, false);
final String translogUUID = UUIDs.randomBase64UUID();
TranslogWriter writer = TranslogWriter.create(shardId, translogUUID, 1, location.resolve(getFilename(1)), channelFactory,
TranslogWriter writer = TranslogWriter.create(shardId, translogUUID, 1,
location.resolve(getFilename(1)), channelFactory,
new ByteSizeValue(10), 1, initialGlobalCheckpoint,
() -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); }, primaryTerm,
new TragicExceptionHolder());


@ -129,7 +129,8 @@ public class TranslogReader extends BaseTranslogReader implements Closeable {
throw new EOFException("read requested past EOF. pos [" + position + "] end: [" + length + "]");
}
if (position < getFirstOperationOffset()) {
throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" + getFirstOperationOffset() + "]");
throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" +
getFirstOperationOffset() + "]");
}
Channels.readFromFileChannelWithEofException(channel, position, buffer);
}


@ -94,10 +94,12 @@ final class TranslogSnapshot extends BaseTranslogReader {
*/
protected void readBytes(ByteBuffer buffer, long position) throws IOException {
if (position >= length) {
throw new EOFException("read requested past EOF. pos [" + position + "] end: [" + length + "], generation: [" + getGeneration() + "], path: [" + path + "]");
throw new EOFException("read requested past EOF. pos [" + position + "] end: [" + length + "], generation: [" +
getGeneration() + "], path: [" + path + "]");
}
if (position < getFirstOperationOffset()) {
throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" + getFirstOperationOffset() + "], generation: [" + getGeneration() + "], path: [" + path + "]");
throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" +
getFirstOperationOffset() + "], generation: [" + getGeneration() + "], path: [" + path + "]");
}
Channels.readFromFileChannelWithEofException(channel, position, buffer);
}


@ -128,7 +128,8 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
writerGlobalCheckpointSupplier, minTranslogGenerationSupplier, header, tragedy);
} catch (Exception exception) {
// if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that
// file exists we remove it. We only apply this logic to the checkpoint.generation+1 any other file with a higher generation is an error condition
// file exists we remove it. We only apply this logic to the checkpoint.generation+1 any other file with a higher generation
// is an error condition
IOUtils.closeWhileHandlingException(channel);
throw exception;
}


@ -45,8 +45,10 @@ import static org.hamcrest.Matchers.startsWith;
public class IndexingSlowLogTests extends ESTestCase {
public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException {
BytesReference source = BytesReference.bytes(JsonXContent.contentBuilder().startObject().field("foo", "bar").endObject());
ParsedDocument pd = new ParsedDocument(new NumericDocValuesField("version", 1), SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id",
BytesReference source = BytesReference.bytes(JsonXContent.contentBuilder()
.startObject().field("foo", "bar").endObject());
ParsedDocument pd = new ParsedDocument(new NumericDocValuesField("version", 1),
SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id",
"test", null, null, source, XContentType.JSON, null);
Index index = new Index("foo", "123");
// Turning off document logging doesn't log source[]
@ -68,7 +70,8 @@ public class IndexingSlowLogTests extends ESTestCase {
// Throwing a error if source cannot be converted
source = new BytesArray("invalid");
pd = new ParsedDocument(new NumericDocValuesField("version", 1), SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id",
pd = new ParsedDocument(new NumericDocValuesField("version", 1),
SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id",
"test", null, null, source, XContentType.JSON, null);
p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, 3);
@ -91,10 +94,12 @@ public class IndexingSlowLogTests extends ESTestCase {
IndexSettings settings = new IndexSettings(metaData, Settings.EMPTY);
IndexingSlowLog log = new IndexingSlowLog(settings);
assertFalse(log.isReformat());
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "true").build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "true").build()));
assertTrue(log.isReformat());
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "false").build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "false").build()));
assertFalse(log.isReformat());
settings.updateIndexMetaData(newIndexMeta("index", Settings.EMPTY));
@ -107,7 +112,8 @@ public class IndexingSlowLogTests extends ESTestCase {
log = new IndexingSlowLog(settings);
assertTrue(log.isReformat());
try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "NOT A BOOLEAN").build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "NOT A BOOLEAN").build()));
fail();
} catch (IllegalArgumentException ex) {
final String expected = "illegal value can't update [index.indexing.slowlog.reformat] from [true] to [NOT A BOOLEAN]";
@ -115,7 +121,8 @@ public class IndexingSlowLogTests extends ESTestCase {
assertNotNull(ex.getCause());
assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class));
final IllegalArgumentException cause = (IllegalArgumentException) ex.getCause();
assertThat(cause, hasToString(containsString("Failed to parse value [NOT A BOOLEAN] as only [true] or [false] are allowed.")));
assertThat(cause,
hasToString(containsString("Failed to parse value [NOT A BOOLEAN] as only [true] or [false] are allowed.")));
}
assertTrue(log.isReformat());
}
@ -130,14 +137,17 @@ public class IndexingSlowLogTests extends ESTestCase {
IndexingSlowLog log = new IndexingSlowLog(settings);
assertEquals(level, log.getLevel());
level = randomFrom(SlowLogLevel.values());
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build()));
assertEquals(level, log.getLevel());
level = randomFrom(SlowLogLevel.values());
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build()));
assertEquals(level, log.getLevel());
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build()));
assertEquals(level, log.getLevel());
settings.updateIndexMetaData(newIndexMeta("index", Settings.EMPTY));
@ -150,7 +160,8 @@ public class IndexingSlowLogTests extends ESTestCase {
log = new IndexingSlowLog(settings);
assertTrue(log.isReformat());
try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), "NOT A LEVEL").build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), "NOT A LEVEL").build()));
fail();
} catch (IllegalArgumentException ex) {
final String expected = "illegal value can't update [index.indexing.slowlog.level] from [TRACE] to [NOT A LEVEL]";
@ -178,7 +189,8 @@ public class IndexingSlowLogTests extends ESTestCase {
assertEquals(TimeValue.timeValueMillis(300).nanos(), log.getIndexInfoThreshold());
assertEquals(TimeValue.timeValueMillis(400).nanos(), log.getIndexWarnThreshold());
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING.getKey(), "120ms")
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING.getKey(), "120ms")
.put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING.getKey(), "220ms")
.put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING.getKey(), "320ms")
.put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING.getKey(), "420ms").build()));
@ -206,28 +218,36 @@ public class IndexingSlowLogTests extends ESTestCase {
assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getIndexInfoThreshold());
assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getIndexWarnThreshold());
try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING.getKey(), "NOT A TIME VALUE")
.build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.indexing.slowlog.threshold.index.trace");
}
try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING.getKey(), "NOT A TIME VALUE")
.build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.indexing.slowlog.threshold.index.debug");
}
try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING.getKey(), "NOT A TIME VALUE").build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING.getKey(), "NOT A TIME VALUE")
.build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.indexing.slowlog.threshold.index.info");
}
try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING.getKey(), "NOT A TIME VALUE").build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING.getKey(), "NOT A TIME VALUE")
.build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.indexing.slowlog.threshold.index.warn");


@ -40,11 +40,16 @@ public class MergePolicySettingsTests extends ESTestCase {
assertThat(new MergePolicyConfig(logger, indexSettings(build(true))).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
assertThat(new MergePolicyConfig(logger, indexSettings(build(0.5))).getMergePolicy().getNoCFSRatio(), equalTo(0.5));
assertThat(new MergePolicyConfig(logger, indexSettings(build(1.0))).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
assertThat(new MergePolicyConfig(logger, indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
assertThat(new MergePolicyConfig(logger, indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
assertThat(new MergePolicyConfig(logger, indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
assertThat(new MergePolicyConfig(logger, indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
assertThat(new MergePolicyConfig(logger, indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
assertThat(new MergePolicyConfig(logger,
indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
assertThat(new MergePolicyConfig(logger,
indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0));
assertThat(new MergePolicyConfig(logger,
indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
assertThat(new MergePolicyConfig(logger,
indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
assertThat(new MergePolicyConfig(logger,
indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
assertThat(new MergePolicyConfig(logger, indexSettings(build(0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
assertThat(new MergePolicyConfig(logger, indexSettings(build(0.0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0));
}
@ -54,7 +59,8 @@ public class MergePolicySettingsTests extends ESTestCase {
}
public void testNoMerges() {
MergePolicyConfig mp = new MergePolicyConfig(logger, indexSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build()));
MergePolicyConfig mp = new MergePolicyConfig(logger,
indexSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build()));
assertTrue(mp.getMergePolicy() instanceof NoMergePolicy);
}
@ -76,47 +82,81 @@ public class MergePolicySettingsTests extends ESTestCase {
public void testTieredMergePolicySettingsUpdate() throws IOException {
IndexSettings indexSettings = indexSettings(Settings.EMPTY);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(),
MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d);
indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d).build()));
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, 0.0d);
indexSettings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(),
MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d).build()));
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(),
MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, 0.0d);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMbFrac(), 0);
indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB)).build()));
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(),
MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMbFrac(), 0);
indexSettings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(),
new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB)).build()));
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(),
new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE);
indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1).build()));
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(),
MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE);
indexSettings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(),
MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1).build()));
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(),
MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING.getKey(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT - 1).build()));
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT-1);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(),
MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
indexSettings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING.getKey(),
MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT - 1).build()));
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(),
MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT-1);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), 0.0001);
indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1)).build()));
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(),
MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), 0.0001);
indexSettings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(),
new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1)).build()));
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(),
new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0);
indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1).build()));
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1, 0);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(),
MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0);
indexSettings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(),
MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1).build()));
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(),
MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1, 0);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, 0);
indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build()));
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(),
MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, 0);
indexSettings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build()));
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), 22, 0);
IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () ->
indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build())));
indexSettings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build())));
final Throwable cause = exc.getCause();
assertThat(cause.getMessage(), containsString("must be <= 50.0"));
indexSettings.updateIndexMetaData(newIndexMeta("index", EMPTY_SETTINGS)); // see if defaults are restored
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, 0);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(),
MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(),
new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(),
MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(),
MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(),
new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(),
MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0);
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(),
MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, 0);
}
public Settings build(String value) {


@ -170,7 +170,8 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase {
SearchContext searchContext = createSearchContext(index);
SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery());
searchContext.request().source(source);
searchContext.setTask(new SearchTask(0, "n/a", "n/a", "test", null, Collections.singletonMap(Task.X_OPAQUE_ID, "my_id")));
searchContext.setTask(new SearchTask(0, "n/a", "n/a", "test", null,
Collections.singletonMap(Task.X_OPAQUE_ID, "my_id")));
SearchSlowLog.SlowLogSearchContextPrinter p = new SearchSlowLog.SlowLogSearchContextPrinter(searchContext, 10);
assertThat(p.toString(), startsWith("[foo][0]"));
// Makes sure that output doesn't contain any new lines
@ -188,14 +189,17 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase {
SearchSlowLog log = new SearchSlowLog(settings);
assertEquals(level, log.getLevel());
level = randomFrom(SlowLogLevel.values());
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build()));
assertEquals(level, log.getLevel());
level = randomFrom(SlowLogLevel.values());
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build()));
assertEquals(level, log.getLevel());
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build()));
assertEquals(level, log.getLevel());
settings.updateIndexMetaData(newIndexMeta("index", Settings.EMPTY));
@ -207,7 +211,8 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase {
settings = new IndexSettings(metaData, Settings.EMPTY);
log = new SearchSlowLog(settings);
try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), "NOT A LEVEL").build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), "NOT A LEVEL").build()));
fail();
} catch (IllegalArgumentException ex) {
final String expected = "illegal value can't update [index.search.slowlog.level] from [TRACE] to [NOT A LEVEL]";
@ -235,7 +240,8 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase {
assertEquals(TimeValue.timeValueMillis(300).nanos(), log.getQueryInfoThreshold());
assertEquals(TimeValue.timeValueMillis(400).nanos(), log.getQueryWarnThreshold());
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "120ms")
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "120ms")
.put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING.getKey(), "220ms")
.put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING.getKey(), "320ms")
.put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING.getKey(), "420ms").build()));
@ -263,28 +269,36 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase {
assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getQueryInfoThreshold());
assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getQueryWarnThreshold());
try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder()
.put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.search.slowlog.threshold.query.trace");
}
try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder()
.put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.search.slowlog.threshold.query.debug");
}
try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING.getKey(), "NOT A TIME VALUE").build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder()
.put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.search.slowlog.threshold.query.info");
}
try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING.getKey(), "NOT A TIME VALUE").build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder()
.put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING.getKey(), "NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.search.slowlog.threshold.query.warn");
@ -306,7 +320,8 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase {
assertEquals(TimeValue.timeValueMillis(300).nanos(), log.getFetchInfoThreshold());
assertEquals(TimeValue.timeValueMillis(400).nanos(), log.getFetchWarnThreshold());
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(), "120ms")
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(), "120ms")
.put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING.getKey(), "220ms")
.put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING.getKey(), "320ms")
.put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING.getKey(), "420ms").build()));
@ -334,28 +349,36 @@ public class SearchSlowLogTests extends ESSingleNodeTestCase {
assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getFetchInfoThreshold());
assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getFetchWarnThreshold());
try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(),
"NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.search.slowlog.threshold.fetch.trace");
}
try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING.getKey(),
"NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.search.slowlog.threshold.fetch.debug");
}
try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING.getKey(), "NOT A TIME VALUE").build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING.getKey(),
"NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.search.slowlog.threshold.fetch.info");
}
try {
settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING.getKey(), "NOT A TIME VALUE").build()));
settings.updateIndexMetaData(newIndexMeta("index",
Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING.getKey(),
"NOT A TIME VALUE").build()));
fail();
} catch (IllegalArgumentException ex) {
assertTimeValueException(ex, "index.search.slowlog.threshold.fetch.warn");


@ -86,11 +86,12 @@ public class PreBuiltAnalyzerTests extends ESSingleNodeTestCase {
Version randomVersion = randomVersion(random());
Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion).build();
NamedAnalyzer namedAnalyzer = new PreBuiltAnalyzerProvider(analyzerName, AnalyzerScope.INDEX, randomPreBuiltAnalyzer.getAnalyzer(randomVersion)).get();
NamedAnalyzer namedAnalyzer = new PreBuiltAnalyzerProvider(analyzerName, AnalyzerScope.INDEX,
randomPreBuiltAnalyzer.getAnalyzer(randomVersion)).get();
XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
.startObject("properties").startObject("field").field("type", "text").field("analyzer", analyzerName).endObject().endObject()
.endObject().endObject();
.startObject("properties").startObject("field").field("type", "text")
.field("analyzer", analyzerName).endObject().endObject().endObject().endObject();
MapperService mapperService = createIndex("test", indexSettings, "type", mapping).mapperService();
MappedFieldType fieldType = mapperService.fullName("field");


@ -55,24 +55,32 @@ public class InternalEngineMergeIT extends ESIntegTestCase {
final int numDocs = scaledRandomIntBetween(100, 1000);
BulkRequestBuilder request = client().prepareBulk();
for (int j = 0; j < numDocs; ++j) {
request.add(Requests.indexRequest("test").type("type1").id(Long.toString(id++)).source(jsonBuilder().startObject().field("l", randomLong()).endObject()));
request.add(Requests.indexRequest("test").type("type1").id(Long.toString(id++))
.source(jsonBuilder().startObject().field("l", randomLong()).endObject()));
}
BulkResponse response = request.execute().actionGet();
refresh();
assertNoFailures(response);
IndicesStatsResponse stats = client().admin().indices().prepareStats("test").setSegments(true).setMerge(true).get();
logger.info("index round [{}] - segments {}, total merges {}, current merge {}", i, stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(), stats.getPrimaries().getMerge().getCurrent());
IndicesStatsResponse stats = client().admin().indices().prepareStats("test")
.setSegments(true).setMerge(true).get();
logger.info("index round [{}] - segments {}, total merges {}, current merge {}",
i, stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(),
stats.getPrimaries().getMerge().getCurrent());
}
final long upperNumberSegments = 2 * numOfShards * 10;
awaitBusy(() -> {
IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).setMerge(true).get();
logger.info("numshards {}, segments {}, total merges {}, current merge {}", numOfShards, stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(), stats.getPrimaries().getMerge().getCurrent());
logger.info("numshards {}, segments {}, total merges {}, current merge {}", numOfShards,
stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(),
stats.getPrimaries().getMerge().getCurrent());
long current = stats.getPrimaries().getMerge().getCurrent();
long count = stats.getPrimaries().getSegments().getCount();
return count < upperNumberSegments && current == 0;
});
IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).setMerge(true).get();
logger.info("numshards {}, segments {}, total merges {}, current merge {}", numOfShards, stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(), stats.getPrimaries().getMerge().getCurrent());
logger.info("numshards {}, segments {}, total merges {}, current merge {}", numOfShards,
stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(),
stats.getPrimaries().getMerge().getCurrent());
long count = stats.getPrimaries().getSegments().getCount();
assertThat(count, Matchers.lessThanOrEqualTo(upperNumberSegments));
}


@ -113,7 +113,8 @@ public class BoolQueryBuilderTests extends AbstractQueryTestCase<BoolQueryBuilde
}
}
private static List<BooleanClause> getBooleanClauses(List<QueryBuilder> queryBuilders, BooleanClause.Occur occur, QueryShardContext context) throws IOException {
private static List<BooleanClause> getBooleanClauses(List<QueryBuilder> queryBuilders,
BooleanClause.Occur occur, QueryShardContext context) throws IOException {
List<BooleanClause> clauses = new ArrayList<>();
for (QueryBuilder query : queryBuilders) {
Query innerQuery = query.toQuery(context);
@ -184,11 +185,13 @@ public class BoolQueryBuilderTests extends AbstractQueryTestCase<BoolQueryBuilde
assertEquals(0, bq.getMinimumNumberShouldMatch());
// Filters have a minShouldMatch of 0/1
ConstantScoreQuery csq = (ConstantScoreQuery) parseQuery(constantScoreQuery(boolQuery().must(termQuery("foo", "bar")))).toQuery(createShardContext());
ConstantScoreQuery csq = (ConstantScoreQuery) parseQuery(constantScoreQuery(boolQuery()
.must(termQuery("foo", "bar")))).toQuery(createShardContext());
bq = (BooleanQuery) csq.getQuery();
assertEquals(0, bq.getMinimumNumberShouldMatch());
csq = (ConstantScoreQuery) parseQuery(constantScoreQuery(boolQuery().should(termQuery("foo", "bar")))).toQuery(createShardContext());
csq = (ConstantScoreQuery) parseQuery(constantScoreQuery(boolQuery().should(termQuery("foo", "bar"))))
.toQuery(createShardContext());
bq = (BooleanQuery) csq.getQuery();
assertEquals(1, bq.getMinimumNumberShouldMatch());
}
@ -411,7 +414,8 @@ public class BoolQueryBuilderTests extends AbstractQueryTestCase<BoolQueryBuilde
boolQueryBuilder = new BoolQueryBuilder();
boolQueryBuilder.must(new TermQueryBuilder("foo","bar"));
boolQueryBuilder.filter(new BoolQueryBuilder().should(new TermQueryBuilder("foo","bar")).filter(new MatchNoneQueryBuilder()));
boolQueryBuilder.filter(new BoolQueryBuilder().should(new TermQueryBuilder("foo","bar"))
.filter(new MatchNoneQueryBuilder()));
rewritten = Rewriteable.rewrite(boolQueryBuilder, createShardContext());
assertEquals(new MatchNoneQueryBuilder(), rewritten);
}


@ -33,7 +33,8 @@ public class BoostingQueryBuilderTests extends AbstractQueryTestCase<BoostingQue
@Override
protected BoostingQueryBuilder doCreateTestQueryBuilder() {
BoostingQueryBuilder query = new BoostingQueryBuilder(RandomQueryBuilder.createQuery(random()), RandomQueryBuilder.createQuery(random()));
BoostingQueryBuilder query = new BoostingQueryBuilder(RandomQueryBuilder.createQuery(random()),
RandomQueryBuilder.createQuery(random()));
query.negativeBoost(2.0f / randomIntBetween(1, 20));
return query;
}
@ -90,8 +91,10 @@ public class BoostingQueryBuilderTests extends AbstractQueryTestCase<BoostingQue
}
public void testRewrite() throws IOException {
QueryBuilder positive = randomBoolean() ? new MatchAllQueryBuilder() : new WrapperQueryBuilder(new TermQueryBuilder("pos", "bar").toString());
QueryBuilder negative = randomBoolean() ? new MatchAllQueryBuilder() : new WrapperQueryBuilder(new TermQueryBuilder("neg", "bar").toString());
QueryBuilder positive = randomBoolean() ? new MatchAllQueryBuilder() :
new WrapperQueryBuilder(new TermQueryBuilder("pos", "bar").toString());
QueryBuilder negative = randomBoolean() ? new MatchAllQueryBuilder() :
new WrapperQueryBuilder(new TermQueryBuilder("neg", "bar").toString());
BoostingQueryBuilder qb = new BoostingQueryBuilder(positive, negative);
QueryBuilder rewrite = qb.rewrite(createShardContext());
if (positive instanceof MatchAllQueryBuilder && negative instanceof MatchAllQueryBuilder) {


@ -328,14 +328,16 @@ public class GeoDistanceQueryBuilderTests extends AbstractQueryTestCase<GeoDista
}
public void testIgnoreUnmapped() throws IOException {
final GeoDistanceQueryBuilder queryBuilder = new GeoDistanceQueryBuilder("unmapped").point(0.0, 0.0).distance("20m");
final GeoDistanceQueryBuilder queryBuilder =
new GeoDistanceQueryBuilder("unmapped").point(0.0, 0.0).distance("20m");
queryBuilder.ignoreUnmapped(true);
QueryShardContext shardContext = createShardContext();
Query query = queryBuilder.toQuery(shardContext);
assertThat(query, notNullValue());
assertThat(query, instanceOf(MatchNoDocsQuery.class));
final GeoDistanceQueryBuilder failingQueryBuilder = new GeoDistanceQueryBuilder("unmapped").point(0.0, 0.0).distance("20m");
final GeoDistanceQueryBuilder failingQueryBuilder =
new GeoDistanceQueryBuilder("unmapped").point(0.0, 0.0).distance("20m");
failingQueryBuilder.ignoreUnmapped(false);
QueryShardException e = expectThrows(QueryShardException.class, () -> failingQueryBuilder.toQuery(shardContext));
assertThat(e.getMessage(), containsString("failed to find geo_point field [unmapped]"));


@ -233,7 +233,8 @@ public class MoreLikeThisQueryBuilderTests extends AbstractQueryTestCase<MoreLik
if (request.doc() != null) {
generatedFields = generateFields(randomFields, request.doc().utf8ToString());
} else {
generatedFields = generateFields(request.selectedFields().toArray(new String[request.selectedFields().size()]), request.id());
generatedFields =
generateFields(request.selectedFields().toArray(new String[request.selectedFields().size()]), request.id());
}
EnumSet<TermVectorsRequest.Flag> flags = EnumSet.of(TermVectorsRequest.Flag.Positions, TermVectorsRequest.Flag.Offsets);
response.setFields(generatedFields, request.selectedFields(), flags, generatedFields);
@ -284,20 +285,24 @@ public class MoreLikeThisQueryBuilderTests extends AbstractQueryTestCase<MoreLik
public void testValidateEmptyLike() {
String[] likeTexts = randomBoolean() ? null : new String[0];
Item[] likeItems = randomBoolean() ? null : new Item[0];
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new MoreLikeThisQueryBuilder(likeTexts, likeItems));
IllegalArgumentException e =
expectThrows(IllegalArgumentException.class, () -> new MoreLikeThisQueryBuilder(likeTexts, likeItems));
assertThat(e.getMessage(), containsString("requires either 'like' texts or items to be specified"));
}
public void testUnsupportedFields() throws IOException {
String unsupportedField = randomFrom(INT_FIELD_NAME, DOUBLE_FIELD_NAME, DATE_FIELD_NAME);
MoreLikeThisQueryBuilder queryBuilder = new MoreLikeThisQueryBuilder(new String[] {unsupportedField}, new String[]{"some text"}, null)
MoreLikeThisQueryBuilder queryBuilder =
new MoreLikeThisQueryBuilder(new String[] {unsupportedField}, new String[]{"some text"}, null)
.failOnUnsupportedField(true);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> queryBuilder.toQuery(createShardContext()));
assertThat(e.getMessage(), containsString("more_like_this only supports text/keyword fields"));
}
public void testMoreLikeThisBuilder() throws Exception {
Query parsedQuery = parseQuery(moreLikeThisQuery(new String[]{"name.first", "name.last"}, new String[]{"something"}, null).minTermFreq(1).maxQueryTerms(12)).toQuery(createShardContext());
Query parsedQuery =
parseQuery(moreLikeThisQuery(new String[]{"name.first", "name.last"}, new String[]{"something"}, null)
.minTermFreq(1).maxQueryTerms(12)).toQuery(createShardContext());
assertThat(parsedQuery, instanceOf(MoreLikeThisQuery.class));
MoreLikeThisQuery mltQuery = (MoreLikeThisQuery) parsedQuery;
assertThat(mltQuery.getMoreLikeFields()[0], equalTo("name.first"));


@ -102,7 +102,8 @@ public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase<MultiMatc
if (randomBoolean()) {
query.slop(randomIntBetween(0, 5));
}
if (fieldName.equals(STRING_FIELD_NAME) && randomBoolean() && (query.type() == Type.BEST_FIELDS || query.type() == Type.MOST_FIELDS)) {
if (fieldName.equals(STRING_FIELD_NAME) && randomBoolean() &&
(query.type() == Type.BEST_FIELDS || query.type() == Type.MOST_FIELDS)) {
query.fuzziness(randomFuzziness(fieldName));
}
if (randomBoolean()) {
@ -197,7 +198,8 @@ public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase<MultiMatc
}
public void testToQueryMultipleFieldsDisableDismax() throws Exception {
Query query = multiMatchQuery("test").field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2).useDisMax(false).toQuery(createShardContext());
Query query = multiMatchQuery("test").field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2).useDisMax(false)
.toQuery(createShardContext());
assertThat(query, instanceOf(DisjunctionMaxQuery.class));
DisjunctionMaxQuery dQuery = (DisjunctionMaxQuery) query;
assertThat(dQuery.getTieBreakerMultiplier(), equalTo(1.0f));
@ -207,7 +209,8 @@ public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase<MultiMatc
}
public void testToQueryMultipleFieldsDisMaxQuery() throws Exception {
Query query = multiMatchQuery("test").field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2).useDisMax(true).toQuery(createShardContext());
Query query = multiMatchQuery("test").field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2).useDisMax(true)
.toQuery(createShardContext());
assertThat(query, instanceOf(DisjunctionMaxQuery.class));
DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) query;
assertThat(disMaxQuery.getTieBreakerMultiplier(), equalTo(0.0f));
@ -230,8 +233,10 @@ public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase<MultiMatc
}
public void testToQueryFieldMissing() throws Exception {
assertThat(multiMatchQuery("test").field(MISSING_WILDCARD_FIELD_NAME).toQuery(createShardContext()), instanceOf(MatchNoDocsQuery.class));
assertThat(multiMatchQuery("test").field(MISSING_FIELD_NAME).toQuery(createShardContext()), instanceOf(MatchNoDocsQuery.class));
assertThat(multiMatchQuery("test").field(MISSING_WILDCARD_FIELD_NAME).toQuery(createShardContext()),
instanceOf(MatchNoDocsQuery.class));
assertThat(multiMatchQuery("test").field(MISSING_FIELD_NAME).toQuery(createShardContext()),
instanceOf(MatchNoDocsQuery.class));
}
public void testFromJson() throws IOException {
@ -333,7 +338,8 @@ public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase<MultiMatc
qb.fuzzyTranspositions(false);
Query query = qb.toQuery(createShardContext());
FuzzyQuery expected = new FuzzyQuery(new Term(STRING_FIELD_NAME, "text"), 2, 2, 5, false);
FuzzyQuery expected = new FuzzyQuery(new Term(STRING_FIELD_NAME, "text"), 2, 2,
5, false);
assertEquals(expected, query);
}
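For readability of the expected query above, my reading (an assumption, not stated in this diff) is that Lucene's FuzzyQuery constructor takes (term, maxEdits, prefixLength, maxExpansions, transpositions), so the positional arguments line up with the builder calls as follows:
    // Assumed argument mapping for the expected query built above.
    FuzzyQuery expected = new FuzzyQuery(new Term(STRING_FIELD_NAME, "text"),
            2,      // maxEdits       -- the configured fuzziness
            2,      // prefixLength
            5,      // maxExpansions
            false); // transpositions -- qb.fuzzyTranspositions(false) above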
@ -346,8 +352,9 @@ public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase<MultiMatc
assertThat(query, instanceOf(DisjunctionMaxQuery.class));
context.getIndexSettings().updateIndexMetaData(
newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field",
STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5").build())
newIndexMeta("index", context.getIndexSettings().getSettings(),
Settings.builder().putList("index.query.default_field", STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5")
.build())
);
MultiMatchQueryBuilder qb = new MultiMatchQueryBuilder("hello");
@ -361,7 +368,8 @@ public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase<MultiMatc
assertEquals(expected, query);
context.getIndexSettings().updateIndexMetaData(
newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field",
newIndexMeta("index", context.getIndexSettings().getSettings(),
Settings.builder().putList("index.query.default_field",
STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5", INT_FIELD_NAME).build())
);
// should fail because lenient defaults to false
@ -440,7 +448,8 @@ public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase<MultiMatc
)
.toQuery(createShardContext());
expected = new BooleanQuery.Builder()
.add(new DisjunctionMaxQuery(Arrays.asList(new MatchNoDocsQuery(), new MatchNoDocsQuery()), 0f), BooleanClause.Occur.SHOULD)
.add(new DisjunctionMaxQuery(Arrays.asList(new MatchNoDocsQuery(), new MatchNoDocsQuery()),
0f), BooleanClause.Occur.SHOULD)
.build();
assertEquals(expected, query);
}


@ -70,7 +70,8 @@ public class SpanNotQueryBuilderTests extends AbstractQueryTestCase<SpanNotQuery
}
public void testDist() {
SpanNotQueryBuilder builder = new SpanNotQueryBuilder(new SpanTermQueryBuilder("name1", "value1"), new SpanTermQueryBuilder("name2", "value2"));
SpanNotQueryBuilder builder = new SpanNotQueryBuilder(new SpanTermQueryBuilder("name1", "value1"),
new SpanTermQueryBuilder("name2", "value2"));
assertThat(builder.pre(), equalTo(0));
assertThat(builder.post(), equalTo(0));
builder.dist(-4);
@ -82,7 +83,8 @@ public class SpanNotQueryBuilderTests extends AbstractQueryTestCase<SpanNotQuery
}
public void testPrePost() {
SpanNotQueryBuilder builder = new SpanNotQueryBuilder(new SpanTermQueryBuilder("name1", "value1"), new SpanTermQueryBuilder("name2", "value2"));
SpanNotQueryBuilder builder = new SpanNotQueryBuilder(new SpanTermQueryBuilder("name1", "value1"),
new SpanTermQueryBuilder("name2", "value2"));
assertThat(builder.pre(), equalTo(0));
assertThat(builder.post(), equalTo(0));
builder.pre(-4).post(-4);


@ -138,7 +138,8 @@ public class FunctionScoreTests extends ESTestCase {
}
@Override
public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) {
public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode,
XFieldComparatorSource.Nested nested, boolean reverse) {
throw new UnsupportedOperationException(UNSUPPORTED);
}
@ -228,7 +229,8 @@ public class FunctionScoreTests extends ESTestCase {
}
@Override
public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) {
public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode,
XFieldComparatorSource.Nested nested, boolean reverse) {
throw new UnsupportedOperationException(UNSUPPORTED);
}
@ -246,11 +248,14 @@ public class FunctionScoreTests extends ESTestCase {
private static final ScoreFunction RANDOM_SCORE_FUNCTION = new RandomScoreFunction(0, 0, new IndexFieldDataStub());
private static final ScoreFunction FIELD_VALUE_FACTOR_FUNCTION = new FieldValueFactorFunction("test", 1,
FieldValueFactorFunction.Modifier.LN, 1.0, null);
private static final ScoreFunction GAUSS_DECAY_FUNCTION = new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0,
private static final ScoreFunction GAUSS_DECAY_FUNCTION =
new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0,
GaussDecayFunctionBuilder.GAUSS_DECAY_FUNCTION, new IndexNumericFieldDataStub(), MultiValueMode.MAX);
private static final ScoreFunction EXP_DECAY_FUNCTION = new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0,
private static final ScoreFunction EXP_DECAY_FUNCTION =
new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0,
ExponentialDecayFunctionBuilder.EXP_DECAY_FUNCTION, new IndexNumericFieldDataStub(), MultiValueMode.MAX);
private static final ScoreFunction LIN_DECAY_FUNCTION = new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0,
private static final ScoreFunction LIN_DECAY_FUNCTION =
new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0,
LinearDecayFunctionBuilder.LINEAR_DECAY_FUNCTION, new IndexNumericFieldDataStub(), MultiValueMode.MAX);
private static final ScoreFunction WEIGHT_FACTOR_FUNCTION = new WeightFactorFunction(4);
private static final String TEXT = "The way out is through.";
@ -319,7 +324,8 @@ public class FunctionScoreTests extends ESTestCase {
}
public Explanation getFunctionScoreExplanation(IndexSearcher searcher, ScoreFunction scoreFunction) throws IOException {
FunctionScoreQuery functionScoreQuery = new FunctionScoreQuery(new TermQuery(TERM), scoreFunction, CombineFunction.AVG,0.0f, 100);
FunctionScoreQuery functionScoreQuery =
new FunctionScoreQuery(new TermQuery(TERM), scoreFunction, CombineFunction.AVG,0.0f, 100);
Weight weight = searcher.createWeight(searcher.rewrite(functionScoreQuery), org.apache.lucene.search.ScoreMode.COMPLETE, 1f);
Explanation explanation = weight.explain(searcher.getIndexReader().leaves().get(0), 0);
return explanation.getDetails()[1];
@ -370,7 +376,8 @@ public class FunctionScoreTests extends ESTestCase {
checkFiltersFunctionScoreExplanation(functionExplanation, "random score function (seed: 0, field: test)", 0);
assertThat(functionExplanation.getDetails()[0].getDetails()[0].getDetails()[1].getDetails().length, equalTo(0));
checkFiltersFunctionScoreExplanation(functionExplanation, "field value function: ln(doc['test'].value?:1.0 * factor=1.0)", 1);
checkFiltersFunctionScoreExplanation(functionExplanation,
"field value function: ln(doc['test'].value?:1.0 * factor=1.0)", 1);
assertThat(functionExplanation.getDetails()[0].getDetails()[1].getDetails()[1].getDetails().length, equalTo(0));
checkFiltersFunctionScoreExplanation(functionExplanation, "Function for field test:", 2);
@ -408,7 +415,8 @@ public class FunctionScoreTests extends ESTestCase {
filterFunctions[i] = new FunctionScoreQuery.FilterScoreFunction(
new TermQuery(TERM), scoreFunctions[i]);
}
return new FunctionScoreQuery(new TermQuery(TERM), scoreMode, filterFunctions, combineFunction,Float.MAX_VALUE * -1, Float.MAX_VALUE);
return new FunctionScoreQuery(new TermQuery(TERM),
scoreMode, filterFunctions, combineFunction,Float.MAX_VALUE * -1, Float.MAX_VALUE);
}
public void checkFiltersFunctionScoreExplanation(Explanation randomExplanation, String functionExpl, int whichFunction) {
@ -627,15 +635,19 @@ public class FunctionScoreTests extends ESTestCase {
float maxBoost = randomBoolean() ? Float.POSITIVE_INFINITY : randomFloat();
ScoreFunction function = new DummyScoreFunction(combineFunction);
FunctionScoreQuery q = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost);
FunctionScoreQuery q1 = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost);
FunctionScoreQuery q =
new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost);
FunctionScoreQuery q1 =
new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost);
assertEquals(q, q);
assertEquals(q.hashCode(), q.hashCode());
assertEquals(q, q1);
assertEquals(q.hashCode(), q1.hashCode());
FunctionScoreQuery diffQuery = new FunctionScoreQuery(new TermQuery(new Term("foo", "baz")), function, combineFunction, minScore, maxBoost);
FunctionScoreQuery diffMinScore = new FunctionScoreQuery(q.getSubQuery(), function, combineFunction, minScore == null ? 1.0f : null, maxBoost);
FunctionScoreQuery diffQuery =
new FunctionScoreQuery(new TermQuery(new Term("foo", "baz")), function, combineFunction, minScore, maxBoost);
FunctionScoreQuery diffMinScore =
new FunctionScoreQuery(q.getSubQuery(), function, combineFunction, minScore == null ? 1.0f : null, maxBoost);
ScoreFunction otherFunction = new DummyScoreFunction(combineFunction);
FunctionScoreQuery diffFunction = new FunctionScoreQuery(q.getSubQuery(), otherFunction, combineFunction, minScore, maxBoost);
FunctionScoreQuery diffMaxBoost = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")),
@ -666,10 +678,12 @@ public class FunctionScoreTests extends ESTestCase {
Float minScore = randomBoolean() ? null : 1.0f;
Float maxBoost = randomBoolean() ? Float.POSITIVE_INFINITY : randomFloat();
FilterScoreFunction function = new FilterScoreFunction(new TermQuery(new Term("filter", "query")), scoreFunction);
FilterScoreFunction function =
new FilterScoreFunction(new TermQuery(new Term("filter", "query")), scoreFunction);
FunctionScoreQuery q = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")),
function, combineFunction, minScore, maxBoost);
FunctionScoreQuery q1 = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost);
FunctionScoreQuery q1 =
new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost);
assertEquals(q, q);
assertEquals(q.hashCode(), q.hashCode());
assertEquals(q, q1);
@ -684,7 +698,8 @@ public class FunctionScoreTests extends ESTestCase {
function, combineFunction, minScore == null ? 0.9f : null, maxBoost);
FilterScoreFunction otherFunc = new FilterScoreFunction(new TermQuery(new Term("filter", "other_query")), scoreFunction);
FunctionScoreQuery diffFunc = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), randomFrom(ScoreMode.values()),
randomBoolean() ? new ScoreFunction[] { function, otherFunc } : new ScoreFunction[] { otherFunc }, combineFunction, minScore, maxBoost);
randomBoolean() ? new ScoreFunction[] { function, otherFunc } :
new ScoreFunction[] { otherFunc }, combineFunction, minScore, maxBoost);
FunctionScoreQuery[] queries = new FunctionScoreQuery[] {
diffQuery,


@ -89,8 +89,10 @@ public class GeoUtilsTests extends ESTestCase {
double equatorialDistance = 2 * Math.PI * 6378137.0;
double polarDistance = Math.PI * 6356752.314245;
assertThat(GeoUtils.geoHashCellSize(0), equalTo(Math.sqrt(Math.pow(polarDistance, 2) + Math.pow(equatorialDistance, 2))));
assertThat(GeoUtils.geoHashCellSize(1), equalTo(Math.sqrt(Math.pow(polarDistance / 4, 2) + Math.pow(equatorialDistance / 8, 2))));
assertThat(GeoUtils.geoHashCellSize(2), equalTo(Math.sqrt(Math.pow(polarDistance / 32, 2) + Math.pow(equatorialDistance / 32, 2))));
assertThat(GeoUtils.geoHashCellSize(1),
equalTo(Math.sqrt(Math.pow(polarDistance / 4, 2) + Math.pow(equatorialDistance / 8, 2))));
assertThat(GeoUtils.geoHashCellSize(2),
equalTo(Math.sqrt(Math.pow(polarDistance / 32, 2) + Math.pow(equatorialDistance / 32, 2))));
assertThat(GeoUtils.geoHashCellSize(3),
equalTo(Math.sqrt(Math.pow(polarDistance / 128, 2) + Math.pow(equatorialDistance / 256, 2))));
assertThat(GeoUtils.geoHashCellSize(4),
@ -167,13 +169,20 @@ public class GeoUtilsTests extends ESTestCase {
public void testQuadTreeCellSize() {
double equatorialDistance = 2 * Math.PI * 6378137.0;
double polarDistance = Math.PI * 6356752.314245;
assertThat(GeoUtils.quadTreeCellSize(0), equalTo(Math.sqrt(Math.pow(polarDistance, 2) + Math.pow(equatorialDistance, 2))));
assertThat(GeoUtils.quadTreeCellSize(1), equalTo(Math.sqrt(Math.pow(polarDistance / 2, 2) + Math.pow(equatorialDistance / 2, 2))));
assertThat(GeoUtils.quadTreeCellSize(2), equalTo(Math.sqrt(Math.pow(polarDistance / 4, 2) + Math.pow(equatorialDistance / 4, 2))));
assertThat(GeoUtils.quadTreeCellSize(3), equalTo(Math.sqrt(Math.pow(polarDistance / 8, 2) + Math.pow(equatorialDistance / 8, 2))));
assertThat(GeoUtils.quadTreeCellSize(4), equalTo(Math.sqrt(Math.pow(polarDistance / 16, 2) + Math.pow(equatorialDistance / 16, 2))));
assertThat(GeoUtils.quadTreeCellSize(5), equalTo(Math.sqrt(Math.pow(polarDistance / 32, 2) + Math.pow(equatorialDistance / 32, 2))));
assertThat(GeoUtils.quadTreeCellSize(6), equalTo(Math.sqrt(Math.pow(polarDistance / 64, 2) + Math.pow(equatorialDistance / 64, 2))));
assertThat(GeoUtils.quadTreeCellSize(0),
equalTo(Math.sqrt(Math.pow(polarDistance, 2) + Math.pow(equatorialDistance, 2))));
assertThat(GeoUtils.quadTreeCellSize(1),
equalTo(Math.sqrt(Math.pow(polarDistance / 2, 2) + Math.pow(equatorialDistance / 2, 2))));
assertThat(GeoUtils.quadTreeCellSize(2),
equalTo(Math.sqrt(Math.pow(polarDistance / 4, 2) + Math.pow(equatorialDistance / 4, 2))));
assertThat(GeoUtils.quadTreeCellSize(3),
equalTo(Math.sqrt(Math.pow(polarDistance / 8, 2) + Math.pow(equatorialDistance / 8, 2))));
assertThat(GeoUtils.quadTreeCellSize(4),
equalTo(Math.sqrt(Math.pow(polarDistance / 16, 2) + Math.pow(equatorialDistance / 16, 2))));
assertThat(GeoUtils.quadTreeCellSize(5),
equalTo(Math.sqrt(Math.pow(polarDistance / 32, 2) + Math.pow(equatorialDistance / 32, 2))));
assertThat(GeoUtils.quadTreeCellSize(6),
equalTo(Math.sqrt(Math.pow(polarDistance / 64, 2) + Math.pow(equatorialDistance / 64, 2))));
assertThat(GeoUtils.quadTreeCellSize(7),
equalTo(Math.sqrt(Math.pow(polarDistance / 128, 2) + Math.pow(equatorialDistance / 128, 2))));
assertThat(GeoUtils.quadTreeCellSize(8),
@ -423,7 +432,8 @@ public class GeoUtilsTests extends ESTestCase {
while (parser.currentToken() != Token.VALUE_STRING) {
parser.nextToken();
}
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser, new GeoPoint(), false));
Exception e = expectThrows(ElasticsearchParseException.class,
() -> GeoUtils.parseGeoPoint(parser, new GeoPoint(), false));
assertThat(e.getMessage(), containsString("but [ignore_z_value] parameter is [false]"));
}
}
@ -506,7 +516,8 @@ public class GeoUtilsTests extends ESTestCase {
public void testParseGeoPointExtraField() throws IOException {
double lat = 0.0;
double lon = 0.0;
XContentBuilder json = jsonBuilder().startObject().field("lat", lat).field("lon", lon).field("foo", true).endObject();
XContentBuilder json = jsonBuilder().startObject()
.field("lat", lat).field("lon", lon).field("foo", true).endObject();
try (XContentParser parser = createParser(json)) {
parser.nextToken();
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
@ -518,7 +529,8 @@ public class GeoUtilsTests extends ESTestCase {
double lat = 0.0;
double lon = 0.0;
String geohash = "abcd";
XContentBuilder json = jsonBuilder().startObject().field("lat", lat).field("lon", lon).field("geohash", geohash).endObject();
XContentBuilder json = jsonBuilder().startObject()
.field("lat", lat).field("lon", lon).field("geohash", geohash).endObject();
try (XContentParser parser = createParser(json)) {
parser.nextToken();
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
@ -536,7 +548,8 @@ public class GeoUtilsTests extends ESTestCase {
parser.nextToken();
}
Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser));
assertThat(e.getMessage(), is("Exception parsing coordinates: found Z value [0.0] but [ignore_z_value] parameter is [false]"));
assertThat(e.getMessage(),
is("Exception parsing coordinates: found Z value [0.0] but [ignore_z_value] parameter is [false]"));
}
}
@ -599,7 +612,8 @@ public class GeoUtilsTests extends ESTestCase {
assertThat(GeoUtils.quadTreeCellHeight(level), lessThanOrEqualTo(height));
assertThat(GeoUtils.geoHashLevelsForPrecision(size), equalTo(geohashPrefixTree.getLevelForDistance(degrees)));
assertThat("width at level " + i, gNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR));
assertThat("width at level " + i,
gNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR));
assertThat("height at level " + i, gNode.getShape().getBoundingBox().getHeight(), equalTo(180.d * height
/ GeoUtils.EARTH_POLAR_DISTANCE));
@ -620,7 +634,8 @@ public class GeoUtilsTests extends ESTestCase {
assertThat(GeoUtils.quadTreeCellHeight(level), lessThanOrEqualTo(height));
assertThat(GeoUtils.quadTreeLevelsForPrecision(size), equalTo(quadPrefixTree.getLevelForDistance(degrees)));
assertThat("width at level " + i, qNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR));
assertThat("width at level " + i,
qNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR));
assertThat("height at level " + i, qNode.getShape().getBoundingBox().getHeight(), equalTo(180.d * height
/ GeoUtils.EARTH_POLAR_DISTANCE));
@ -629,11 +644,16 @@ public class GeoUtilsTests extends ESTestCase {
}
public void testParseGeoPointGeohashPositions() throws IOException {
assertNormalizedPoint(parseGeohash("drt5", GeoUtils.EffectivePoint.TOP_LEFT), new GeoPoint(42.890625, -71.71875));
assertNormalizedPoint(parseGeohash("drt5", GeoUtils.EffectivePoint.TOP_RIGHT), new GeoPoint(42.890625, -71.3671875));
assertNormalizedPoint(parseGeohash("drt5", GeoUtils.EffectivePoint.BOTTOM_LEFT), new GeoPoint(42.71484375, -71.71875));
assertNormalizedPoint(parseGeohash("drt5", GeoUtils.EffectivePoint.BOTTOM_RIGHT), new GeoPoint(42.71484375, -71.3671875));
assertNormalizedPoint(parseGeohash("drtk", GeoUtils.EffectivePoint.BOTTOM_LEFT), new GeoPoint(42.890625, -71.3671875));
assertNormalizedPoint(parseGeohash("drt5",
GeoUtils.EffectivePoint.TOP_LEFT), new GeoPoint(42.890625, -71.71875));
assertNormalizedPoint(parseGeohash("drt5",
GeoUtils.EffectivePoint.TOP_RIGHT), new GeoPoint(42.890625, -71.3671875));
assertNormalizedPoint(parseGeohash("drt5",
GeoUtils.EffectivePoint.BOTTOM_LEFT), new GeoPoint(42.71484375, -71.71875));
assertNormalizedPoint(parseGeohash("drt5",
GeoUtils.EffectivePoint.BOTTOM_RIGHT), new GeoPoint(42.71484375, -71.3671875));
assertNormalizedPoint(parseGeohash("drtk",
GeoUtils.EffectivePoint.BOTTOM_LEFT), new GeoPoint(42.890625, -71.3671875));
}
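A small illustration (an assumption drawn only from the assertions above, not something this commit adds) of what EffectivePoint selects: a geohash names a cell rather than a single point, and parsing resolves it to the requested corner, so the top-right corner of drt5 coincides with the bottom-left corner of the neighbouring cell drtk:
    // Hedged sketch using the parseGeohash helper declared just below; per the assertions above,
    // both corners resolve to the same coordinate (42.890625, -71.3671875).
    GeoPoint topRightOfDrt5 = parseGeohash("drt5", GeoUtils.EffectivePoint.TOP_RIGHT);
    GeoPoint bottomLeftOfDrtk = parseGeohash("drtk", GeoUtils.EffectivePoint.BOTTOM_LEFT);
    assertEquals(topRightOfDrt5, bottomLeftOfDrtk);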
private GeoPoint parseGeohash(String geohash, GeoUtils.EffectivePoint effectivePoint) throws IOException {


@ -221,8 +221,10 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD
IndexSearcher searcher = new IndexSearcher(directoryReader);
Query parentFilter = new TermQuery(new Term("__type", "parent"));
Query childFilter = Queries.not(parentFilter);
XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, null, createNested(searcher, parentFilter, childFilter));
ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
XFieldComparatorSource nestedComparatorSource =
createFieldComparator("field2", sortMode, null, createNested(searcher, parentFilter, childFilter));
ToParentBlockJoinQuery query =
new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
TopFieldDocs topDocs = searcher.search(query, 5, sort);
@ -255,7 +257,8 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD
assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(9));
childFilter = new TermQuery(new Term("filter_1", "T"));
nestedComparatorSource = createFieldComparator("field2", sortMode, null, createNested(searcher, parentFilter, childFilter));
nestedComparatorSource =
createFieldComparator("field2", sortMode, null, createNested(searcher, parentFilter, childFilter));
query = new ToParentBlockJoinQuery(
new ConstantScoreQuery(childFilter),
new QueryBitSetProducer(parentFilter),
@ -291,7 +294,8 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD
assertThat(topDocs.scoreDocs[4].doc, equalTo(3));
assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(9));
nestedComparatorSource = createFieldComparator("field2", sortMode, 127, createNested(searcher, parentFilter, childFilter));
nestedComparatorSource =
createFieldComparator("field2", sortMode, 127, createNested(searcher, parentFilter, childFilter));
sort = new Sort(new SortField("field2", nestedComparatorSource, true));
topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort);
assertThat(topDocs.totalHits.value, equalTo(8L));
@ -307,7 +311,8 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD
assertThat(topDocs.scoreDocs[4].doc, equalTo(7));
assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(8));
nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter));
nestedComparatorSource =
createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter));
sort = new Sort(new SortField("field2", nestedComparatorSource));
topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort);
assertThat(topDocs.totalHits.value, equalTo(8L));
@ -332,8 +337,10 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD
protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher) throws IOException {
MultiValueMode sortMode = MultiValueMode.AVG;
Query childFilter = Queries.not(parentFilter);
XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter));
Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
XFieldComparatorSource nestedComparatorSource =
createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter));
Query query =
new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
TopDocs topDocs = searcher.search(query, 5, sort);
assertThat(topDocs.totalHits.value, equalTo(7L));
@ -352,6 +359,7 @@ public abstract class AbstractNumberNestedSortingTestCase extends AbstractFieldD
protected abstract IndexableField createField(String name, int value);
protected abstract IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, Object missingValue, Nested nested);
protected abstract IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode,
Object missingValue, Nested nested);
}


@ -51,7 +51,8 @@ public class DoubleNestedSortingTests extends AbstractNumberNestedSortingTestCas
}
@Override
protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, Object missingValue, Nested nested) {
protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode,
Object missingValue, Nested nested) {
IndexNumericFieldData fieldData = getForField(fieldName);
return new DoubleValuesComparatorSource(fieldData, missingValue, sortMode, nested);
}
@ -65,8 +66,10 @@ public class DoubleNestedSortingTests extends AbstractNumberNestedSortingTestCas
protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher) throws IOException {
MultiValueMode sortMode = MultiValueMode.AVG;
Query childFilter = Queries.not(parentFilter);
XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter));
Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127,
createNested(searcher, parentFilter, childFilter));
Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter),
new QueryBitSetProducer(parentFilter), ScoreMode.None);
Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
TopDocs topDocs = searcher.search(query, 5, sort);
assertThat(topDocs.totalHits.value, equalTo(7L));


@ -51,7 +51,8 @@ public class FloatNestedSortingTests extends DoubleNestedSortingTests {
}
@Override
protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, Object missingValue, Nested nested) {
protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode,
Object missingValue, Nested nested) {
IndexNumericFieldData fieldData = getForField(fieldName);
return new FloatValuesComparatorSource(fieldData, missingValue, sortMode, nested);
}
@ -61,11 +62,14 @@ public class FloatNestedSortingTests extends DoubleNestedSortingTests {
return new SortedNumericDocValuesField(name, NumericUtils.floatToSortableInt(value));
}
protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher, IndexFieldData.XFieldComparatorSource innerFieldComparator) throws IOException {
protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher,
IndexFieldData.XFieldComparatorSource innerFieldComparator) throws IOException {
MultiValueMode sortMode = MultiValueMode.AVG;
Query childFilter = Queries.not(parentFilter);
XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter));
Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
XFieldComparatorSource nestedComparatorSource =
createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter));
Query query =
new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
TopDocs topDocs = searcher.search(query, 5, sort);
assertThat(topDocs.totalHits.value, equalTo(7L));


@ -34,7 +34,8 @@ public class LongNestedSortingTests extends AbstractNumberNestedSortingTestCase
}
@Override
protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, Object missingValue, Nested nested) {
protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode,
Object missingValue, Nested nested) {
IndexNumericFieldData fieldData = getForField(fieldName);
return new LongValuesComparatorSource(fieldData, missingValue, sortMode, nested);
}


@ -129,7 +129,8 @@ public class NestedSortingTests extends AbstractFieldDataTestCase {
searcher.getIndexReader().close();
}
private TopDocs getTopDocs(IndexSearcher searcher, IndexFieldData<?> indexFieldData, String missingValue, MultiValueMode sortMode, int n, boolean reverse) throws IOException {
private TopDocs getTopDocs(IndexSearcher searcher, IndexFieldData<?> indexFieldData, String missingValue,
MultiValueMode sortMode, int n, boolean reverse) throws IOException {
Query parentFilter = new TermQuery(new Term("__type", "parent"));
Query childFilter = new TermQuery(new Term("__type", "child"));
SortField sortField = indexFieldData.sortField(missingValue, sortMode, createNested(searcher, parentFilter, childFilter), reverse);
@ -299,8 +300,10 @@ public class NestedSortingTests extends AbstractFieldDataTestCase {
PagedBytesIndexFieldData indexFieldData = getForField("field2");
Query parentFilter = new TermQuery(new Term("__type", "parent"));
Query childFilter = Queries.not(parentFilter);
BytesRefFieldComparatorSource nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter));
ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
BytesRefFieldComparatorSource nestedComparatorSource =
new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter));
ToParentBlockJoinQuery query =
new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None);
Sort sort = new Sort(new SortField("field2", nestedComparatorSource));
TopFieldDocs topDocs = searcher.search(query, 5, sort);
@ -318,7 +321,8 @@ public class NestedSortingTests extends AbstractFieldDataTestCase {
assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("i"));
sortMode = MultiValueMode.MAX;
nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter));
nestedComparatorSource =
new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter));
sort = new Sort(new SortField("field2", nestedComparatorSource, true));
topDocs = searcher.search(query, 5, sort);
assertThat(topDocs.totalHits.value, equalTo(7L));
@ -339,7 +343,8 @@ public class NestedSortingTests extends AbstractFieldDataTestCase {
bq.add(parentFilter, Occur.MUST_NOT);
bq.add(new TermQuery(new Term("filter_1", "T")), Occur.MUST);
childFilter = bq.build();
nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter));
nestedComparatorSource =
new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter));
query = new ToParentBlockJoinQuery(
new ConstantScoreQuery(childFilter),
new QueryBitSetProducer(parentFilter),
@ -707,7 +712,8 @@ public class NestedSortingTests extends AbstractFieldDataTestCase {
.setFilter(queryBuilder)
.setNestedSort(new NestedSortBuilder("chapters.paragraphs"))
);
topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, queryShardContext, searcher);
topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None),
sortBuilder, queryShardContext, searcher);
assertThat(topFields.totalHits.value, equalTo(2L));
assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("2"));
assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(76L));
@ -715,7 +721,8 @@ public class NestedSortingTests extends AbstractFieldDataTestCase {
assertThat(((FieldDoc) topFields.scoreDocs[1]).fields[0], equalTo(87L));
sortBuilder.order(SortOrder.DESC);
topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, queryShardContext, searcher);
topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None),
sortBuilder, queryShardContext, searcher);
assertThat(topFields.totalHits.value, equalTo(2L));
assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4"));
assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L));
@ -735,7 +742,8 @@ public class NestedSortingTests extends AbstractFieldDataTestCase {
.setFilter(new RangeQueryBuilder("chapters.paragraphs.word_count").from(80L))
)
);
topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, queryShardContext, searcher);
topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None),
sortBuilder, queryShardContext, searcher);
assertThat(topFields.totalHits.value, equalTo(2L));
assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4"));
assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L));
@ -743,7 +751,8 @@ public class NestedSortingTests extends AbstractFieldDataTestCase {
assertThat(((FieldDoc) topFields.scoreDocs[1]).fields[0], equalTo(Long.MAX_VALUE));
sortBuilder.order(SortOrder.DESC);
topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None), sortBuilder, queryShardContext, searcher);
topFields = search(new NestedQueryBuilder("chapters", queryBuilder, ScoreMode.None),
sortBuilder, queryShardContext, searcher);
assertThat(topFields.totalHits.value, equalTo(2L));
assertThat(searcher.doc(topFields.scoreDocs[0].doc).get("_id"), equalTo("4"));
assertThat(((FieldDoc) topFields.scoreDocs[0]).fields[0], equalTo(87L));


@ -227,10 +227,13 @@ public class IndexShardIT extends ESSingleNodeTestCase {
assertAcked(client().admin().indices().prepareCreate("test")
.setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 200)));
IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
assertEquals(200, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400)
assertEquals(200,
indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
client().admin().indices().prepareUpdateSettings("test")
.setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400)
.build()).get();
assertEquals(400, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
assertEquals(400,
indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
}
public void testIndexDirIsDeletedWhenShardRemoved() throws Exception {
@ -242,7 +245,8 @@ public class IndexShardIT extends ESSingleNodeTestCase {
.build();
createIndex("test", idxSettings);
ensureGreen("test");
client().prepareIndex("test", "bar", "1").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get();
client().prepareIndex("test", "bar", "1")
.setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get();
SearchResponse response = client().prepareSearch("test").get();
assertHitCount(response, 1L);
client().admin().indices().prepareDelete("test").get();
@ -513,7 +517,8 @@ public class IndexShardIT extends ESSingleNodeTestCase {
IndexShard shard = indexService.getShardOrNull(0);
client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get();
client().prepareDelete("test", "test", "0").get();
client().prepareIndex("test", "test", "1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get();
client().prepareIndex("test", "test", "1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON)
.setRefreshPolicy(IMMEDIATE).get();
IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {};
shard.close("simon says", false);
@ -579,14 +584,17 @@ public class IndexShardIT extends ESSingleNodeTestCase {
.setTransientSettings(Settings.builder().put("network.breaker.inflight_requests.overhead", 0.0)).get();
// Generate a couple of segments
client().prepareIndex("test", "_doc", "1").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
.setRefreshPolicy(IMMEDIATE).get();
client().prepareIndex("test", "_doc", "1")
.setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
.setRefreshPolicy(IMMEDIATE).get();
// Use routing so 2 documents are guaranteed to be on the same shard
String routing = randomAlphaOfLength(5);
client().prepareIndex("test", "_doc", "2").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
.setRefreshPolicy(IMMEDIATE).setRouting(routing).get();
client().prepareIndex("test", "_doc", "3").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
.setRefreshPolicy(IMMEDIATE).setRouting(routing).get();
client().prepareIndex("test", "_doc", "2")
.setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
.setRefreshPolicy(IMMEDIATE).setRouting(routing).get();
client().prepareIndex("test", "_doc", "3")
.setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
.setRefreshPolicy(IMMEDIATE).setRouting(routing).get();
checkAccountingBreaker();
// Test that force merging causes the breaker to be correctly adjusted
@ -600,7 +608,8 @@ public class IndexShardIT extends ESSingleNodeTestCase {
// Test that we're now above the parent limit due to the segments
Exception e = expectThrows(Exception.class,
() -> client().prepareSearch("test").addAggregation(AggregationBuilders.terms("foo_terms").field("foo.keyword")).get());
() -> client().prepareSearch("test")
.addAggregation(AggregationBuilders.terms("foo_terms").field("foo.keyword")).get());
logger.info("--> got: {}", ExceptionsHelper.detailedMessage(e));
assertThat(ExceptionsHelper.detailedMessage(e), containsString("[parent] Data too large, data for [<agg [foo_terms]>]"));
@ -631,9 +640,10 @@ public class IndexShardIT extends ESSingleNodeTestCase {
CircuitBreakerService cbs, IndexingOperationListener... listeners) throws IOException {
ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry());
IndexShard newShard = new IndexShard(initializingShardRouting, indexService.getIndexSettings(), shard.shardPath(),
shard.store(), indexService.getIndexSortSupplier(), indexService.cache(), indexService.mapperService(), indexService.similarityService(),
shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper,
indexService.getThreadPool(), indexService.getBigArrays(), null, Collections.emptyList(), Arrays.asList(listeners), () -> {}, cbs);
shard.store(), indexService.getIndexSortSupplier(), indexService.cache(), indexService.mapperService(),
indexService.similarityService(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper,
indexService.getThreadPool(), indexService.getBigArrays(), null, Collections.emptyList(), Arrays.asList(listeners),
() -> {}, cbs);
return newShard;
}
@ -739,7 +749,9 @@ public class IndexShardIT extends ESSingleNodeTestCase {
}
public void testGlobalCheckpointListeners() throws Exception {
createIndex("test", Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build());
createIndex("test", Settings.builder()
.put("index.number_of_shards", 1)
.put("index.number_of_replicas", 0).build());
ensureGreen();
final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
final IndexService test = indicesService.indexService(resolveIndex("test"));
@ -785,7 +797,9 @@ public class IndexShardIT extends ESSingleNodeTestCase {
}
public void testGlobalCheckpointListenerTimeout() throws InterruptedException {
createIndex("test", Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build());
createIndex("test", Settings.builder()
.put("index.number_of_shards", 1)
.put("index.number_of_replicas", 0).build());
ensureGreen();
final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
final IndexService test = indicesService.indexService(resolveIndex("test"));
@ -812,7 +826,9 @@ public class IndexShardIT extends ESSingleNodeTestCase {
}
public void testInvalidateIndicesRequestCacheWhenRollbackEngine() throws Exception {
createIndex("test", Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)
createIndex("test", Settings.builder()
.put("index.number_of_shards", 1)
.put("index.number_of_replicas", 0)
.put("index.refresh_interval", -1).build());
ensureGreen();
final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
@ -838,7 +854,8 @@ public class IndexShardIT extends ESSingleNodeTestCase {
}
shard.refresh("test");
try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
assertThat("numDocs=" + numDocs + " moreDocs=" + moreDocs, (long) searcher.reader().numDocs(), equalTo(numDocs + moreDocs));
assertThat("numDocs=" + numDocs + " moreDocs=" + moreDocs,
(long) searcher.reader().numDocs(), equalTo(numDocs + moreDocs));
}
assertThat("numDocs=" + numDocs + " moreDocs=" + moreDocs,
client().search(countRequest).actionGet().getHits().totalHits, equalTo(numDocs + moreDocs));


@ -613,7 +613,8 @@ public class IndexShardTests extends IndexShardTestCase {
fail();
}
},
ThreadPool.Names.WRITE, "")).getMessage(), containsString("in primary mode cannot be a replication target"));
ThreadPool.Names.WRITE, "")).getMessage(),
containsString("in primary mode cannot be a replication target"));
}
closeShards(indexShard);
@ -720,7 +721,8 @@ public class IndexShardTests extends IndexShardTestCase {
assertTrue(onFailure.get());
assertThat(onFailureException.get(), instanceOf(IllegalStateException.class));
assertThat(
onFailureException.get(), hasToString(containsString("operation primary term [" + (primaryTerm - 1) + "] is too old")));
onFailureException.get(),
hasToString(containsString("operation primary term [" + (primaryTerm - 1) + "] is too old")));
}
{
@ -869,7 +871,8 @@ public class IndexShardTests extends IndexShardTestCase {
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.build();
final IndexMetaData.Builder indexMetadata = IndexMetaData.builder(shardRouting.getIndexName()).settings(settings).primaryTerm(0, 1);
final IndexMetaData.Builder indexMetadata =
IndexMetaData.builder(shardRouting.getIndexName()).settings(settings).primaryTerm(0, 1);
final AtomicBoolean synced = new AtomicBoolean();
final IndexShard primaryShard =
newShard(shardRouting, indexMetadata.build(), null, new InternalEngineFactory(), () -> synced.set(true));
@ -1237,7 +1240,8 @@ public class IndexShardTests extends IndexShardTestCase {
allowShardFailures();
final ShardId shardId = new ShardId("index", "_na_", 0);
final ShardRouting shardRouting =
newShardRouting(shardId, "node", true, ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE);
newShardRouting(shardId, "node", true,
ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE);
final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir());
@ -1771,7 +1775,8 @@ public class IndexShardTests extends IndexShardTestCase {
for (int i = 0; i < totalOps; i++) {
indexDoc(primarySource, "_doc", Integer.toString(i));
}
IndexShardTestCase.updateRoutingEntry(primarySource, primarySource.routingEntry().relocate(randomAlphaOfLength(10), -1));
IndexShardTestCase.updateRoutingEntry(primarySource,
primarySource.routingEntry().relocate(randomAlphaOfLength(10), -1));
final IndexShard primaryTarget = newShard(primarySource.routingEntry().getTargetRelocatingShard());
updateMappings(primaryTarget, primarySource.indexSettings().getIndexMetaData());
recoverReplica(primaryTarget, primarySource, true);
@ -1794,7 +1799,8 @@ public class IndexShardTests extends IndexShardTestCase {
updateMappings(otherShard, shard.indexSettings().getIndexMetaData());
SourceToParse sourceToParse = SourceToParse.source(shard.shardId().getIndexName(), "_doc", "1",
new BytesArray("{}"), XContentType.JSON);
otherShard.applyIndexOperationOnReplica(1, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse);
otherShard.applyIndexOperationOnReplica(1, 1,
IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse);
final ShardRouting primaryShardRouting = shard.routingEntry();
IndexShard newShard = reinitShard(otherShard, ShardRoutingHelper.initWithSameId(primaryShardRouting,
@ -1880,7 +1886,8 @@ public class IndexShardTests extends IndexShardTestCase {
assertTrue(ex.getMessage().contains("failed to fetch index version after copying it over"));
}
routing = ShardRoutingHelper.moveToUnassigned(routing, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "because I say so"));
routing = ShardRoutingHelper.moveToUnassigned(routing,
new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "because I say so"));
routing = ShardRoutingHelper.initialize(routing, newShard.routingEntry().currentNodeId());
assertTrue("it's already recovering, we should ignore new ones", newShard.ignoreRecoveryAttempt());
try {
@ -2019,7 +2026,8 @@ public class IndexShardTests extends IndexShardTestCase {
shard.refresh("test");
try (Engine.GetResult getResult = shard
.get(new Engine.Get(false, false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1"))))) {
.get(new Engine.Get(false, false, "test", "1",
new Term(IdFieldMapper.NAME, Uid.encodeId("1"))))) {
assertTrue(getResult.exists());
assertNotNull(getResult.searcher());
}
@ -2060,7 +2068,8 @@ public class IndexShardTests extends IndexShardTestCase {
assertEquals(search.totalHits.value, 1);
}
try (Engine.GetResult getResult = newShard
.get(new Engine.Get(false, false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1"))))) {
.get(new Engine.Get(false, false, "test", "1",
new Term(IdFieldMapper.NAME, Uid.encodeId("1"))))) {
assertTrue(getResult.exists());
assertNotNull(getResult.searcher()); // make sure get uses the wrapped reader
assertTrue(getResult.searcher().reader() instanceof FieldMaskingReader);
@ -2476,7 +2485,8 @@ public class IndexShardTests extends IndexShardTestCase {
}
assertThat(requestedMappingUpdates, hasKey("_doc"));
assertThat(requestedMappingUpdates.get("_doc").get().source().string(), equalTo("{\"properties\":{\"foo\":{\"type\":\"text\"}}}"));
assertThat(requestedMappingUpdates.get("_doc").get().source().string(),
equalTo("{\"properties\":{\"foo\":{\"type\":\"text\"}}}"));
closeShards(sourceShard, targetShard);
}
@ -2843,7 +2853,8 @@ public class IndexShardTests extends IndexShardTestCase {
.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum")))
.build();
final IndexShard newShard = newShard(shardRouting, indexShard.shardPath(), indexMetaData,
null, null, indexShard.engineFactory, indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER);
null, null, indexShard.engineFactory,
indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER);
Store.MetadataSnapshot storeFileMetaDatas = newShard.snapshotStoreMetadata();
assertTrue("at least 2 files, commit and data: " + storeFileMetaDatas.toString(), storeFileMetaDatas.size() > 1);
@ -2913,7 +2924,8 @@ public class IndexShardTests extends IndexShardTestCase {
final String id = Integer.toString(i);
SourceToParse sourceToParse = SourceToParse.source(indexShard.shardId().getIndexName(), "_doc", id,
new BytesArray("{}"), XContentType.JSON);
indexShard.applyIndexOperationOnReplica(i, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse);
indexShard.applyIndexOperationOnReplica(i, 1,
IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false, sourceToParse);
if (!gap && i == localCheckpoint + 1) {
localCheckpoint++;
}
@ -3346,14 +3358,16 @@ public class IndexShardTests extends IndexShardTestCase {
.putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
.settings(settings)
.primaryTerm(0, 1).build();
ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(metaData.getIndex(), 0), "n1", true, ShardRoutingState
.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE);
ShardRouting shardRouting =
TestShardRouting.newShardRouting(new ShardId(metaData.getIndex(), 0), "n1", true,
ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE);
final ShardId shardId = shardRouting.shardId();
final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir());
ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
AtomicBoolean markedInactive = new AtomicBoolean();
AtomicReference<IndexShard> primaryRef = new AtomicReference<>();
IndexShard primary = newShard(shardRouting, shardPath, metaData, null, null, new InternalEngineFactory(), () -> {
IndexShard primary = newShard(shardRouting, shardPath, metaData, null, null,
new InternalEngineFactory(), () -> {
}, new IndexEventListener() {
@Override
public void onShardInactive(IndexShard indexShard) {

View File

@ -43,8 +43,10 @@ public class ShardPathTests extends ESTestCase {
ShardId shardId = new ShardId("foo", "0xDEADBEEF", 0);
Path[] paths = env.availableShardPaths(shardId);
Path path = randomFrom(paths);
ShardStateMetaData.FORMAT.write(new ShardStateMetaData(true, "0xDEADBEEF", AllocationId.newInitializing()), path);
ShardPath shardPath = ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings));
ShardStateMetaData.FORMAT.write(new ShardStateMetaData(true, "0xDEADBEEF",
AllocationId.newInitializing()), path);
ShardPath shardPath =
ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings));
assertEquals(path, shardPath.getDataPath());
assertEquals("0xDEADBEEF", shardPath.getShardId().getIndex().getUUID());
assertEquals("foo", shardPath.getShardId().getIndexName());
@ -87,7 +89,8 @@ public class ShardPathTests extends ESTestCase {
public void testIllegalCustomDataPath() {
Index index = new Index("foo", "foo");
final Path path = createTempDir().resolve(index.getUUID()).resolve("0");
Exception e = expectThrows(IllegalArgumentException.class, () -> new ShardPath(true, path, path, new ShardId(index, 0)));
Exception e = expectThrows(IllegalArgumentException.class, () ->
new ShardPath(true, path, path, new ShardId(index, 0)));
assertThat(e.getMessage(), is("shard state path must be different to the data path when using custom data paths"));
}

View File

@ -146,7 +146,8 @@ public class CorruptedFileIT extends ESIntegTestCase {
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
.put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
.put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on purpose
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) // no translog based flush - it might change the .liv / segments.N files
// no translog based flush - it might change the .liv / segments.N files
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
));
ensureGreen();
disableAllocation("test");
@ -222,7 +223,8 @@ public class CorruptedFileIT extends ESIntegTestCase {
}
};
for (MockIndexEventListener.TestEventListener eventListener : internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) {
for (MockIndexEventListener.TestEventListener eventListener :
internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) {
eventListener.setNewDelegate(listener);
}
try {
@ -230,7 +232,8 @@ public class CorruptedFileIT extends ESIntegTestCase {
latch.await();
assertThat(exception, empty());
} finally {
for (MockIndexEventListener.TestEventListener eventListener : internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) {
for (MockIndexEventListener.TestEventListener eventListener :
internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) {
eventListener.setNewDelegate(null);
}
}
@ -248,7 +251,8 @@ public class CorruptedFileIT extends ESIntegTestCase {
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
.put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
.put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on purpose
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) // no translog based flush - it might change the .liv / segments.N files
// no translog based flush - it might change the .liv / segments.N files
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
));
ensureGreen();
IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
@ -284,7 +288,8 @@ public class CorruptedFileIT extends ESIntegTestCase {
}
assertThat(response.getStatus(), is(ClusterHealthStatus.RED));
ClusterState state = client().admin().cluster().prepareState().get().getState();
GroupShardsIterator<ShardIterator> shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false);
GroupShardsIterator<ShardIterator> shardIterators =
state.getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false);
for (ShardIterator iterator : shardIterators) {
ShardRouting routing;
while ((routing = iterator.nextOrNull()) != null) {
@ -338,8 +343,10 @@ public class CorruptedFileIT extends ESIntegTestCase {
final AtomicBoolean corrupt = new AtomicBoolean(true);
final CountDownLatch hasCorrupted = new CountDownLatch(1);
for (NodeStats dataNode : dataNodeStats) {
MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), (connection, requestId, action, request, options) -> {
MockTransportService mockTransportService =
((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
(connection, requestId, action, request, options) -> {
if (corrupt.get() && action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) {
RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
byte[] array = BytesRef.deepCopyOf(req.content().toBytesRef()).bytes;
@ -353,7 +360,8 @@ public class CorruptedFileIT extends ESIntegTestCase {
Settings build = Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
.put("index.routing.allocation.include._name", primariesNode.getNode().getName() + "," + unluckyNode.getNode().getName()).build();
.put("index.routing.allocation.include._name",
primariesNode.getNode().getName() + "," + unluckyNode.getNode().getName()).build();
client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
client().admin().cluster().prepareReroute().get();
hasCorrupted.await();
@ -369,7 +377,9 @@ public class CorruptedFileIT extends ESIntegTestCase {
int numDocs = scaledRandomIntBetween(100, 1000);
internalCluster().ensureAtLeastNumDataNodes(2);
if (cluster().numDataNodes() < 3) {
internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false));
internalCluster().startNode(Settings.builder()
.put(Node.NODE_DATA_SETTING.getKey(), true)
.put(Node.NODE_MASTER_SETTING.getKey(), false));
}
NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
List<NodeStats> dataNodeStats = new ArrayList<>();
@ -406,14 +416,17 @@ public class CorruptedFileIT extends ESIntegTestCase {
assertHitCount(countResponse, numDocs);
final boolean truncate = randomBoolean();
for (NodeStats dataNode : dataNodeStats) {
MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), (connection, requestId, action, request, options) -> {
MockTransportService mockTransportService =
((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName()));
mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
(connection, requestId, action, request, options) -> {
if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) {
RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
if (truncate && req.length() > 1) {
BytesRef bytesRef = req.content().toBytesRef();
BytesArray array = new BytesArray(bytesRef.bytes, bytesRef.offset, (int) req.length() - 1);
request = new RecoveryFileChunkRequest(req.recoveryId(), req.shardId(), req.metadata(), req.position(), array, req.lastChunk(), req.totalTranslogOps(), req.sourceThrottleTimeInNanos());
request = new RecoveryFileChunkRequest(req.recoveryId(), req.shardId(), req.metadata(), req.position(),
array, req.lastChunk(), req.totalTranslogOps(), req.sourceThrottleTimeInNanos());
} else {
assert req.content().toBytesRef().bytes == req.content().toBytesRef().bytes : "no internal reference!!";
final byte[] array = req.content().toBytesRef().bytes;
@ -466,12 +479,16 @@ public class CorruptedFileIT extends ESIntegTestCase {
int numDocs = scaledRandomIntBetween(100, 1000);
internalCluster().ensureAtLeastNumDataNodes(2);
assertAcked(prepareCreate("test").setSettings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") // no replicas for this test
.put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
.put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on purpose
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) // no translog based flush - it might change the .liv / segments.N files
));
assertAcked(
prepareCreate("test").setSettings(Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") // no replicas for this test
.put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
// no checkindex - we corrupt shards on purpose
.put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
// no translog based flush - it might change the .liv / segments.N files
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
)
);
ensureGreen();
IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
for (int i = 0; i < builders.length; i++) {
@ -496,10 +513,11 @@ public class CorruptedFileIT extends ESIntegTestCase {
.put("compress", randomBoolean())
.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
logger.info("--> snapshot");
final CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
.setWaitForCompletion(true)
.setIndices("test")
.get();
final CreateSnapshotResponse createSnapshotResponse =
client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
.setWaitForCompletion(true)
.setIndices("test")
.get();
final SnapshotState snapshotState = createSnapshotResponse.getSnapshotInfo().state();
logger.info("--> snapshot terminated with state " + snapshotState);
final List<Path> files = listShardFiles(shardRouting);
@ -623,7 +641,8 @@ public class CorruptedFileIT extends ESIntegTestCase {
Set<Path> files = new TreeSet<>(); // treeset makes sure iteration order is deterministic
for (FsInfo.Path info : nodeStatses.getNodes().get(0).getFs()) {
String path = info.getPath();
Path file = PathUtils.get(path).resolve("indices").resolve(test.getUUID()).resolve(Integer.toString(shardRouting.getId())).resolve("index");
Path file = PathUtils.get(path)
.resolve("indices").resolve(test.getUUID()).resolve(Integer.toString(shardRouting.getId())).resolve("index");
if (Files.exists(file)) { // multi data path might only have one path in use
try (DirectoryStream<Path> stream = Files.newDirectoryStream(file)) {
for (Path item : stream) {
@ -666,8 +685,16 @@ public class CorruptedFileIT extends ESIntegTestCase {
final String newSegmentName = IndexFileNames.parseSegmentName(current.getFileName().toString());
final String oldSegmentName = IndexFileNames.parseSegmentName(last.getFileName().toString());
if (newSegmentName.equals(oldSegmentName)) {
int oldGen = Integer.parseInt(IndexFileNames.stripExtension(IndexFileNames.stripSegmentName(last.getFileName().toString())).replace("_", ""), Character.MAX_RADIX);
int newGen = Integer.parseInt(IndexFileNames.stripExtension(IndexFileNames.stripSegmentName(current.getFileName().toString())).replace("_", ""), Character.MAX_RADIX);
int oldGen =
Integer.parseInt(
IndexFileNames.stripExtension(
IndexFileNames.stripSegmentName(last.getFileName().toString())).replace("_", ""),
Character.MAX_RADIX
);
int newGen = Integer.parseInt(
IndexFileNames.stripExtension(
IndexFileNames.stripSegmentName(current.getFileName().toString())).replace("_", ""),
Character.MAX_RADIX);
if (newGen > oldGen) {
files.remove(last);
} else {

View File

@ -91,7 +91,8 @@ public class CorruptedTranslogIT extends ESIntegTestCase {
// Restart the single node
internalCluster().fullRestart();
client().admin().cluster().prepareHealth().setWaitForYellowStatus().setTimeout(new TimeValue(1000, TimeUnit.MILLISECONDS)).setWaitForEvents(Priority.LANGUID).get();
client().admin().cluster().prepareHealth().setWaitForYellowStatus().
setTimeout(new TimeValue(1000, TimeUnit.MILLISECONDS)).setWaitForEvents(Priority.LANGUID).get();
try {
client().prepareSearch("test").setQuery(matchAllQuery()).get();
@ -130,13 +131,15 @@ public class CorruptedTranslogIT extends ESIntegTestCase {
/** Disables translog flushing for the specified index */
private static void disableTranslogFlush(String index) {
Settings settings = Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)).build();
Settings settings = Settings.builder()
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)).build();
client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
}
/** Enables translog flushing for the specified index */
private static void enableTranslogFlush(String index) {
Settings settings = Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)).build();
Settings settings = Settings.builder()
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)).build();
client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
}
}

View File

@ -60,7 +60,8 @@ public class IndexStoreTests extends ESTestCase {
}
Settings settings = settingsBuilder.build();
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("foo", settings);
FsDirectoryService service = new FsDirectoryService(indexSettings, null, new ShardPath(false, tempDir, tempDir, new ShardId(index, 0)));
FsDirectoryService service = new FsDirectoryService(indexSettings, null,
new ShardPath(false, tempDir, tempDir, new ShardId(index, 0)));
try (Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE)) {
switch (type) {
case NIOFS:

View File

@ -193,9 +193,10 @@ public class StoreTests extends ESTestCase {
public void testVerifyingIndexOutputOnEmptyFile() throws IOException {
Directory dir = newDirectory();
IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo.bar", 0, Store.digestToString(0),
MIN_SUPPORTED_LUCENE_VERSION),
dir.createOutput("foo1.bar", IOContext.DEFAULT));
IndexOutput verifyingOutput =
new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo.bar", 0, Store.digestToString(0),
MIN_SUPPORTED_LUCENE_VERSION),
dir.createOutput("foo1.bar", IOContext.DEFAULT));
try {
Store.verify(verifyingOutput);
fail("should be a corrupted index");
@ -296,13 +297,15 @@ public class StoreTests extends ESTestCase {
final ShardId shardId = new ShardId("index", "_na_", 1);
Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId));
// set default codec - all segments need checksums
IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()));
IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig(random(),
new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()));
int docs = 1 + random().nextInt(100);
for (int i = 0; i < docs; i++) {
Document doc = new Document();
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body",
TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
writer.addDocument(doc);
}
@ -311,7 +314,8 @@ public class StoreTests extends ESTestCase {
if (random().nextBoolean()) {
Document doc = new Document();
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body",
TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
writer.updateDocument(new Term("id", "" + i), doc);
}
}
@ -459,10 +463,13 @@ public class StoreTests extends ESTestCase {
public static void assertConsistent(Store store, Store.MetadataSnapshot metadata) throws IOException {
for (String file : store.directory().listAll()) {
if (!IndexWriter.WRITE_LOCK_NAME.equals(file) && !IndexFileNames.OLD_SEGMENTS_GEN.equals(file) && file.startsWith("extra") == false) {
assertTrue(file + " is not in the map: " + metadata.asMap().size() + " vs. " + store.directory().listAll().length, metadata.asMap().containsKey(file));
if (!IndexWriter.WRITE_LOCK_NAME.equals(file) &&
!IndexFileNames.OLD_SEGMENTS_GEN.equals(file) && file.startsWith("extra") == false) {
assertTrue(file + " is not in the map: " + metadata.asMap().size() + " vs. " +
store.directory().listAll().length, metadata.asMap().containsKey(file));
} else {
assertFalse(file + " is not in the map: " + metadata.asMap().size() + " vs. " + store.directory().listAll().length, metadata.asMap().containsKey(file));
assertFalse(file + " is not in the map: " + metadata.asMap().size() + " vs. " +
store.directory().listAll().length, metadata.asMap().containsKey(file));
}
}
}
@ -473,7 +480,8 @@ public class StoreTests extends ESTestCase {
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body",
TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
docs.add(doc);
}
@ -595,14 +603,17 @@ public class StoreTests extends ESTestCase {
Store.MetadataSnapshot newCommitMetaData = store.getMetadata(null);
Store.RecoveryDiff newCommitDiff = newCommitMetaData.recoveryDiff(metadata);
if (delFile != null) {
assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 5)); // segments_N, del file, cfs, cfe, si for the new segment
assertThat(newCommitDiff.identical.size(),
equalTo(newCommitMetaData.size() - 5)); // segments_N, del file, cfs, cfe, si for the new segment
assertThat(newCommitDiff.different.size(), equalTo(1)); // the del file must be different
assertThat(newCommitDiff.different.get(0).name(), endsWith(".liv"));
assertThat(newCommitDiff.missing.size(), equalTo(4)); // segments_N,cfs, cfe, si for the new segment
} else {
assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 4)); // segments_N, cfs, cfe, si for the new segment
assertThat(newCommitDiff.identical.size(),
equalTo(newCommitMetaData.size() - 4)); // segments_N, cfs, cfe, si for the new segment
assertThat(newCommitDiff.different.size(), equalTo(0));
assertThat(newCommitDiff.missing.size(), equalTo(4)); // an entire segment must be missing (single doc segment got dropped) plus the commit is different
assertThat(newCommitDiff.missing.size(),
equalTo(4)); // an entire segment must be missing (single doc segment got dropped) plus the commit is different
}
deleteContent(store.directory());
@ -613,7 +624,8 @@ public class StoreTests extends ESTestCase {
final ShardId shardId = new ShardId("index", "_na_", 1);
Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId));
// this time random codec....
IndexWriterConfig indexWriterConfig = newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec());
IndexWriterConfig indexWriterConfig =
newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec());
// we keep all commits and that allows us clean based on multiple snapshots
indexWriterConfig.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE);
IndexWriter writer = new IndexWriter(store.directory(), indexWriterConfig);
@ -626,7 +638,8 @@ public class StoreTests extends ESTestCase {
}
Document doc = new Document();
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body",
TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
writer.addDocument(doc);
@ -635,7 +648,8 @@ public class StoreTests extends ESTestCase {
writer.commit();
Document doc = new Document();
doc.add(new TextField("id", "" + docs++, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body",
TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
writer.addDocument(doc);
}
@ -647,7 +661,8 @@ public class StoreTests extends ESTestCase {
if (random().nextBoolean()) {
Document doc = new Document();
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body",
TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
writer.updateDocument(new Term("id", "" + i), doc);
}
}
@ -695,7 +710,8 @@ public class StoreTests extends ESTestCase {
}
public void testOnCloseCallback() throws IOException {
final ShardId shardId = new ShardId(new Index(randomRealisticUnicodeOfCodepointLengthBetween(1, 10), "_na_"), randomIntBetween(0, 100));
final ShardId shardId =
new ShardId(new Index(randomRealisticUnicodeOfCodepointLengthBetween(1, 10), "_na_"), randomIntBetween(0, 100));
final AtomicInteger count = new AtomicInteger(0);
final ShardLock lock = new DummyShardLock(shardId);
@ -797,8 +813,10 @@ public class StoreTests extends ESTestCase {
}
protected Store.MetadataSnapshot createMetaDataSnapshot() {
StoreFileMetaData storeFileMetaData1 = new StoreFileMetaData("segments", 1, "666", MIN_SUPPORTED_LUCENE_VERSION);
StoreFileMetaData storeFileMetaData2 = new StoreFileMetaData("no_segments", 1, "666", MIN_SUPPORTED_LUCENE_VERSION);
StoreFileMetaData storeFileMetaData1 =
new StoreFileMetaData("segments", 1, "666", MIN_SUPPORTED_LUCENE_VERSION);
StoreFileMetaData storeFileMetaData2 =
new StoreFileMetaData("no_segments", 1, "666", MIN_SUPPORTED_LUCENE_VERSION);
Map<String, StoreFileMetaData> storeFileMetaDataMap = new HashMap<>();
storeFileMetaDataMap.put(storeFileMetaData1.name(), storeFileMetaData1);
storeFileMetaDataMap.put(storeFileMetaData2.name(), storeFileMetaData2);
@ -839,7 +857,9 @@ public class StoreTests extends ESTestCase {
public void testStreamStoreFilesMetaData() throws Exception {
Store.MetadataSnapshot metadataSnapshot = createMetaDataSnapshot();
TransportNodesListShardStoreMetaData.StoreFilesMetaData outStoreFileMetaData = new TransportNodesListShardStoreMetaData.StoreFilesMetaData(new ShardId("test", "_na_", 0),metadataSnapshot);
TransportNodesListShardStoreMetaData.StoreFilesMetaData outStoreFileMetaData =
new TransportNodesListShardStoreMetaData.StoreFilesMetaData(new ShardId("test", "_na_", 0),
metadataSnapshot);
ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
org.elasticsearch.Version targetNodeVersion = randomVersion(random());
@ -848,7 +868,8 @@ public class StoreTests extends ESTestCase {
ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
InputStreamStreamInput in = new InputStreamStreamInput(inBuffer);
in.setVersion(targetNodeVersion);
TransportNodesListShardStoreMetaData.StoreFilesMetaData inStoreFileMetaData = TransportNodesListShardStoreMetaData.StoreFilesMetaData.readStoreFilesMetaData(in);
TransportNodesListShardStoreMetaData.StoreFilesMetaData inStoreFileMetaData =
TransportNodesListShardStoreMetaData.StoreFilesMetaData.readStoreFilesMetaData(in);
Iterator<StoreFileMetaData> outFiles = outStoreFileMetaData.iterator();
for (StoreFileMetaData inFile : inStoreFileMetaData) {
assertThat(inFile.name(), equalTo(outFiles.next().name()));
@ -867,7 +888,8 @@ public class StoreTests extends ESTestCase {
for (int i = 0; i < numDocs; i++) {
Document doc = new Document();
doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new TextField("body",
TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
docs.add(doc);
}
@ -893,7 +915,8 @@ public class StoreTests extends ESTestCase {
// expected
}
assertTrue(store.isMarkedCorrupted());
Lucene.cleanLuceneIndex(store.directory()); // we have to remove the index since it's corrupted and might fail the MocKDirWrapper checkindex call
// we have to remove the index since it's corrupted and might fail the MocKDirWrapper checkindex call
Lucene.cleanLuceneIndex(store.directory());
store.close();
}
@ -962,7 +985,8 @@ public class StoreTests extends ESTestCase {
store.failIfCorrupted();
fail("should be corrupted");
} catch (CorruptIndexException e) {
assertTrue(e.getMessage().startsWith("[index][1] Preexisting corrupted index [" + uuid +"] caused by: CorruptIndexException[foo (resource=bar)]"));
assertTrue(e.getMessage().startsWith("[index][1] Preexisting corrupted index [" + uuid +
"] caused by: CorruptIndexException[foo (resource=bar)]"));
assertTrue(e.getMessage().contains(ExceptionsHelper.stackTrace(exception)));
}
@ -977,7 +1001,8 @@ public class StoreTests extends ESTestCase {
store.failIfCorrupted();
fail("should be corrupted");
} catch (CorruptIndexException e) {
assertTrue(e.getMessage().startsWith("[index][1] Preexisting corrupted index [" + uuid + "] caused by: CorruptIndexException[foo (resource=bar)]"));
assertTrue(e.getMessage().startsWith("[index][1] Preexisting corrupted index [" + uuid +
"] caused by: CorruptIndexException[foo (resource=bar)]"));
assertFalse(e.getMessage().contains(ExceptionsHelper.stackTrace(exception)));
}

View File

@ -524,7 +524,8 @@ public class TranslogTests extends ESTestCase {
}
public void testTotalTests() {
final TranslogStats total = new TranslogStats(0, 0, 0, 0, 1);
final TranslogStats total =
new TranslogStats(0, 0, 0, 0, 1);
final int n = randomIntBetween(0, 16);
final List<TranslogStats> statsList = new ArrayList<>(n);
for (int i = 0; i < n; i++) {
@ -552,21 +553,27 @@ public class TranslogTests extends ESTestCase {
}
public void testNegativeNumberOfOperations() {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(-1, 1, 1, 1, 1));
IllegalArgumentException e =
expectThrows(IllegalArgumentException.class,
() -> new TranslogStats(-1, 1, 1, 1, 1));
assertThat(e, hasToString(containsString("numberOfOperations must be >= 0")));
e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, -1, 1, 1));
e = expectThrows(IllegalArgumentException.class,
() -> new TranslogStats(1, 1, -1, 1, 1));
assertThat(e, hasToString(containsString("uncommittedOperations must be >= 0")));
}
public void testNegativeSizeInBytes() {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, -1, 1, 1, 1));
IllegalArgumentException e =
expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, -1, 1, 1, 1));
assertThat(e, hasToString(containsString("translogSizeInBytes must be >= 0")));
e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, 1, -1, 1));
e = expectThrows(IllegalArgumentException.class,
() -> new TranslogStats(1, 1, 1, -1, 1));
assertThat(e, hasToString(containsString("uncommittedSizeInBytes must be >= 0")));
}
public void testOldestEntryInSeconds() {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, 1, 1, -1));
IllegalArgumentException e =
expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, 1, 1, -1));
assertThat(e, hasToString(containsString("earliestLastModifiedAge must be >= 0")));
}
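A third, smaller pattern appears in the TranslogStats tests above: when a declaration and its initializer no longer fit on one line, the initializer drops to a continuation line after the assignment. A compact sketch using the same TranslogStats constructor arguments as the testTotalTests hunk (the wrapper class name is illustrative):

import org.elasticsearch.index.translog.TranslogStats;

class DeclarationWrappingSketch {

    // Before: declaration and constructor call share one line.
    static TranslogStats inline() {
        return new TranslogStats(0, 0, 0, 0, 1);
    }

    // After: the initializer moves to a continuation line, as the hunk above does for "final TranslogStats total".
    static TranslogStats wrapped() {
        final TranslogStats total =
            new TranslogStats(0, 0, 0, 0, 1);
        return total;
    }
}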
@ -687,7 +694,8 @@ public class TranslogTests extends ESTestCase {
List<Long> batch = LongStream.rangeClosed(0, between(0, 100)).boxed().collect(Collectors.toList());
Randomness.shuffle(batch);
for (long seqNo : batch) {
Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[]{1});
Translog.Index op =
new Translog.Index("doc", randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[]{1});
translog.add(op);
}
translog.rollGeneration();
@ -767,7 +775,8 @@ public class TranslogTests extends ESTestCase {
final CountDownLatch downLatch = new CountDownLatch(1);
for (int i = 0; i < threadCount; i++) {
final int threadId = i;
threads[i] = new TranslogThread(translog, downLatch, opsPerThread, threadId, writtenOperations, seqNoGenerator, threadExceptions);
threads[i] =
new TranslogThread(translog, downLatch, opsPerThread, threadId, writtenOperations, seqNoGenerator, threadExceptions);
threads[i].setDaemon(true);
threads[i].start();
}
@ -832,7 +841,9 @@ public class TranslogTests extends ESTestCase {
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations; op++) {
String ascii = randomAlphaOfLengthBetween(1, 50);
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8"))));
locations.add(
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8")))
);
}
translog.close();
@ -859,7 +870,9 @@ public class TranslogTests extends ESTestCase {
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations; op++) {
String ascii = randomAlphaOfLengthBetween(1, 50);
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8"))));
locations.add(translog.add(
new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8")))
);
}
translog.sync();
@ -1118,13 +1131,16 @@ public class TranslogTests extends ESTestCase {
for (int op = 0; op < translogOperations; op++) {
int seqNo = ++count;
final Translog.Location location =
translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(),
Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))));
if (randomBoolean()) {
assertTrue("at least one operation pending", translog.syncNeeded());
assertTrue("this operation has not been synced", translog.ensureSynced(location));
assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced
// we are the last location so everything should be synced
assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded());
seqNo = ++count;
translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(),
Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))));
assertTrue("one pending operation", translog.syncNeeded());
assertFalse("this op has been synced before", translog.ensureSynced(location)); // not syncing now
assertTrue("we only synced a previous operation yet", translog.syncNeeded());
@ -1153,17 +1169,20 @@ public class TranslogTests extends ESTestCase {
rollAndCommit(translog); // do this first so that there is at least one pending tlog entry
}
final Translog.Location location =
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
Integer.toString(++count).getBytes(Charset.forName("UTF-8"))));
locations.add(location);
}
Collections.shuffle(locations, random());
if (randomBoolean()) {
assertTrue("at least one operation pending", translog.syncNeeded());
assertTrue("this operation has not been synced", translog.ensureSynced(locations.stream()));
assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced
// we are the last location so everything should be synced
assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded());
} else if (rarely()) {
rollAndCommit(translog);
assertFalse("location is from a previous translog - already synced", translog.ensureSynced(locations.stream())); // not syncing now
// not syncing now
assertFalse("location is from a previous translog - already synced", translog.ensureSynced(locations.stream()));
assertFalse("no sync needed since no operations in current translog", translog.syncNeeded());
} else {
translog.sync();
@ -1181,7 +1200,8 @@ public class TranslogTests extends ESTestCase {
int count = 0;
for (int op = 0; op < translogOperations; op++) {
locations.add(
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8")))));
translog.add(new Translog.Index("test", "" + op, op,
primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8")))));
if (rarely() && translogOperations > op + 1) {
rollAndCommit(translog);
}
@ -1218,7 +1238,8 @@ public class TranslogTests extends ESTestCase {
int lastSynced = -1;
long lastSyncedGlobalCheckpoint = globalCheckpoint.get();
for (int op = 0; op < translogOperations; op++) {
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
if (randomBoolean()) {
globalCheckpoint.set(globalCheckpoint.get() + randomIntBetween(1, 16));
}
@ -1233,7 +1254,8 @@ public class TranslogTests extends ESTestCase {
Integer.toString(translogOperations).getBytes(Charset.forName("UTF-8"))));
final Checkpoint checkpoint = Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME));
try (TranslogReader reader = translog.openReader(translog.location().resolve(Translog.getFilename(translog.currentFileGeneration())), checkpoint)) {
try (TranslogReader reader =
translog.openReader(translog.location().resolve(Translog.getFilename(translog.currentFileGeneration())), checkpoint)) {
assertEquals(lastSynced + 1, reader.totalOperations());
TranslogSnapshot snapshot = reader.newSnapshot();
@ -1276,7 +1298,8 @@ public class TranslogTests extends ESTestCase {
}
writer.sync();
final BaseTranslogReader reader = randomBoolean() ? writer : translog.openReader(writer.path(), Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME)));
final BaseTranslogReader reader = randomBoolean() ? writer :
translog.openReader(writer.path(), Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME)));
for (int i = 0; i < numOps; i++) {
ByteBuffer buffer = ByteBuffer.allocate(4);
reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * i);
@ -1354,7 +1377,8 @@ public class TranslogTests extends ESTestCase {
int minUncommittedOp = -1;
final boolean commitOften = randomBoolean();
for (int op = 0; op < translogOperations; op++) {
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
final boolean commit = commitOften ? frequently() : rarely();
if (commit && op < translogOperations - 1) {
rollAndCommit(translog);
@ -1375,8 +1399,10 @@ public class TranslogTests extends ESTestCase {
assertNull(snapshot.next());
}
} else {
translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration());
translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(),
() -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
assertEquals("lastCommitted must be 1 less than current",
translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration());
assertFalse(translog.syncNeeded());
try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGeneration, Long.MAX_VALUE)) {
for (int i = minUncommittedOp; i < translogOperations; i++) {
@ -1397,7 +1423,8 @@ public class TranslogTests extends ESTestCase {
Translog.TranslogGeneration translogGeneration = null;
final boolean sync = randomBoolean();
for (int op = 0; op < translogOperations; op++) {
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
if (op == prepareOp) {
translogGeneration = translog.getGeneration();
translog.rollGeneration();
@ -1414,9 +1441,11 @@ public class TranslogTests extends ESTestCase {
TranslogConfig config = translog.getConfig();
final String translogUUID = translog.getTranslogUUID();
final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
try (Translog translog = new Translog(config, translogUUID, deletionPolicy,
() -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertNotNull(translogGeneration);
assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
assertEquals("lastCommitted must be 2 less than current - we never finished the commit",
translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
assertFalse(translog.syncNeeded());
try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) {
int upTo = sync ? translogOperations : prepareOp;
@ -1428,7 +1457,8 @@ public class TranslogTests extends ESTestCase {
}
}
if (randomBoolean()) { // recover twice
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
try (Translog translog = new Translog(config, translogUUID, deletionPolicy,
() -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertNotNull(translogGeneration);
assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice",
translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration());
@ -1438,7 +1468,8 @@ public class TranslogTests extends ESTestCase {
for (int i = 0; i < upTo; i++) {
Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null synced: " + sync, next);
assertEquals("payload mismatch, synced: " + sync, i, Integer.parseInt(next.getSource().source.utf8ToString()));
assertEquals("payload mismatch, synced: " + sync, i,
Integer.parseInt(next.getSource().source.utf8ToString()));
}
}
}
@ -1453,7 +1484,8 @@ public class TranslogTests extends ESTestCase {
Translog.TranslogGeneration translogGeneration = null;
final boolean sync = randomBoolean();
for (int op = 0; op < translogOperations; op++) {
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
if (op == prepareOp) {
translogGeneration = translog.getGeneration();
translog.rollGeneration();
@ -1474,9 +1506,11 @@ public class TranslogTests extends ESTestCase {
final String translogUUID = translog.getTranslogUUID();
final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
try (Translog translog = new Translog(config, translogUUID, deletionPolicy,
() -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertNotNull(translogGeneration);
assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
assertEquals("lastCommitted must be 2 less than current - we never finished the commit",
translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
assertFalse(translog.syncNeeded());
try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) {
int upTo = sync ? translogOperations : prepareOp;
@ -1489,7 +1523,8 @@ public class TranslogTests extends ESTestCase {
}
if (randomBoolean()) { // recover twice
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
try (Translog translog = new Translog(config, translogUUID, deletionPolicy,
() -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertNotNull(translogGeneration);
assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice",
translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration());
@ -1499,7 +1534,8 @@ public class TranslogTests extends ESTestCase {
for (int i = 0; i < upTo; i++) {
Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null synced: " + sync, next);
assertEquals("payload mismatch, synced: " + sync, i, Integer.parseInt(next.getSource().source.utf8ToString()));
assertEquals("payload mismatch, synced: " + sync, i,
Integer.parseInt(next.getSource().source.utf8ToString()));
}
}
}
@ -1513,7 +1549,8 @@ public class TranslogTests extends ESTestCase {
Translog.TranslogGeneration translogGeneration = null;
final boolean sync = randomBoolean();
for (int op = 0; op < translogOperations; op++) {
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations.add(translog.add(new Translog.Index("test", "" + op, op,
primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
if (op == prepareOp) {
translogGeneration = translog.getGeneration();
translog.rollGeneration();
@ -1528,21 +1565,28 @@ public class TranslogTests extends ESTestCase {
TranslogConfig config = translog.getConfig();
Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME);
Checkpoint read = Checkpoint.read(ckp);
Checkpoint corrupted = Checkpoint.emptyTranslogCheckpoint(0, 0, SequenceNumbers.NO_OPS_PERFORMED, 0);
Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), corrupted, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
Checkpoint corrupted = Checkpoint.emptyTranslogCheckpoint(0, 0,
SequenceNumbers.NO_OPS_PERFORMED, 0);
Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)),
corrupted, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
final String translogUUID = translog.getTranslogUUID();
final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
try (Translog ignored = new Translog(config, translogUUID, deletionPolicy,
() -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
fail("corrupted");
} catch (IllegalStateException ex) {
assertEquals("Checkpoint file translog-3.ckp already exists but has corrupted content expected: Checkpoint{offset=3025, " +
"numOps=55, generation=3, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-1, minTranslogGeneration=1, trimmedAboveSeqNo=-2} but got: Checkpoint{offset=0, numOps=0, " +
"generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-1, minTranslogGeneration=0, trimmedAboveSeqNo=-2}", ex.getMessage());
"numOps=55, generation=3, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-1, minTranslogGeneration=1, trimmedAboveSeqNo=-2} " +
"but got: Checkpoint{offset=0, numOps=0, generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-1, " +
"minTranslogGeneration=0, trimmedAboveSeqNo=-2}", ex.getMessage());
}
Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING);
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)),
read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING);
try (Translog translog = new Translog(config, translogUUID, deletionPolicy,
() -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertNotNull(translogGeneration);
assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
assertEquals("lastCommitted must be 2 less than current - we never finished the commit",
translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
assertFalse(translog.syncNeeded());
try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) {
int upTo = sync ? translogOperations : prepareOp;
@ -1560,7 +1604,8 @@ public class TranslogTests extends ESTestCase {
List<Translog.Operation> ops = new ArrayList<>();
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations; op++) {
Translog.Index test = new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")));
Translog.Index test = new Translog.Index("test", "" + op, op, primaryTerm.get(),
Integer.toString(op).getBytes(Charset.forName("UTF-8")));
ops.add(test);
}
Translog.writeOperations(out, ops);
@ -1687,7 +1732,8 @@ public class TranslogTests extends ESTestCase {
TranslogConfig config = getTranslogConfig(tempDir);
List<FileChannel> fileChannels = new ArrayList<>();
final Translog failableTLog =
getFailableTranslog(fail, config, randomBoolean(), false, null, createTranslogDeletionPolicy(), fileChannels);
getFailableTranslog(fail, config, randomBoolean(),
false, null, createTranslogDeletionPolicy(), fileChannels);
IOException expectedException = null;
int translogOperations = 0;
@ -1761,8 +1807,10 @@ public class TranslogTests extends ESTestCase {
int translogOperations = randomIntBetween(10, 100);
try (Translog translog2 = create(createTempDir())) {
for (int op = 0; op < translogOperations; op++) {
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations2.add(translog2.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations2.add(translog2.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
}
int iters = randomIntBetween(10, 100);
for (int i = 0; i < iters; i++) {
@ -1788,7 +1836,8 @@ public class TranslogTests extends ESTestCase {
int translogOperations = randomIntBetween(1, 10);
int firstUncommitted = 0;
for (int op = 0; op < translogOperations; op++) {
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
if (randomBoolean()) {
rollAndCommit(translog);
firstUncommitted = op + 1;
@ -1820,10 +1869,12 @@ public class TranslogTests extends ESTestCase {
}
public void testFailOnClosedWrite() throws IOException {
translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(),
Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
translog.close();
try {
translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(),
Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
fail("closed");
} catch (AlreadyClosedException ex) {
// all is well
@@ -1843,7 +1894,8 @@ public class TranslogTests extends ESTestCase {
final AtomicLong seqNoGenerator = new AtomicLong();
for (int i = 0; i < threadCount; i++) {
final int threadId = i;
threads[i] = new TranslogThread(translog, downLatch, opsPerThread, threadId, writtenOperations, seqNoGenerator, threadExceptions);
threads[i] = new TranslogThread(translog, downLatch, opsPerThread, threadId,
writtenOperations, seqNoGenerator, threadExceptions);
threads[i].setDaemon(true);
threads[i].start();
}
@@ -1941,7 +1993,8 @@ public class TranslogTests extends ESTestCase {
while (failed == false) {
try {
locations.add(translog.add(
new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(), Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(),
Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
translog.sync();
opsSynced++;
} catch (MockDirectoryWrapper.FakeIOException ex) {
@@ -1962,7 +2015,8 @@ public class TranslogTests extends ESTestCase {
if (randomBoolean()) {
try {
locations.add(translog.add(
new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(), Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(),
Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
fail("we are already closed");
} catch (AlreadyClosedException ex) {
assertNotNull(ex.getCause());
@@ -1996,14 +2050,17 @@ public class TranslogTests extends ESTestCase {
translog.close(); // we are closed
final String translogUUID = translog.getTranslogUUID();
final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration());
try (Translog tlog = new Translog(config, translogUUID, deletionPolicy,
() -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertEquals("lastCommitted must be 1 less than current",
translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration());
assertFalse(tlog.syncNeeded());
try (Translog.Snapshot snapshot = tlog.newSnapshot()) {
assertEquals(opsSynced, snapshot.totalOperations());
for (int i = 0; i < opsSynced; i++) {
assertEquals("expected operation" + i + " to be in the previous translog but wasn't", tlog.currentFileGeneration() - 1,
assertEquals("expected operation" + i + " to be in the previous translog but wasn't",
tlog.currentFileGeneration() - 1,
locations.get(i).generation);
Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null", next);
@@ -2019,11 +2076,13 @@ public class TranslogTests extends ESTestCase {
LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer borders regularly
for (int opsAdded = 0; opsAdded < numOps; opsAdded++) {
locations.add(translog.add(
new Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))));
new Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(),
lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))));
try (Translog.Snapshot snapshot = this.translog.newSnapshot()) {
assertEquals(opsAdded + 1, snapshot.totalOperations());
for (int i = 0; i < opsAdded; i++) {
assertEquals("expected operation" + i + " to be in the current translog but wasn't", translog.currentFileGeneration(),
assertEquals("expected operation" + i + " to be in the current translog but wasn't",
translog.currentFileGeneration(),
locations.get(i).generation);
Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null", next);
@@ -2036,13 +2095,16 @@ public class TranslogTests extends ESTestCase {
Path tempDir = createTempDir();
final FailSwitch fail = new FailSwitch();
TranslogConfig config = getTranslogConfig(tempDir);
Translog translog = getFailableTranslog(fail, config, false, true, null, createTranslogDeletionPolicy());
Translog translog = getFailableTranslog(fail, config, false, true, null,
createTranslogDeletionPolicy());
LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer borders regularly
translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(),
lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
fail.failAlways();
try {
Translog.Location location = translog.add(
new Translog.Index("test", "2", 1, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
new Translog.Index("test", "2", 1, primaryTerm.get(),
lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
if (randomBoolean()) {
translog.ensureSynced(location);
} else {
@@ -2076,7 +2138,8 @@ public class TranslogTests extends ESTestCase {
List<LocationOperation> writtenOperations = Collections.synchronizedList(new ArrayList<>());
for (int i = 0; i < threadCount; i++) {
final int threadId = i;
threads[i] = new TranslogThread(translog, downLatch, 200, threadId, writtenOperations, seqNoGenerator, threadExceptions) {
threads[i] = new TranslogThread(translog, downLatch, 200, threadId,
writtenOperations, seqNoGenerator, threadExceptions) {
@Override
protected Translog.Location add(Translog.Operation op) throws IOException {
Translog.Location add = super.add(op);
@@ -2132,7 +2195,8 @@ public class TranslogTests extends ESTestCase {
}
}
try (Translog tlog =
new Translog(config, translogUUID, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
new Translog(config, translogUUID, createTranslogDeletionPolicy(),
() -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
Translog.Snapshot snapshot = tlog.newSnapshot()) {
if (writtenOperations.size() != snapshot.totalOperations()) {
for (int i = 0; i < threadCount; i++) {
@@ -2143,7 +2207,8 @@ public class TranslogTests extends ESTestCase {
}
assertEquals(writtenOperations.size(), snapshot.totalOperations());
for (int i = 0; i < writtenOperations.size(); i++) {
assertEquals("expected operation" + i + " to be in the previous translog but wasn't", tlog.currentFileGeneration() - 1, writtenOperations.get(i).location.generation);
assertEquals("expected operation" + i + " to be in the previous translog but wasn't",
tlog.currentFileGeneration() - 1, writtenOperations.get(i).location.generation);
Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null", next);
assertEquals(next, writtenOperations.get(i).operation);
@@ -2159,7 +2224,8 @@ public class TranslogTests extends ESTestCase {
public void testRecoveryFromAFutureGenerationCleansUp() throws IOException {
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations / 2; op++) {
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
if (rarely()) {
translog.rollGeneration();
}
@@ -2167,7 +2233,8 @@ public class TranslogTests extends ESTestCase {
translog.rollGeneration();
long comittedGeneration = randomLongBetween(2, translog.currentFileGeneration());
for (int op = translogOperations / 2; op < translogOperations; op++) {
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
if (rarely()) {
translog.rollGeneration();
}
@@ -2178,7 +2245,8 @@ public class TranslogTests extends ESTestCase {
final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1);
deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE));
deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration);
translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy,
() -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
assertThat(translog.getMinFileGeneration(), equalTo(1L));
// no trimming done yet, just recovered
for (long gen = 1; gen < translog.currentFileGeneration(); gen++) {
@@ -2209,7 +2277,8 @@ public class TranslogTests extends ESTestCase {
translogUUID = translog.getTranslogUUID();
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations / 2; op++) {
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
if (rarely()) {
translog.rollGeneration();
}
@@ -2217,7 +2286,8 @@ public class TranslogTests extends ESTestCase {
translog.rollGeneration();
comittedGeneration = randomLongBetween(2, translog.currentFileGeneration());
for (int op = translogOperations / 2; op < translogOperations; op++) {
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(),
Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
if (rarely()) {
translog.rollGeneration();
}
@@ -2234,7 +2304,8 @@ public class TranslogTests extends ESTestCase {
final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1);
deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE));
deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration);
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
try (Translog translog = new Translog(config, translogUUID, deletionPolicy,
() -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
// we don't know when things broke exactly
assertThat(translog.getMinFileGeneration(), greaterThanOrEqualTo(1L));
assertThat(translog.getMinFileGeneration(), lessThanOrEqualTo(comittedGeneration));
@@ -2300,8 +2371,10 @@ public class TranslogTests extends ESTestCase {
}
boolean success = false;
try {
final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation
ThrowingFileChannel throwingFileChannel = new ThrowingFileChannel(fail, isCkpFile ? false : partialWrites, throwUnknownException, channel);
// don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation
final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp");
ThrowingFileChannel throwingFileChannel =
new ThrowingFileChannel(fail, isCkpFile ? false : partialWrites, throwUnknownException, channel);
success = true;
return throwingFileChannel;
} finally {
@@ -2337,7 +2410,8 @@ public class TranslogTests extends ESTestCase {
private final boolean partialWrite;
private final boolean throwUnknownException;
public ThrowingFileChannel(FailSwitch fail, boolean partialWrite, boolean throwUnknownException, FileChannel delegate) throws MockDirectoryWrapper.FakeIOException {
public ThrowingFileChannel(FailSwitch fail, boolean partialWrite,
boolean throwUnknownException, FileChannel delegate) throws MockDirectoryWrapper.FakeIOException {
super(delegate);
this.fail = fail;
this.partialWrite = partialWrite;
@@ -2426,7 +2500,8 @@ public class TranslogTests extends ESTestCase {
translog.add(new Translog.Index("test", "boom", 0, primaryTerm.get(), "boom".getBytes(Charset.forName("UTF-8"))));
translog.close();
try {
new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get) {
new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(),
() -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get) {
@Override
protected TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, long initialGlobalCheckpoint)
throws IOException {
@@ -2441,7 +2516,8 @@ public class TranslogTests extends ESTestCase {
}
public void testRecoverWithUnbackedNextGen() throws IOException {
translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(),
Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
translog.close();
TranslogConfig config = translog.getConfig();
@@ -2457,7 +2533,8 @@ public class TranslogTests extends ESTestCase {
assertNotNull("operation 1 must be non-null", op);
assertEquals("payload mismatch for operation 1", 1, Integer.parseInt(op.getSource().source.utf8ToString()));
tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(), Integer.toString(2).getBytes(Charset.forName("UTF-8"))));
tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(),
Integer.toString(2).getBytes(Charset.forName("UTF-8"))));
}
try (Translog tlog = openTranslog(config, translog.getTranslogUUID());
@@ -2466,16 +2543,19 @@ public class TranslogTests extends ESTestCase {
Translog.Operation secondOp = snapshot.next();
assertNotNull("operation 2 must be non-null", secondOp);
assertEquals("payload mismatch for operation 2", Integer.parseInt(secondOp.getSource().source.utf8ToString()), 2);
assertEquals("payload mismatch for operation 2",
Integer.parseInt(secondOp.getSource().source.utf8ToString()), 2);
Translog.Operation firstOp = snapshot.next();
assertNotNull("operation 1 must be non-null", firstOp);
assertEquals("payload mismatch for operation 1", Integer.parseInt(firstOp.getSource().source.utf8ToString()), 1);
assertEquals("payload mismatch for operation 1",
Integer.parseInt(firstOp.getSource().source.utf8ToString()), 1);
}
}
public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException {
translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(),
Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
translog.close();
TranslogConfig config = translog.getConfig();
Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME);
@@ -2483,13 +2563,15 @@ public class TranslogTests extends ESTestCase {
// don't copy the new file
Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));
TranslogException ex = expectThrows(TranslogException.class, () -> new Translog(config, translog.getTranslogUUID(), translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get));
TranslogException ex = expectThrows(TranslogException.class, () -> new Translog(config, translog.getTranslogUUID(),
translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get));
assertEquals(ex.getMessage(), "failed to create new translog file");
assertEquals(ex.getCause().getClass(), FileAlreadyExistsException.class);
}
public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException {
translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(),
Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
translog.close();
TranslogConfig config = translog.getConfig();
final String translogUUID = translog.getTranslogUUID();
@@ -2501,7 +2583,8 @@ public class TranslogTests extends ESTestCase {
Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));
// we add N+1 and N+2 to ensure we only delete the N+1 file and never jump ahead and wipe without the right condition
Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 2) + ".tlog"));
try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
try (Translog tlog = new Translog(config, translogUUID, deletionPolicy,
() -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertFalse(tlog.syncNeeded());
try (Translog.Snapshot snapshot = tlog.newSnapshot()) {
for (int i = 0; i < 1; i++) {
@@ -2510,7 +2593,8 @@ public class TranslogTests extends ESTestCase {
assertEquals("payload missmatch", i, Integer.parseInt(next.getSource().source.utf8ToString()));
}
}
tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(),
Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
}
TranslogException ex = expectThrows(TranslogException.class,
@@ -2541,12 +2625,14 @@ public class TranslogTests extends ESTestCase {
String generationUUID = null;
try {
boolean committing = false;
final Translog failableTLog = getFailableTranslog(fail, config, randomBoolean(), false, generationUUID, createTranslogDeletionPolicy());
final Translog failableTLog = getFailableTranslog(fail, config, randomBoolean(), false,
generationUUID, createTranslogDeletionPolicy());
try {
LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer borders regularly
for (int opsAdded = 0; opsAdded < numOps; opsAdded++) {
String doc = lineFileDocs.nextDoc().toString();
failableTLog.add(new Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(), doc.getBytes(Charset.forName("UTF-8"))));
failableTLog.add(new Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(),
doc.getBytes(Charset.forName("UTF-8"))));
unsynced.add(doc);
if (randomBoolean()) {
failableTLog.sync();
@@ -2554,7 +2640,8 @@ public class TranslogTests extends ESTestCase {
unsynced.clear();
}
if (randomFloat() < 0.1) {
failableTLog.sync(); // we have to sync here first otherwise we don't know if the sync succeeded if the commit fails
// we have to sync here first otherwise we don't know if the sync succeeded if the commit fails
failableTLog.sync();
syncedDocs.addAll(unsynced);
unsynced.clear();
failableTLog.rollGeneration();
@@ -2620,9 +2707,11 @@ public class TranslogTests extends ESTestCase {
deletionPolicy.setMinTranslogGenerationForRecovery(minGenForRecovery);
if (generationUUID == null) {
// we never managed to successfully create a translog, make it
generationUUID = Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
generationUUID = Translog.createEmptyTranslog(config.getTranslogPath(),
SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
}
try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
try (Translog translog = new Translog(config, generationUUID, deletionPolicy,
() -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
Translog.Snapshot snapshot = translog.newSnapshotFromGen(
new Translog.TranslogGeneration(generationUUID, minGenForRecovery), Long.MAX_VALUE)) {
assertEquals(syncedDocs.size(), snapshot.totalOperations());
@@ -2655,7 +2744,8 @@ public class TranslogTests extends ESTestCase {
public void testCheckpointOnDiskFull() throws IOException {
final Checkpoint checkpoint = randomCheckpoint();
Path tempDir = createTempDir();
Checkpoint.write(FileChannel::open, tempDir.resolve("foo.cpk"), checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
Checkpoint.write(FileChannel::open, tempDir.resolve("foo.cpk"), checkpoint,
StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
final Checkpoint checkpoint2 = randomCheckpoint();
try {
Checkpoint.write((p, o) -> {
@@ -2719,7 +2809,8 @@ public class TranslogTests extends ESTestCase {
document.add(seqID.seqNo);
document.add(seqID.seqNoDocValue);
document.add(seqID.primaryTerm);
ParsedDocument doc = new ParsedDocument(versionField, seqID, "1", "type", null, Arrays.asList(document), B_1, XContentType.JSON,
ParsedDocument doc = new ParsedDocument(versionField, seqID, "1", "type", null,
Arrays.asList(document), B_1, XContentType.JSON,
null);
Engine.Index eIndex = new Engine.Index(newUid(doc), doc, randomSeqNum, randomPrimaryTerm,
@@ -2948,7 +3039,8 @@ public class TranslogTests extends ESTestCase {
for (int gen = 0; gen < generations; gen++) {
final int operations = randomIntBetween(1, 100);
for (int i = 0; i < operations; i++) {
Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo.getAndIncrement(), primaryTerm.get(), new byte[]{1});
Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10),
seqNo.getAndIncrement(), primaryTerm.get(), new byte[]{1});
translog.add(op);
views.peek().add(op);
}
@@ -2973,7 +3065,8 @@ public class TranslogTests extends ESTestCase {
List<Long> batch = LongStream.rangeClosed(0, between(0, 500)).boxed().collect(Collectors.toList());
Randomness.shuffle(batch);
for (Long seqNo : batch) {
Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[]{1});
Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10),
seqNo, primaryTerm.get(), new byte[]{1});
translog.add(op);
latestOperations.put(op.seqNo(), op);
}
@@ -3006,7 +3099,8 @@ public class TranslogTests extends ESTestCase {
public void testTranslogCloseInvariant() throws IOException {
assumeTrue("test only works with assertions enabled", Assertions.ENABLED);
class MisbehavingTranslog extends Translog {
MisbehavingTranslog(TranslogConfig config, String translogUUID, TranslogDeletionPolicy deletionPolicy, LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier) throws IOException {
MisbehavingTranslog(TranslogConfig config, String translogUUID, TranslogDeletionPolicy deletionPolicy,
LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier) throws IOException {
super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier);
}
@@ -3035,7 +3129,8 @@ public class TranslogTests extends ESTestCase {
final TranslogConfig translogConfig = getTranslogConfig(path);
final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings());
final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
MisbehavingTranslog misbehavingTranslog = new MisbehavingTranslog(translogConfig, translogUUID, deletionPolicy, () -> globalCheckpoint.get(), primaryTerm::get);
MisbehavingTranslog misbehavingTranslog = new MisbehavingTranslog(translogConfig, translogUUID, deletionPolicy,
() -> globalCheckpoint.get(), primaryTerm::get);
expectThrows(AssertionError.class, () -> misbehavingTranslog.callCloseDirectly());
expectThrows(AssertionError.class, () -> misbehavingTranslog.callCloseUsingIOUtils());