Mirror of https://github.com/honeymoose/OpenSearch.git, synced 2025-03-09 14:34:43 +00:00
Improve resiliency to auto-formatting in server (#48940)
Backport of #48450. Make a number of changes so that code in the `server` directory is more resilient to automatic formatting. This covers:

* Reformatting multiline JSON to embed whitespace in the strings
* Moving some comments around so they aren't auto-formatted to a strange place. This also required moving some `&&` and `||` operators from end-of-line to start-of-line.
* Adding a helper method, `reformatJson()`, to strip whitespace from a JSON document using XContent methods. This is sometimes necessary where a test is comparing some machine-generated JSON with an expected value.

Also, `HyperLogLogPlusPlus.java` is now excluded from formatting because it contains large data tables that don't reformat well with the current settings, and changing the settings would be worse for the rest of the codebase.
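The `reformatJson()` helper itself is not visible in the hunks below. As a rough illustration, a minimal sketch of such a helper, assuming Elasticsearch's XContent APIs of this era (`XContentType`, `XContentParser`, `XContentBuilder.copyCurrentStructure`) rather than the committed code, might look like:

```java
import java.io.IOException;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

public class JsonReformatExample {

    /**
     * Hypothetical sketch: parse a JSON string and re-serialize it through an
     * XContentBuilder, so that insignificant whitespace is normalized away and a
     * pretty-printed test fixture compares equal to machine-generated JSON.
     */
    public static String reformatJson(String json) throws IOException {
        try (
            XContentParser parser = XContentType.JSON.xContent()
                .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
            XContentBuilder builder = XContentFactory.jsonBuilder()
        ) {
            if (parser.currentToken() == null) {
                parser.nextToken(); // position the parser on the first token
            }
            builder.copyCurrentStructure(parser); // replay the parsed structure into the builder
            return Strings.toString(builder);
        }
    }

    public static void main(String[] args) throws IOException {
        // Whitespace embedded in the string survives auto-formatting of the Java
        // source, and reformatJson() strips it before comparison.
        String pretty = "{\n" + "  \"field\": \"value\"\n" + "}";
        System.out.println(reformatJson(pretty)); // {"field":"value"}
    }
}
```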
This commit is contained in:
parent dd92830801
commit 014e1b1090
@@ -114,7 +114,6 @@ subprojects {
spotless {
  java {

    removeUnusedImports()
    eclipse().configFile rootProject.file('.eclipseformat.xml')
    trimTrailingWhitespace()
@@ -21,7 +21,7 @@
configuration of classes that aren't in packages. -->
<suppress files="test[/\\]framework[/\\]src[/\\]test[/\\]java[/\\]Dummy.java" checks="PackageDeclaration" />

<!-- Intentionally has long example curl commands to coinncide with sibling Painless tests. -->
<!-- Intentionally has long example curl commands to coincide with sibling Painless tests. -->
<suppress files="modules[/\\]lang-painless[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]painless[/\\]ContextExampleTests.java" checks="LineLength" />

<!--
@@ -140,6 +140,22 @@ dependencies {
compileJava.options.compilerArgs << "-Xlint:-cast,-rawtypes,-unchecked"
compileTestJava.options.compilerArgs << "-Xlint:-cast,-rawtypes,-unchecked"

// Until this project is always being formatted with spotless, we need to
// guard against `spotless()` not existing.
try {
  spotless {
    java {
      // Contains large data tables that do not format well.
      targetExclude 'src/main/java/org/elasticsearch/search/aggregations/metrics/HyperLogLogPlusPlus.java'
    }
  }
}
catch (Exception e) {
  if (e.getMessage().contains("Could not find method spotless") == false) {
    throw e;
  }
}

forbiddenPatterns {
  exclude '**/*.json'
  exclude '**/*.jmx'
@@ -145,25 +145,31 @@ public final class BinaryDocValuesRangeQuery extends Query {
INTERSECTS {
    @Override
    boolean matches(BytesRef from, BytesRef to, BytesRef otherFrom, BytesRef otherTo) {
        // part of the other range must touch this range
        // this: |---------------|
        // other: |------|
        /*
         * part of the other range must touch this range
         * this: |---------------|
         * other: |------|
         */
        return from.compareTo(otherTo) <= 0 && to.compareTo(otherFrom) >= 0;
    }
}, WITHIN {
    @Override
    boolean matches(BytesRef from, BytesRef to, BytesRef otherFrom, BytesRef otherTo) {
        // other range must entirely lie within this range
        // this: |---------------|
        // other: |------|
        /*
         * other range must entirely lie within this range
         * this: |---------------|
         * other: |------|
         */
        return from.compareTo(otherFrom) <= 0 && to.compareTo(otherTo) >= 0;
    }
}, CONTAINS {
    @Override
    boolean matches(BytesRef from, BytesRef to, BytesRef otherFrom, BytesRef otherTo) {
        // this and other range must overlap
        // this: |------|
        // other: |---------------|
        /*
         * this and other range must overlap
         * this: |------|
         * other: |---------------|
         */
        return from.compareTo(otherFrom) >= 0 && to.compareTo(otherTo) <= 0;
    }
}, CROSSES {
@@ -110,10 +110,12 @@ final class FetchSearchPhase extends SearchPhase {
} else {
    ScoreDoc[] scoreDocs = reducedQueryPhase.sortedTopDocs.scoreDocs;
    final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(numShards, scoreDocs);
    if (scoreDocs.length == 0) { // no docs to fetch -- sidestep everything and return
    // no docs to fetch -- sidestep everything and return
    if (scoreDocs.length == 0) {
        // we have to release contexts here to free up resources
        phaseResults.stream()
            .map(SearchPhaseResult::queryResult)
            .forEach(this::releaseIrrelevantSearchContext); // we have to release contexts here to free up resources
            .forEach(this::releaseIrrelevantSearchContext);
        finishPhase.run();
    } else {
        final ScoreDoc[] lastEmittedDocPerShard = isScrollSearch ?
@@ -117,10 +117,11 @@ public class RoutingNodes implements Iterable<RoutingNode> {
assignedShardsAdd(shard);
if (shard.relocating()) {
    relocatingShards++;
    entries = nodesToShards.computeIfAbsent(shard.relocatingNodeId(),
        k -> new LinkedHashMap<>()); // LinkedHashMap to preserve order
    // add the counterpart shard with relocatingNodeId reflecting the source from which
    // LinkedHashMap to preserve order.
    // Add the counterpart shard with relocatingNodeId reflecting the source from which
    // it's relocating from.
    entries = nodesToShards.computeIfAbsent(shard.relocatingNodeId(),
        k -> new LinkedHashMap<>());
    ShardRouting targetShardRouting = shard.getTargetRelocatingShard();
    addInitialRecovery(targetShardRouting, indexShard.primary);
    previousValue = entries.put(targetShardRouting.shardId(), targetShardRouting);
@@ -256,7 +256,8 @@ public class Lucene {
    .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
    .setMergePolicy(NoMergePolicy.INSTANCE) // no merges
    .setCommitOnClose(false) // no commits
    .setOpenMode(IndexWriterConfig.OpenMode.CREATE))) // force creation - don't append...
    .setOpenMode(IndexWriterConfig.OpenMode.CREATE) // force creation - don't append...
))
{
    // do nothing and close this will kick of IndexFileDeleter which will remove all pending files
}
@@ -514,15 +514,15 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
if (this == o) return true;
if (!(o instanceof Item)) return false;
Item other = (Item) o;
return Objects.equals(index, other.index) &&
    Objects.equals(type, other.type) &&
    Objects.equals(id, other.id) &&
    Objects.equals(doc, other.doc) &&
    Arrays.equals(fields, other.fields) && // otherwise we are comparing pointers
    Objects.equals(perFieldAnalyzer, other.perFieldAnalyzer) &&
    Objects.equals(routing, other.routing) &&
    Objects.equals(version, other.version) &&
    Objects.equals(versionType, other.versionType);
return Objects.equals(index, other.index)
    && Objects.equals(type, other.type)
    && Objects.equals(id, other.id)
    && Objects.equals(doc, other.doc)
    && Arrays.equals(fields, other.fields) // otherwise we are comparing pointers
    && Objects.equals(perFieldAnalyzer, other.perFieldAnalyzer)
    && Objects.equals(routing, other.routing)
    && Objects.equals(version, other.version)
    && Objects.equals(versionType, other.versionType);
}
}

@@ -1208,23 +1208,23 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ

@Override
protected boolean doEquals(MoreLikeThisQueryBuilder other) {
    return Arrays.equals(fields, other.fields) &&
        Arrays.equals(likeTexts, other.likeTexts) &&
        Arrays.equals(unlikeTexts, other.unlikeTexts) &&
        Arrays.equals(likeItems, other.likeItems) &&
        Arrays.equals(unlikeItems, other.unlikeItems) &&
        Objects.equals(maxQueryTerms, other.maxQueryTerms) &&
        Objects.equals(minTermFreq, other.minTermFreq) &&
        Objects.equals(minDocFreq, other.minDocFreq) &&
        Objects.equals(maxDocFreq, other.maxDocFreq) &&
        Objects.equals(minWordLength, other.minWordLength) &&
        Objects.equals(maxWordLength, other.maxWordLength) &&
        Arrays.equals(stopWords, other.stopWords) && // otherwise we are comparing pointers
        Objects.equals(analyzer, other.analyzer) &&
        Objects.equals(minimumShouldMatch, other.minimumShouldMatch) &&
        Objects.equals(boostTerms, other.boostTerms) &&
        Objects.equals(include, other.include) &&
        Objects.equals(failOnUnsupportedField, other.failOnUnsupportedField);
    return Arrays.equals(fields, other.fields)
        && Arrays.equals(likeTexts, other.likeTexts)
        && Arrays.equals(unlikeTexts, other.unlikeTexts)
        && Arrays.equals(likeItems, other.likeItems)
        && Arrays.equals(unlikeItems, other.unlikeItems)
        && Objects.equals(maxQueryTerms, other.maxQueryTerms)
        && Objects.equals(minTermFreq, other.minTermFreq)
        && Objects.equals(minDocFreq, other.minDocFreq)
        && Objects.equals(maxDocFreq, other.maxDocFreq)
        && Objects.equals(minWordLength, other.minWordLength)
        && Objects.equals(maxWordLength, other.maxWordLength)
        && Arrays.equals(stopWords, other.stopWords) // otherwise we are comparing pointers
        && Objects.equals(analyzer, other.analyzer)
        && Objects.equals(minimumShouldMatch, other.minimumShouldMatch)
        && Objects.equals(boostTerms, other.boostTerms)
        && Objects.equals(include, other.include)
        && Objects.equals(failOnUnsupportedField, other.failOnUnsupportedField);
}

@Override
@@ -243,8 +243,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
// TODO: feels hacky, a block disables state persistence, and then we clean the allocated shards, maybe another flag in blocks?
if (state.blocks().disableStatePersistence()) {
    for (AllocatedIndex<? extends Shard> indexService : indicesService) {
        indicesService.removeIndex(indexService.index(), NO_LONGER_ASSIGNED,
            "cleaning index (disabled block persistence)"); // also cleans shards
        // also cleans shards
        indicesService.removeIndex(indexService.index(), NO_LONGER_ASSIGNED, "cleaning index (disabled block persistence)");
    }
    return;
}
@@ -748,8 +748,22 @@ public final class HyperLogLogPlusPlus implements Releasable {
    -527.016999999993, -664.681000000099, -680.306000000099, -704.050000000047, -850.486000000034, -757.43200000003,
    -713.308999999892, } };

private static final long[] THRESHOLDS = new long[] { 10, 20, 40, 80, 220, 400, 900, 1800, 3100, 6500, 11500, 20000, 50000, 120000,
    350000 };
private static final long[] THRESHOLDS = new long[] {
    10,
    20,
    40,
    80,
    220,
    400,
    900,
    1800,
    3100,
    6500,
    11500,
    20000,
    50000,
    120000,
    350000 };

private final BigArrays bigArrays;
private final OpenBitSet algorithm;
@@ -773,15 +787,15 @@ public final class HyperLogLogPlusPlus implements Releasable {
hashSet = new Hashset(initialBucketCount);
final double alpha;
switch (p) {
    case 4:
        alpha = 0.673;
        break;
    case 5:
        alpha = 0.697;
        break;
    default:
        alpha = 0.7213 / (1 + 1.079 / m);
        break;
    case 4:
        alpha = 0.673;
        break;
    case 5:
        alpha = 0.697;
        break;
    default:
        alpha = 0.7213 / (1 + 1.079 / m);
        break;
}
alphaMM = alpha * m * m;
}
@@ -1050,8 +1064,8 @@ public final class HyperLogLogPlusPlus implements Releasable {

public boolean equals(long bucket, HyperLogLogPlusPlus other) {
    return Objects.equals(p, other.p)
        && Objects.equals(algorithm.get(bucket), other.algorithm.get(bucket))
        && Objects.equals(getComparableData(bucket), other.getComparableData(bucket));
        && Objects.equals(algorithm.get(bucket), other.algorithm.get(bucket))
        && Objects.equals(getComparableData(bucket), other.getComparableData(bucket));
}

/**
@@ -62,8 +62,8 @@ public abstract class WordScorer {
// division by zero, by scoreUnigram.
final long nTerms = terms.size();
this.numTerms = nTerms == -1 ? reader.maxDoc() : nTerms;
this.termsEnum = new FreqTermsEnum(reader, field, !useTotalTermFreq, useTotalTermFreq, null,
    BigArrays.NON_RECYCLING_INSTANCE); // non recycling for now
// non recycling for now
this.termsEnum = new FreqTermsEnum(reader, field, !useTotalTermFreq, useTotalTermFreq, null, BigArrays.NON_RECYCLING_INSTANCE);
this.reader = reader;
this.realWordLikelihood = realWordLikelihood;
this.separator = separator;
@@ -782,15 +782,15 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
}

private static boolean appearsToBeHTTPRequest(BytesReference headerBuffer) {
    return bufferStartsWith(headerBuffer, "GET") ||
        bufferStartsWith(headerBuffer, "POST") ||
        bufferStartsWith(headerBuffer, "PUT") ||
        bufferStartsWith(headerBuffer, "HEAD") ||
        bufferStartsWith(headerBuffer, "DELETE") ||
    return bufferStartsWith(headerBuffer, "GET")
        || bufferStartsWith(headerBuffer, "POST")
        || bufferStartsWith(headerBuffer, "PUT")
        || bufferStartsWith(headerBuffer, "HEAD")
        || bufferStartsWith(headerBuffer, "DELETE")
        // Actually 'OPTIONS'. But we are only guaranteed to have read six bytes at this point.
        bufferStartsWith(headerBuffer, "OPTION") ||
        bufferStartsWith(headerBuffer, "PATCH") ||
        bufferStartsWith(headerBuffer, "TRACE");
        || bufferStartsWith(headerBuffer, "OPTION")
        || bufferStartsWith(headerBuffer, "PATCH")
        || bufferStartsWith(headerBuffer, "TRACE");
}

private static boolean appearsToBeHTTPResponse(BytesReference headerBuffer) {
@@ -34,6 +34,7 @@ import java.util.Set;
 * In case you compare this with the original ISODateTimeFormat, make sure you use a diff
 * call, that ignores whitespaces/tabs/indentations like 'diff -b'
 */

/**
 * Factory that creates instances of DateTimeFormatter based on the ISO8601 standard.
 * <p>
@@ -1720,7 +1721,8 @@ public class StrictISODateTimeFormat {
private static DateTimeFormatter basicWeekDate() {
    if (bwd == null) {
        return new DateTimeFormatterBuilder()
            .appendFixedSignedDecimal(DateTimeFieldType.weekyear(), 4) // ES change, was .appendWeekyear(4, 4)
            // ES change, was .appendWeekyear(4, 4)
            .appendFixedSignedDecimal(DateTimeFieldType.weekyear(), 4)
            .appendLiteral('W')
            .appendFixedDecimal(DateTimeFieldType.weekOfWeekyear(), 2)
            .appendFixedDecimal(DateTimeFieldType.dayOfWeek(), 1)
@@ -1897,7 +1899,8 @@
private static DateTimeFormatter yearElement() {
    if (ye == null) {
        return new DateTimeFormatterBuilder()
            .appendFixedSignedDecimal(DateTimeFieldType.year(), 4) // ES change, was .appendYear(4, 9)
            // ES change, was .appendYear(4, 9)
            .appendFixedSignedDecimal(DateTimeFieldType.year(), 4)
            .toFormatter();
    }
    return ye;
@@ -1907,7 +1910,8 @@
if (mye == null) {
    return new DateTimeFormatterBuilder()
        .appendLiteral('-')
        .appendFixedSignedDecimal(DateTimeFieldType.monthOfYear(), 2) // ES change, was .appendMonthOfYear(2)
        // ES change, was .appendMonthOfYear(2)
        .appendFixedSignedDecimal(DateTimeFieldType.monthOfYear(), 2)
        .toFormatter();
}
return mye;
@@ -1917,7 +1921,8 @@
if (dme == null) {
    return new DateTimeFormatterBuilder()
        .appendLiteral('-')
        .appendFixedSignedDecimal(DateTimeFieldType.dayOfMonth(), 2) // ES change, was .appendDayOfMonth(2)
        // ES change, was .appendDayOfMonth(2)
        .appendFixedSignedDecimal(DateTimeFieldType.dayOfMonth(), 2)
        .toFormatter();
}
return dme;
@@ -1926,7 +1931,8 @@
private static DateTimeFormatter weekyearElement() {
    if (we == null) {
        return new DateTimeFormatterBuilder()
            .appendFixedSignedDecimal(DateTimeFieldType.weekyear(), 4) // ES change, was .appendWeekyear(4, 9)
            // ES change, was .appendWeekyear(4, 9)
            .appendFixedSignedDecimal(DateTimeFieldType.weekyear(), 4)
            .toFormatter();
    }
    return we;
@@ -1936,7 +1942,8 @@
if (wwe == null) {
    return new DateTimeFormatterBuilder()
        .appendLiteral("-W")
        .appendFixedSignedDecimal(DateTimeFieldType.weekOfWeekyear(), 2) // ES change, was .appendWeekOfWeekyear(2)
        // ES change, was .appendWeekOfWeekyear(2)
        .appendFixedSignedDecimal(DateTimeFieldType.weekOfWeekyear(), 2)
        .toFormatter();
}
return wwe;
@@ -1956,7 +1963,8 @@
if (dye == null) {
    return new DateTimeFormatterBuilder()
        .appendLiteral('-')
        .appendFixedSignedDecimal(DateTimeFieldType.dayOfYear(), 3) // ES change, was .appendDayOfYear(3)
        // ES change, was .appendDayOfYear(3)
        .appendFixedSignedDecimal(DateTimeFieldType.dayOfYear(), 3)
        .toFormatter();
}
return dye;
@@ -1974,7 +1982,8 @@
private static DateTimeFormatter hourElement() {
    if (hde == null) {
        return new DateTimeFormatterBuilder()
            .appendFixedSignedDecimal(DateTimeFieldType.hourOfDay(), 2) // ES change, was .appendHourOfDay(2)
            // ES change, was .appendHourOfDay(2)
            .appendFixedSignedDecimal(DateTimeFieldType.hourOfDay(), 2)
            .toFormatter();
    }
    return hde;
@@ -1984,7 +1993,8 @@
if (mhe == null) {
    return new DateTimeFormatterBuilder()
        .appendLiteral(':')
        .appendFixedSignedDecimal(DateTimeFieldType.minuteOfHour(), 2) // ES change, was .appendMinuteOfHour(2)
        // ES change, was .appendMinuteOfHour(2)
        .appendFixedSignedDecimal(DateTimeFieldType.minuteOfHour(), 2)
        .toFormatter();
}
return mhe;
@@ -1994,7 +2004,8 @@
if (sme == null) {
    return new DateTimeFormatterBuilder()
        .appendLiteral(':')
        .appendFixedSignedDecimal(DateTimeFieldType.secondOfMinute(), 2) // ES change, was .appendSecondOfMinute(2)
        // ES change, was .appendSecondOfMinute(2)
        .appendFixedSignedDecimal(DateTimeFieldType.secondOfMinute(), 2)
        .toFormatter();
}
return sme;
@@ -374,8 +374,11 @@ public class TransportMasterNodeActionTests extends ESTestCase {
boolean rejoinSameMaster = failsWithConnectTransportException && randomBoolean();
Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(failsWithConnectTransportException ? 60 : 0));
DiscoveryNode masterNode = this.remoteNode;
setState(clusterService, ClusterState.builder(ClusterStateCreationUtils.state(localNode, masterNode, allNodes))
    .version(randomIntBetween(0, 10))); // use a random base version so it can go down when simulating a restart.
setState(
    clusterService,
    // use a random base version so it can go down when simulating a restart.
    ClusterState.builder(ClusterStateCreationUtils.state(localNode, masterNode, allNodes)).version(randomIntBetween(0, 10))
);

PlainActionFuture<Response> listener = new PlainActionFuture<>();
new Action("internal:testAction", transportService, clusterService, threadPool).execute(request, listener);
@@ -194,7 +194,8 @@ public class LeaderCheckerTests extends ESTestCase {

assertThat(deterministicTaskQueue.getCurrentTimeMillis() - failureTime,
    lessThanOrEqualTo((leaderCheckIntervalMillis + leaderCheckTimeoutMillis) * leaderCheckRetryCount
        + leaderCheckTimeoutMillis // needed because a successful check response might be in flight at the time of failure
        // needed because a successful check response might be in flight at the time of failure
        + leaderCheckTimeoutMillis
    ));
}
leaderChecker.updateLeader(null);
@@ -149,7 +149,8 @@ public class CorruptedFileIT extends ESIntegTestCase {
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1")
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1")
.put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
.put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on purpose
// no checkindex - we corrupt shards on purpose
.put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
// no translog based flush - it might change the .liv / segments.N files
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
));
@@ -93,10 +93,11 @@ public class PluginsServiceTests extends ESTestCase {
Settings newSettings = service.updatedSettings();
assertEquals("test", newSettings.get("my.setting")); // previous settings still exist
assertEquals("1", newSettings.get("foo.bar")); // added setting exists
// does not override pre existing settings
assertEquals(
    IndexModule.Type.SIMPLEFS.getSettingsKey(),
    newSettings.get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey())
); // does not override pre existing settings
);
}

public void testAdditionalSettingsClash() {
@@ -280,7 +280,8 @@ public class RelocationIT extends ESIntegTestCase {
Settings.builder()
    .put("index.number_of_shards", 1)
    .put("index.number_of_replicas", numberOfReplicas)
    .put("index.refresh_interval", -1) // we want to control refreshes
    // we want to control refreshes
    .put("index.refresh_interval", -1)
).get();

for (int i = 1; i < numberOfNodes; i++) {
@@ -97,7 +97,8 @@ public class DateRangeIT extends ESIntegTestCase {
indexDoc(2, 15, 3), // Feb 15
indexDoc(3, 2, 4), // Mar 2
indexDoc(3, 15, 5), // Mar 15
indexDoc(3, 23, 6))); // Mar 23
indexDoc(3, 23, 6) // Mar 23
));

// dummy docs
for (int i = docs.size(); i < numDocs; ++i) {
@@ -147,8 +147,8 @@ public class SearchPreferenceIT extends ESIntegTestCase {
refresh();

final Client client = internalCluster().smartClient();
SearchRequestBuilder request = client.prepareSearch("test")
    .setQuery(matchAllQuery()).setPreference("_only_nodes:*,nodes*"); // multiple wildchar to cover multi-param usecase
// multiple wildchar to cover multi-param usecase
SearchRequestBuilder request = client.prepareSearch("test").setQuery(matchAllQuery()).setPreference("_only_nodes:*,nodes*");
assertSearchOnRandomNodes(request);

request = client.prepareSearch("test")
@@ -1072,7 +1072,7 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase {
    .startObject()
    .field("somefield", "somevalue")
    .endObject()
).get(); // we have 2 docs in a segment...
).get();
ForceMergeResponse actionGet = client().admin().indices().prepareForceMerge().setFlush(true).setMaxNumSegments(1).get();
assertAllSuccessful(actionGet);
refresh();
@@ -59,8 +59,8 @@ public class ThreadPoolTests extends ESTestCase {
long currentTime = System.currentTimeMillis();
long gotTime = threadPool.absoluteTimeInMillis();
long delta = Math.abs(gotTime - currentTime);
assertTrue("thread pool cached absolute time " + gotTime + " is too far from real current time " + currentTime,
    delta < 10000); // the delta can be large, we just care it is the same order of magnitude
// the delta can be large, we just care it is the same order of magnitude
assertTrue("thread pool cached absolute time " + gotTime + " is too far from real current time " + currentTime, delta < 10000);
} finally {
    threadPool.shutdown();
    threadPool.close();