Bump com.diffplug.spotless from 5.6.1 to 6.2.0 (#1919)

* Bump com.diffplug.spotless from 5.6.1 to 6.2.0

Bumps com.diffplug.spotless from 5.6.1 to 6.2.0.

---
updated-dependencies:
- dependency-name: com.diffplug.spotless
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

* spotlessApply

Signed-off-by: Nicholas Walter Knize <nknize@apache.org>

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Nicholas Walter Knize <nknize@apache.org>
Authored by dependabot[bot] on 2022-02-16 22:08:40 -08:00; committed by GitHub.
Commit: 9689a27b63 (parent: d47725d9c3)
87 changed files with 681 additions and 1014 deletions


@@ -48,7 +48,7 @@ plugins {
   id 'lifecycle-base'
   id 'opensearch.docker-support'
   id 'opensearch.global-build-info'
-  id "com.diffplug.spotless" version "5.6.1" apply false
+  id "com.diffplug.spotless" version "6.2.0" apply false
 }
 apply from: 'gradle/build-complete.gradle'
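
For context: this is a semver-major upgrade of the Spotless Gradle plugin, so its bundled formatter can produce different output than 5.6.1 did. That is why the follow-up "spotlessApply" commit simply reruns the formatter, and almost every hunk below is the 6.2.0 formatter collapsing previously exploded lambda, builder-chain, and assert-message continuations onto fewer lines. As a minimal sketch of this kind of Spotless setup (the formatter step and config-file path here are illustrative assumptions, not OpenSearch's actual settings):

    spotless {
        java {
            // hypothetical formatter configuration, for illustration only
            eclipse().configFile rootProject.file('formatterConfig.xml')
            removeUnusedImports()
            trimTrailingWhitespace()
            endWithNewline()
        }
    }

With the plugin applied, ./gradlew spotlessApply rewrites sources in place (the second commit in this change), and ./gradlew spotlessCheck fails the build when files drift from the configured format.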


@@ -176,14 +176,10 @@ public class OpenSearchJavaPlugin implements Plugin<Project> {
             compileOptions.getRelease().set(releaseVersionProviderFromCompileTask(project, compileTask));
         });
         // also apply release flag to groovy, which is used in build-tools
-        project.getTasks()
-            .withType(GroovyCompile.class)
-            .configureEach(
-                compileTask -> {
-                    // TODO: this probably shouldn't apply to groovy at all?
-                    compileTask.getOptions().getRelease().set(releaseVersionProviderFromCompileTask(project, compileTask));
-                }
-            );
+        project.getTasks().withType(GroovyCompile.class).configureEach(compileTask -> {
+            // TODO: this probably shouldn't apply to groovy at all?
+            compileTask.getOptions().getRelease().set(releaseVersionProviderFromCompileTask(project, compileTask));
+        });
     });
 }
@@ -205,50 +201,37 @@ public class OpenSearchJavaPlugin implements Plugin<Project> {
      * Adds additional manifest info to jars
      */
     static void configureJars(Project project) {
-        project.getTasks()
-            .withType(Jar.class)
-            .configureEach(
-                jarTask -> {
-                    // we put all our distributable files under distributions
-                    jarTask.getDestinationDirectory().set(new File(project.getBuildDir(), "distributions"));
-                    // fixup the jar manifest
-                    // Explicitly using an Action interface as java lambdas
-                    // are not supported by Gradle up-to-date checks
-                    jarTask.doFirst(new Action<Task>() {
-                        @Override
-                        public void execute(Task task) {
-                            // this doFirst is added before the info plugin, therefore it will run
-                            // after the doFirst added by the info plugin, and we can override attributes
-                            jarTask.getManifest()
-                                .attributes(
-                                    Map.of(
-                                        "Build-Date",
-                                        BuildParams.getBuildDate(),
-                                        "Build-Java-Version",
-                                        BuildParams.getGradleJavaVersion()
-                                    )
-                                );
-                        }
-                    });
-                }
-            );
+        project.getTasks().withType(Jar.class).configureEach(jarTask -> {
+            // we put all our distributable files under distributions
+            jarTask.getDestinationDirectory().set(new File(project.getBuildDir(), "distributions"));
+            // fixup the jar manifest
+            // Explicitly using an Action interface as java lambdas
+            // are not supported by Gradle up-to-date checks
+            jarTask.doFirst(new Action<Task>() {
+                @Override
+                public void execute(Task task) {
+                    // this doFirst is added before the info plugin, therefore it will run
+                    // after the doFirst added by the info plugin, and we can override attributes
+                    jarTask.getManifest()
+                        .attributes(
+                            Map.of("Build-Date", BuildParams.getBuildDate(), "Build-Java-Version", BuildParams.getGradleJavaVersion())
+                        );
+                }
+            });
+        });
         project.getPluginManager().withPlugin("com.github.johnrengelman.shadow", p -> {
-            project.getTasks()
-                .withType(ShadowJar.class)
-                .configureEach(
-                    shadowJar -> {
-                        /*
-                         * Replace the default "-all" classifier with null
-                         * which will leave the classifier off of the file name.
-                         */
-                        shadowJar.getArchiveClassifier().set((String) null);
-                        /*
-                         * Not all cases need service files merged but it is
-                         * better to be safe
-                         */
-                        shadowJar.mergeServiceFiles();
-                    }
-                );
+            project.getTasks().withType(ShadowJar.class).configureEach(shadowJar -> {
+                /*
+                 * Replace the default "-all" classifier with null
+                 * which will leave the classifier off of the file name.
+                 */
+                shadowJar.getArchiveClassifier().set((String) null);
+                /*
+                 * Not all cases need service files merged but it is
+                 * better to be safe
+                 */
+                shadowJar.mergeServiceFiles();
+            });
             // Add "original" classifier to the non-shadowed JAR to distinguish it from the shadow JAR
             project.getTasks().named(JavaPlugin.JAR_TASK_NAME, Jar.class).configure(jar -> jar.getArchiveClassifier().set("original"));
             // Make sure we assemble the shadow jar


@@ -53,16 +53,12 @@ public abstract class PrecommitPlugin implements Plugin<Project> {
         TaskProvider<Task> precommit = project.getTasks().named(PRECOMMIT_TASK_NAME);
         precommit.configure(t -> t.dependsOn(task));
-        project.getPluginManager()
-            .withPlugin(
-                "java",
-                p -> {
-                    // We want to get any compilation error before running the pre-commit checks.
-                    for (SourceSet sourceSet : GradleUtils.getJavaSourceSets(project)) {
-                        task.configure(t -> t.shouldRunAfter(sourceSet.getClassesTaskName()));
-                    }
-                }
-            );
+        project.getPluginManager().withPlugin("java", p -> {
+            // We want to get any compilation error before running the pre-commit checks.
+            for (SourceSet sourceSet : GradleUtils.getJavaSourceSets(project)) {
+                task.configure(t -> t.shouldRunAfter(sourceSet.getClassesTaskName()));
+            }
+        });
     }
 
     public abstract TaskProvider<? extends Task> createTask(Project project);


@@ -56,18 +56,14 @@ public class PrecommitTaskPlugin implements Plugin<Project> {
             "lifecycle-base",
             p -> project.getTasks().named(LifecycleBasePlugin.CHECK_TASK_NAME).configure(t -> t.dependsOn(precommit))
         );
-        project.getPluginManager()
-            .withPlugin(
-                "java",
-                p -> {
-                    // run compilation as part of precommit
-                    for (SourceSet sourceSet : GradleUtils.getJavaSourceSets(project)) {
-                        precommit.configure(t -> t.dependsOn(sourceSet.getClassesTaskName()));
-                    }
-                    // make sure tests run after all precommit tasks
-                    project.getTasks().withType(Test.class).configureEach(t -> t.mustRunAfter(precommit));
-                }
-            );
+        project.getPluginManager().withPlugin("java", p -> {
+            // run compilation as part of precommit
+            for (SourceSet sourceSet : GradleUtils.getJavaSourceSets(project)) {
+                precommit.configure(t -> t.dependsOn(sourceSet.getClassesTaskName()));
+            }
+            // make sure tests run after all precommit tasks
+            project.getTasks().withType(Test.class).configureEach(t -> t.mustRunAfter(precommit));
+        });
     }
 }


@@ -160,9 +160,8 @@ public class Sniffer implements Closeable {
             // tasks are run by a single threaded executor, so swapping is safe with a simple volatile variable
             ScheduledTask previousTask = nextScheduledTask;
             nextScheduledTask = new ScheduledTask(task, future);
-            assert initialized.get() == false
-                || previousTask.task.isSkipped()
-                || previousTask.task.hasStarted() : "task that we are replacing is neither " + "cancelled nor has it ever started";
+            assert initialized.get() == false || previousTask.task.isSkipped() || previousTask.task.hasStarted()
+                : "task that we are replacing is neither " + "cancelled nor has it ever started";
         }
     }


@@ -206,13 +206,10 @@ public class WhenThingsGoWrongTests extends ScriptTestCase {
      * the parser with right-curly brackets to allow statements to be delimited by them at the end of blocks.
      */
     public void testRCurlyNotDelim() {
-        IllegalArgumentException e = expectScriptThrows(
-            IllegalArgumentException.class,
-            () -> {
-                // We don't want PICKY here so we get the normal error message
-                exec("def i = 1} return 1", emptyMap(), emptyMap(), false);
-            }
-        );
+        IllegalArgumentException e = expectScriptThrows(IllegalArgumentException.class, () -> {
+            // We don't want PICKY here so we get the normal error message
+            exec("def i = 1} return 1", emptyMap(), emptyMap(), false);
+        });
         assertEquals("unexpected token ['}'] was expecting one of [{<EOF>, ';'}].", e.getMessage());
     }


@@ -413,8 +413,8 @@ public class AzureBlobStore implements BlobStore {
     public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws URISyntaxException,
         BlobStorageException, IOException {
-        assert inputStream
-            .markSupported() : "Should not be used with non-mark supporting streams as their retry handling in the SDK is broken";
+        assert inputStream.markSupported()
+            : "Should not be used with non-mark supporting streams as their retry handling in the SDK is broken";
         logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {})", blobName, blobSize));
         final Tuple<BlobServiceClient, Supplier<Context>> client = client();
         final BlobContainerClient blobContainer = client.v1().getBlobContainerClient(container);


@@ -199,8 +199,8 @@ public class HttpReadWriteHandler implements NioChannelHandler {
                 + ". Found type: "
                 + message.getClass()
                 + ".";
-            assert ((HttpPipelinedResponse) message)
-                .getDelegateRequest() instanceof NioHttpResponse : "This channel only pipelined responses with a delegate of type: "
+            assert ((HttpPipelinedResponse) message).getDelegateRequest() instanceof NioHttpResponse
+                : "This channel only pipelined responses with a delegate of type: "
                 + NioHttpResponse.class
                 + ". Found type: "
                 + ((HttpPipelinedResponse) message).getDelegateRequest().getClass()


@@ -75,16 +75,14 @@ public class Cleanup {
             sh.runIgnoreExitCode("ps aux | grep -i 'org.opensearch.bootstrap.OpenSearch' | awk {'print $2'} | xargs kill -9");
         });
-        Platforms.onWindows(
-            () -> {
-                // the view of processes returned by Get-Process doesn't expose command line arguments, so we use WMI here
-                sh.runIgnoreExitCode(
-                    "Get-WmiObject Win32_Process | "
-                        + "Where-Object { $_.CommandLine -Match 'org.opensearch.bootstrap.OpenSearch' } | "
-                        + "ForEach-Object { $_.Terminate() }"
-                );
-            }
-        );
+        Platforms.onWindows(() -> {
+            // the view of processes returned by Get-Process doesn't expose command line arguments, so we use WMI here
+            sh.runIgnoreExitCode(
+                "Get-WmiObject Win32_Process | "
+                    + "Where-Object { $_.CommandLine -Match 'org.opensearch.bootstrap.OpenSearch' } | "
+                    + "ForEach-Object { $_.Terminate() }"
+            );
+        });
         Platforms.onLinux(Cleanup::purgePackagesLinux);


@@ -346,22 +346,16 @@ public class RetentionLeaseIT extends OpenSearchIntegTestCase {
                 )
             );
         }
-        assertBusy(
-            () -> {
-                // check all retention leases have been synced to all replicas
-                for (final ShardRouting replicaShard : clusterService().state()
-                    .routingTable()
-                    .index("index")
-                    .shard(0)
-                    .replicaShards()) {
-                    final String replicaShardNodeId = replicaShard.currentNodeId();
-                    final String replicaShardNodeName = clusterService().state().nodes().get(replicaShardNodeId).getName();
-                    final IndexShard replica = internalCluster().getInstance(IndicesService.class, replicaShardNodeName)
-                        .getShardOrNull(new ShardId(resolveIndex("index"), 0));
-                    assertThat(replica.getRetentionLeases(), equalTo(primary.getRetentionLeases()));
-                }
-            }
-        );
+        assertBusy(() -> {
+            // check all retention leases have been synced to all replicas
+            for (final ShardRouting replicaShard : clusterService().state().routingTable().index("index").shard(0).replicaShards()) {
+                final String replicaShardNodeId = replicaShard.currentNodeId();
+                final String replicaShardNodeName = clusterService().state().nodes().get(replicaShardNodeId).getName();
+                final IndexShard replica = internalCluster().getInstance(IndicesService.class, replicaShardNodeName)
+                    .getShardOrNull(new ShardId(resolveIndex("index"), 0));
+                assertThat(replica.getRetentionLeases(), equalTo(primary.getRetentionLeases()));
+            }
+        });
     }
 }


@@ -84,21 +84,13 @@ public class PersistentTasksExecutorFullRestartIT extends OpenSearchIntegTestCas
         assertThat(tasksInProgress.tasks().size(), equalTo(numberOfTasks));
         // Make sure that at least one of the tasks is running
-        assertBusy(
-            () -> {
-                // Wait for the task to start
-                assertThat(
-                    client().admin()
-                        .cluster()
-                        .prepareListTasks()
-                        .setActions(TestPersistentTasksExecutor.NAME + "[c]")
-                        .get()
-                        .getTasks()
-                        .size(),
-                    greaterThan(0)
-                );
-            }
-        );
+        assertBusy(() -> {
+            // Wait for the task to start
+            assertThat(
+                client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks().size(),
+                greaterThan(0)
+            );
+        });
 
         // Restart cluster
         internalCluster().fullRestart();
@@ -113,21 +105,13 @@ public class PersistentTasksExecutorFullRestartIT extends OpenSearchIntegTestCas
         }
         logger.info("Waiting for {} tasks to start", numberOfTasks);
-        assertBusy(
-            () -> {
-                // Wait for all tasks to start
-                assertThat(
-                    client().admin()
-                        .cluster()
-                        .prepareListTasks()
-                        .setActions(TestPersistentTasksExecutor.NAME + "[c]")
-                        .get()
-                        .getTasks()
-                        .size(),
-                    equalTo(numberOfTasks)
-                );
-            }
-        );
+        assertBusy(() -> {
+            // Wait for all tasks to start
+            assertThat(
+                client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks().size(),
+                equalTo(numberOfTasks)
+            );
+        });
 
         logger.info("Complete all tasks");
         // Complete the running task and make sure it finishes properly
@@ -136,18 +120,16 @@ public class PersistentTasksExecutorFullRestartIT extends OpenSearchIntegTestCas
             equalTo(numberOfTasks)
         );
-        assertBusy(
-            () -> {
-                // Make sure the task is removed from the cluster state
-                assertThat(
-                    ((PersistentTasksCustomMetadata) internalCluster().clusterService()
-                        .state()
-                        .getMetadata()
-                        .custom(PersistentTasksCustomMetadata.TYPE)).tasks(),
-                    empty()
-                );
-            }
-        );
+        assertBusy(() -> {
+            // Make sure the task is removed from the cluster state
+            assertThat(
+                ((PersistentTasksCustomMetadata) internalCluster().clusterService()
+                    .state()
+                    .getMetadata()
+                    .custom(PersistentTasksCustomMetadata.TYPE)).tasks(),
+                empty()
+            );
+        });
     }
 }


@@ -95,21 +95,13 @@ public class PersistentTasksExecutorIT extends OpenSearchIntegTestCase {
         PlainActionFuture<PersistentTask<TestParams>> future = new PlainActionFuture<>();
         persistentTasksService.sendStartRequest(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future);
         long allocationId = future.get().getAllocationId();
-        assertBusy(
-            () -> {
-                // Wait for the task to start
-                assertThat(
-                    client().admin()
-                        .cluster()
-                        .prepareListTasks()
-                        .setActions(TestPersistentTasksExecutor.NAME + "[c]")
-                        .get()
-                        .getTasks()
-                        .size(),
-                    equalTo(1)
-                );
-            }
-        );
+        assertBusy(() -> {
+            // Wait for the task to start
+            assertThat(
+                client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks().size(),
+                equalTo(1)
+            );
+        });
         TaskInfo firstRunningTask = client().admin()
             .cluster()
             .prepareListTasks()
@@ -130,15 +122,13 @@ public class PersistentTasksExecutorIT extends OpenSearchIntegTestCase {
         );
         logger.info("Waiting for persistent task with id {} to disappear", firstRunningTask.getId());
-        assertBusy(
-            () -> {
-                // Wait for the task to disappear completely
-                assertThat(
-                    client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks(),
-                    empty()
-                );
-            }
-        );
+        assertBusy(() -> {
+            // Wait for the task to disappear completely
+            assertThat(
+                client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks(),
+                empty()
+            );
+        });
     }
 
     public void testPersistentActionCompletion() throws Exception {
@@ -147,21 +137,13 @@ public class PersistentTasksExecutorIT extends OpenSearchIntegTestCase {
         String taskId = UUIDs.base64UUID();
         persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future);
         long allocationId = future.get().getAllocationId();
-        assertBusy(
-            () -> {
-                // Wait for the task to start
-                assertThat(
-                    client().admin()
-                        .cluster()
-                        .prepareListTasks()
-                        .setActions(TestPersistentTasksExecutor.NAME + "[c]")
-                        .get()
-                        .getTasks()
-                        .size(),
-                    equalTo(1)
-                );
-            }
-        );
+        assertBusy(() -> {
+            // Wait for the task to start
+            assertThat(
+                client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks().size(),
+                equalTo(1)
+            );
+        });
         TaskInfo firstRunningTask = client().admin()
             .cluster()
             .prepareListTasks()
@@ -225,15 +207,13 @@ public class PersistentTasksExecutorIT extends OpenSearchIntegTestCase {
         internalCluster().stopRandomNode(settings -> "test".equals(settings.get("node.attr.test_attr")));
-        assertBusy(
-            () -> {
-                // Wait for the task to disappear completely
-                assertThat(
-                    client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks(),
-                    empty()
-                );
-            }
-        );
+        assertBusy(() -> {
+            // Wait for the task to disappear completely
+            assertThat(
+                client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks(),
+                empty()
+            );
+        });
 
         // Remove the persistent task
         PlainActionFuture<PersistentTask<?>> removeFuture = new PlainActionFuture<>();
@@ -368,21 +348,13 @@ public class PersistentTasksExecutorIT extends OpenSearchIntegTestCase {
         persistentTasksService.sendStartRequest(taskId, TestPersistentTasksExecutor.NAME, new TestParams("Blah"), future2);
         assertFutureThrows(future2, ResourceAlreadyExistsException.class);
-        assertBusy(
-            () -> {
-                // Wait for the task to start
-                assertThat(
-                    client().admin()
-                        .cluster()
-                        .prepareListTasks()
-                        .setActions(TestPersistentTasksExecutor.NAME + "[c]")
-                        .get()
-                        .getTasks()
-                        .size(),
-                    equalTo(1)
-                );
-            }
-        );
+        assertBusy(() -> {
+            // Wait for the task to start
+            assertThat(
+                client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks().size(),
+                equalTo(1)
+            );
+        });
 
         TaskInfo firstRunningTask = client().admin()
             .cluster()
@@ -400,15 +372,13 @@ public class PersistentTasksExecutorIT extends OpenSearchIntegTestCase {
         );
         logger.info("Waiting for persistent task with id {} to disappear", firstRunningTask.getId());
-        assertBusy(
-            () -> {
-                // Wait for the task to disappear completely
-                assertThat(
-                    client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks(),
-                    empty()
-                );
-            }
-        );
+        assertBusy(() -> {
+            // Wait for the task to disappear completely
+            assertThat(
+                client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks(),
+                empty()
+            );
+        });
     }
 
     public void testUnassignRunningPersistentTask() throws Exception {
@@ -489,21 +459,13 @@ public class PersistentTasksExecutorIT extends OpenSearchIntegTestCase {
     }
 
     private static void waitForTaskToStart() throws Exception {
-        assertBusy(
-            () -> {
-                // Wait for the task to start
-                assertThat(
-                    client().admin()
-                        .cluster()
-                        .prepareListTasks()
-                        .setActions(TestPersistentTasksExecutor.NAME + "[c]")
-                        .get()
-                        .getTasks()
-                        .size(),
-                    equalTo(1)
-                );
-            }
-        );
+        assertBusy(() -> {
+            // Wait for the task to start
+            assertThat(
+                client().admin().cluster().prepareListTasks().setActions(TestPersistentTasksExecutor.NAME + "[c]").get().getTasks().size(),
+                equalTo(1)
+            );
+        });
     }
 
     private static void assertClusterStateHasTask(String taskId) {


@@ -810,8 +810,8 @@ public class RelocationIT extends OpenSearchIntegTestCase {
                 if (chunkRequest.name().startsWith(IndexFileNames.SEGMENTS)) {
                     // corrupting the segments_N files in order to make sure future recovery re-send files
                     logger.debug("corrupting [{}] to {}. file name: [{}]", action, connection.getNode(), chunkRequest.name());
-                    assert chunkRequest.content().toBytesRef().bytes == chunkRequest.content()
-                        .toBytesRef().bytes : "no internal reference!!";
+                    assert chunkRequest.content().toBytesRef().bytes == chunkRequest.content().toBytesRef().bytes
+                        : "no internal reference!!";
                     byte[] array = chunkRequest.content().toBytesRef().bytes;
                     array[0] = (byte) ~array[0]; // flip one byte in the content
                     corruptionCount.countDown();


@@ -297,25 +297,22 @@ public final class ExceptionsHelper {
      * @param throwable the throwable to possibly throw on another thread
      */
     public static void maybeDieOnAnotherThread(final Throwable throwable) {
-        ExceptionsHelper.maybeError(throwable)
-            .ifPresent(
-                error -> {
-                    /*
-                     * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, sometimes the stack
-                     * contains statements that catch any throwable (e.g., Netty, and the JDK futures framework). This means that a rethrow here
-                     * will not bubble up to where we want it to. So, we fork a thread and throw the exception from there where we are sure the
-                     * stack does not contain statements that catch any throwable. We do not wrap the exception so as to not lose the original cause
-                     * during exit.
-                     */
-                    try {
-                        // try to log the current stack trace
-                        final String formatted = ExceptionsHelper.formatStackTrace(Thread.currentThread().getStackTrace());
-                        logger.error("fatal error\n{}", formatted);
-                    } finally {
-                        new Thread(() -> { throw error; }).start();
-                    }
-                }
-            );
+        ExceptionsHelper.maybeError(throwable).ifPresent(error -> {
+            /*
+             * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, sometimes the stack
+             * contains statements that catch any throwable (e.g., Netty, and the JDK futures framework). This means that a rethrow here
+             * will not bubble up to where we want it to. So, we fork a thread and throw the exception from there where we are sure the
+             * stack does not contain statements that catch any throwable. We do not wrap the exception so as to not lose the original cause
+             * during exit.
+             */
+            try {
+                // try to log the current stack trace
+                final String formatted = ExceptionsHelper.formatStackTrace(Thread.currentThread().getStackTrace());
+                logger.error("fatal error\n{}", formatted);
+            } finally {
+                new Thread(() -> { throw error; }).start();
+            }
+        });
     }
 
     /**


@@ -364,9 +364,8 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
         }
         if (request.waitForActiveShards().equals(ActiveShardCount.NONE) == false) {
             ActiveShardCount waitForActiveShards = request.waitForActiveShards();
-            assert waitForActiveShards.equals(
-                ActiveShardCount.DEFAULT
-            ) == false : "waitForActiveShards must not be DEFAULT on the request object, instead it should be NONE";
+            assert waitForActiveShards.equals(ActiveShardCount.DEFAULT) == false
+                : "waitForActiveShards must not be DEFAULT on the request object, instead it should be NONE";
             if (waitForActiveShards.equals(ActiveShardCount.ALL)) {
                 if (response.getUnassignedShards() == 0 && response.getInitializingShards() == 0) {
                     // if we are waiting for all shards to be active, then the num of unassigned and num of initializing shards must be 0


@@ -187,24 +187,17 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
         TaskInfo snapshotOfRunningTask,
         ActionListener<GetTaskResponse> listener
     ) {
-        getFinishedTaskFromIndex(
-            thisTask,
-            request,
-            ActionListener.delegateResponse(
-                listener,
-                (delegatedListener, e) -> {
-                    /*
-                     * We couldn't load the task from the task index. Instead of 404 we should use the snapshot we took after it finished. If
-                     * the error isn't a 404 then we'll just throw it back to the user.
-                     */
-                    if (ExceptionsHelper.unwrap(e, ResourceNotFoundException.class) != null) {
-                        delegatedListener.onResponse(new GetTaskResponse(new TaskResult(true, snapshotOfRunningTask)));
-                    } else {
-                        delegatedListener.onFailure(e);
-                    }
-                }
-            )
-        );
+        getFinishedTaskFromIndex(thisTask, request, ActionListener.delegateResponse(listener, (delegatedListener, e) -> {
+            /*
+             * We couldn't load the task from the task index. Instead of 404 we should use the snapshot we took after it finished. If
+             * the error isn't a 404 then we'll just throw it back to the user.
+             */
+            if (ExceptionsHelper.unwrap(e, ResourceNotFoundException.class) != null) {
+                delegatedListener.onResponse(new GetTaskResponse(new TaskResult(true, snapshotOfRunningTask)));
+            } else {
+                delegatedListener.onFailure(e);
+            }
+        }));
     }
 
     /**


@@ -363,10 +363,8 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
                 }
                 final long startTime = snapshotInfo.startTime();
                 final long endTime = snapshotInfo.endTime();
-                assert endTime >= startTime
-                    || (endTime == 0L && snapshotInfo.state().completed() == false) : "Inconsistent timestamps found in SnapshotInfo ["
-                        + snapshotInfo
-                        + "]";
+                assert endTime >= startTime || (endTime == 0L && snapshotInfo.state().completed() == false)
+                    : "Inconsistent timestamps found in SnapshotInfo [" + snapshotInfo + "]";
                 builder.add(
                     new SnapshotStatus(
                         new Snapshot(repositoryName, snapshotId),


@@ -109,8 +109,8 @@ class BulkPrimaryExecutionContext {
     /** move to the next item to execute */
     private void advance() {
-        assert currentItemState == ItemProcessingState.COMPLETED
-            || currentIndex == -1 : "moving to next but current item wasn't completed (state: " + currentItemState + ")";
+        assert currentItemState == ItemProcessingState.COMPLETED || currentIndex == -1
+            : "moving to next but current item wasn't completed (state: " + currentItemState + ")";
         currentItemState = ItemProcessingState.INITIAL;
         currentIndex = findNextNonAborted(currentIndex + 1);
         retryCounter = 0;


@@ -878,8 +878,8 @@ public abstract class TransportReplicationAction<
                 // resolve it from the index settings
                 request.waitForActiveShards(indexMetadata.getWaitForActiveShards());
             }
-            assert request
-                .waitForActiveShards() != ActiveShardCount.DEFAULT : "request waitForActiveShards must be set in resolveRequest";
+            assert request.waitForActiveShards() != ActiveShardCount.DEFAULT
+                : "request waitForActiveShards must be set in resolveRequest";
             final ShardRouting primary = state.getRoutingTable().shardRoutingTable(request.shardId()).primaryShard();
             if (primary == null || primary.active() == false) {


@@ -332,10 +332,8 @@ public class SnapshotsInProgress extends AbstractNamedDiffable<Custom> implement
             final Set<String> indexNamesInShards = new HashSet<>();
             shards.iterator().forEachRemaining(s -> {
                 indexNamesInShards.add(s.key.getIndexName());
-                assert source == null
-                    || s.value.nodeId == null : "Shard snapshot must not be assigned to data node when copying from snapshot ["
-                        + source
-                        + "]";
+                assert source == null || s.value.nodeId == null
+                    : "Shard snapshot must not be assigned to data node when copying from snapshot [" + source + "]";
             });
             assert source == null || indexNames.isEmpty() == false : "No empty snapshot clones allowed";
             assert source != null || indexNames.equals(indexNamesInShards) : "Indices in shards "
@@ -348,12 +346,8 @@ public class SnapshotsInProgress extends AbstractNamedDiffable<Custom> implement
             final boolean shardsCompleted = completed(shards.values()) && completed(clones.values());
             // Check state consistency for normal snapshots and started clone operations
             if (source == null || clones.isEmpty() == false) {
-                assert (state.completed() && shardsCompleted)
-                    || (state.completed() == false
-                        && shardsCompleted == false) : "Completed state must imply all shards completed but saw state ["
-                            + state
-                            + "] and shards "
-                            + shards;
+                assert (state.completed() && shardsCompleted) || (state.completed() == false && shardsCompleted == false)
+                    : "Completed state must imply all shards completed but saw state [" + state + "] and shards " + shards;
             }
             if (source != null && state.completed()) {
                 assert hasFailures(clones) == false || state == State.FAILED : "Failed shard clones in ["
@@ -567,8 +561,8 @@ public class SnapshotsInProgress extends AbstractNamedDiffable<Custom> implement
                 userMetadata,
                 version
             );
-            assert updated.state().completed() == false
-                && completed(updated.shards().values()) == false : "Only running snapshots allowed but saw [" + updated + "]";
+            assert updated.state().completed() == false && completed(updated.shards().values()) == false
+                : "Only running snapshots allowed but saw [" + updated + "]";
             return updated;
         }
@@ -966,8 +960,8 @@ public class SnapshotsInProgress extends AbstractNamedDiffable<Custom> implement
         for (Entry entry : entries) {
             for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shard : entry.shards()) {
                 if (shard.value.isActive()) {
-                    assert assignedShardsByRepo.computeIfAbsent(entry.repository(), k -> new HashSet<>())
-                        .add(shard.key) : "Found duplicate shard assignments in " + entries;
+                    assert assignedShardsByRepo.computeIfAbsent(entry.repository(), k -> new HashSet<>()).add(shard.key)
+                        : "Found duplicate shard assignments in " + entries;
                 }
             }
         }


@@ -305,7 +305,7 @@ public class CoordinationState {
         boolean prevElectionWon = electionWon;
         electionWon = isElectionQuorum(joinVotes);
-        assert !prevElectionWon || electionWon : // we cannot go from won to not won
-        "locaNode= " + localNode + ", join=" + join + ", joinVotes=" + joinVotes;
+        assert !prevElectionWon || electionWon : // we cannot go from won to not won
+            "locaNode= " + localNode + ", join=" + join + ", joinVotes=" + joinVotes;
         logger.debug(
             "handleJoin: added join {} from [{}] for election, electionWon={} lastAcceptedTerm={} lastAcceptedVersion={}",
             join,
@@ -378,8 +378,8 @@ public class CoordinationState {
             throw new CoordinationStateRejectedException("only allow reconfiguration if joinVotes have quorum for new config");
         }
-        assert clusterState.getLastCommittedConfiguration()
-            .equals(getLastCommittedConfiguration()) : "last committed configuration should not change";
+        assert clusterState.getLastCommittedConfiguration().equals(getLastCommittedConfiguration())
+            : "last committed configuration should not change";
         lastPublishedVersion = clusterState.version();
         lastPublishedConfiguration = clusterState.getLastAcceptedConfiguration();


@@ -1207,8 +1207,8 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery
     private ClusterState clusterStateWithNoMasterBlock(ClusterState clusterState) {
         if (clusterState.nodes().getMasterNodeId() != null) {
             // remove block if it already exists before adding new one
-            assert clusterState.blocks()
-                .hasGlobalBlockWithId(NO_MASTER_BLOCK_ID) == false : "NO_MASTER_BLOCK should only be added by Coordinator";
+            assert clusterState.blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID) == false
+                : "NO_MASTER_BLOCK should only be added by Coordinator";
             final ClusterBlocks clusterBlocks = ClusterBlocks.builder()
                 .blocks(clusterState.blocks())
                 .addGlobalBlock(noMasterBlockService.getNoMasterBlock())


@@ -193,20 +193,15 @@ public class JoinTaskExecutor implements ClusterStateTaskExecutor<JoinTaskExecut
         if (joiniedNodeNameIds.isEmpty() == false) {
             Set<CoordinationMetadata.VotingConfigExclusion> currentVotingConfigExclusions = currentState.getVotingConfigExclusions();
             Set<CoordinationMetadata.VotingConfigExclusion> newVotingConfigExclusions = currentVotingConfigExclusions.stream()
-                .map(
-                    e -> {
-                        // Update nodeId in VotingConfigExclusion when a new node with excluded node name joins
-                        if (CoordinationMetadata.VotingConfigExclusion.MISSING_VALUE_MARKER.equals(e.getNodeId())
-                            && joiniedNodeNameIds.containsKey(e.getNodeName())) {
-                            return new CoordinationMetadata.VotingConfigExclusion(
-                                joiniedNodeNameIds.get(e.getNodeName()),
-                                e.getNodeName()
-                            );
-                        } else {
-                            return e;
-                        }
-                    }
-                )
+                .map(e -> {
+                    // Update nodeId in VotingConfigExclusion when a new node with excluded node name joins
+                    if (CoordinationMetadata.VotingConfigExclusion.MISSING_VALUE_MARKER.equals(e.getNodeId())
+                        && joiniedNodeNameIds.containsKey(e.getNodeName())) {
+                        return new CoordinationMetadata.VotingConfigExclusion(joiniedNodeNameIds.get(e.getNodeName()), e.getNodeName());
+                    } else {
+                        return e;
+                    }
+                })
                 .collect(Collectors.toSet());
 
             // if VotingConfigExclusions did get updated


@@ -986,9 +986,8 @@ public class MetadataCreateIndexService {
                 routingNumShards = calculateNumRoutingShards(numTargetShards, indexVersionCreated);
             }
         } else {
-            assert IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.exists(
-                indexSettings
-            ) == false : "index.number_of_routing_shards should not be present on the target index on resize";
+            assert IndexMetadata.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.exists(indexSettings) == false
+                : "index.number_of_routing_shards should not be present on the target index on resize";
             routingNumShards = sourceMetadata.getRoutingNumShards();
         }
         return routingNumShards;


@@ -599,13 +599,11 @@ public class RoutingNodes implements Iterable<RoutingNode> {
         ensureMutable();
         assert failedShard.assignedToNode() : "only assigned shards can be failed";
         assert indexMetadata.getIndex().equals(failedShard.index()) : "shard failed for unknown index (shard entry: " + failedShard + ")";
-        assert getByAllocationId(
-            failedShard.shardId(),
-            failedShard.allocationId().getId()
-        ) == failedShard : "shard routing to fail does not exist in routing table, expected: "
-            + failedShard
-            + " but was: "
-            + getByAllocationId(failedShard.shardId(), failedShard.allocationId().getId());
+        assert getByAllocationId(failedShard.shardId(), failedShard.allocationId().getId()) == failedShard
+            : "shard routing to fail does not exist in routing table, expected: "
+                + failedShard
+                + " but was: "
+                + getByAllocationId(failedShard.shardId(), failedShard.allocationId().getId());
 
         logger.debug("{} failing shard {} with unassigned info ({})", failedShard.shardId(), failedShard, unassignedInfo.shortSummary());
@@ -850,12 +848,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {
             + oldShard
             + " by shard with same shard id but was "
            + newShard;
-        assert oldShard.unassigned() == false
-            && newShard.unassigned() == false : "only assigned shards can be updated in list of assigned shards (prev: "
-                + oldShard
-                + ", new: "
-                + newShard
-                + ")";
+        assert oldShard.unassigned() == false && newShard.unassigned() == false
+            : "only assigned shards can be updated in list of assigned shards (prev: " + oldShard + ", new: " + newShard + ")";
         assert oldShard.currentNodeId().equals(newShard.currentNodeId()) : "shard to update "
             + oldShard
            + " can only update "


@@ -101,16 +101,13 @@ public final class ShardRouting implements Writeable, ToXContentObject {
         assert expectedShardSize == UNAVAILABLE_EXPECTED_SHARD_SIZE
             || state == ShardRoutingState.INITIALIZING
             || state == ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state;
-        assert expectedShardSize >= 0
-            || state != ShardRoutingState.INITIALIZING
-            || state != ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state;
+        assert expectedShardSize >= 0 || state != ShardRoutingState.INITIALIZING || state != ShardRoutingState.RELOCATING
+            : expectedShardSize + " state: " + state;
         assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta";
-        assert (state == ShardRoutingState.UNASSIGNED
-            || state == ShardRoutingState.INITIALIZING) == (recoverySource != null) : "recovery source only available on unassigned or initializing shard but was "
-                + state;
-        assert recoverySource == null
-            || recoverySource == PeerRecoverySource.INSTANCE
-            || primary : "replica shards always recover from primary";
+        assert (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) == (recoverySource != null)
+            : "recovery source only available on unassigned or initializing shard but was " + state;
+        assert recoverySource == null || recoverySource == PeerRecoverySource.INSTANCE || primary
+            : "replica shards always recover from primary";
         assert (currentNodeId == null) == (state == ShardRoutingState.UNASSIGNED) : "unassigned shard must not be assigned to a node "
             + this;
     }
@@ -589,12 +586,8 @@ public final class ShardRouting implements Writeable, ToXContentObject {
     **/
    public boolean isSameAllocation(ShardRouting other) {
        boolean b = this.allocationId != null && other.allocationId != null && this.allocationId.getId().equals(other.allocationId.getId());
-        assert b == false
-            || this.currentNodeId.equals(other.currentNodeId) : "ShardRoutings have the same allocation id but not the same node. This ["
-                + this
-                + "], other ["
-                + other
-                + "]";
+        assert b == false || this.currentNodeId.equals(other.currentNodeId)
+            : "ShardRoutings have the same allocation id but not the same node. This [" + this + "], other [" + other + "]";
        return b;
    }
@@ -613,50 +606,35 @@ public final class ShardRouting implements Writeable, ToXContentObject {
             && this.state == ShardRoutingState.INITIALIZING
             && this.allocationId.getId().equals(other.allocationId.getRelocationId());
-        assert b == false
-            || other.state == ShardRoutingState.RELOCATING : "ShardRouting is a relocation target but the source shard state isn't relocating. This ["
-                + this
-                + "], other ["
-                + other
-                + "]";
-        assert b == false
-            || other.allocationId.getId()
-                .equals(
-                    this.allocationId.getRelocationId()
-                ) : "ShardRouting is a relocation target but the source id isn't equal to source's allocationId.getRelocationId."
-                    + " This ["
-                    + this
-                    + "], other ["
-                    + other
-                    + "]";
-        assert b == false
-            || other.currentNodeId()
-                .equals(
-                    this.relocatingNodeId
-                ) : "ShardRouting is a relocation target but source current node id isn't equal to target relocating node."
-                    + " This ["
-                    + this
-                    + "], other ["
-                    + other
-                    + "]";
-        assert b == false
-            || this.currentNodeId()
-                .equals(
-                    other.relocatingNodeId
-                ) : "ShardRouting is a relocation target but current node id isn't equal to source relocating node."
-                    + " This ["
-                    + this
-                    + "], other ["
-                    + other
-                    + "]";
-        assert b == false
-            || this.shardId.equals(
-                other.shardId
-            ) : "ShardRouting is a relocation target but both indexRoutings are not of the same shard id."
+        assert b == false || other.state == ShardRoutingState.RELOCATING
+            : "ShardRouting is a relocation target but the source shard state isn't relocating. This [" + this + "], other [" + other + "]";
+        assert b == false || other.allocationId.getId().equals(this.allocationId.getRelocationId())
+            : "ShardRouting is a relocation target but the source id isn't equal to source's allocationId.getRelocationId."
+                + " This ["
+                + this
+                + "], other ["
+                + other
+                + "]";
+        assert b == false || other.currentNodeId().equals(this.relocatingNodeId)
+            : "ShardRouting is a relocation target but source current node id isn't equal to target relocating node."
+                + " This ["
+                + this
+                + "], other ["
+                + other
+                + "]";
+        assert b == false || this.currentNodeId().equals(other.relocatingNodeId)
+            : "ShardRouting is a relocation target but current node id isn't equal to source relocating node."
+                + " This ["
+                + this
+                + "], other ["
+                + other
+                + "]";
+        assert b == false || this.shardId.equals(other.shardId)
+            : "ShardRouting is a relocation target but both indexRoutings are not of the same shard id."
                 + " This ["
                 + this
                 + "], other [
@@ -680,48 +658,35 @@ public final class ShardRouting implements Writeable, ToXContentObject {
             && other.state == ShardRoutingState.INITIALIZING
             && other.allocationId.getId().equals(this.allocationId.getRelocationId());
-        assert b == false
-            || this.state == ShardRoutingState.RELOCATING : "ShardRouting is a relocation source but shard state isn't relocating. This ["
-                + this
-                + "], other ["
-                + other
-                + "]";
-        assert b == false
-            || this.allocationId.getId()
-                .equals(
-                    other.allocationId.getRelocationId()
-                ) : "ShardRouting is a relocation source but the allocation id isn't equal to other.allocationId.getRelocationId."
-                    + " This ["
-                    + this
-                    + "], other ["
-                    + other
-                    + "]";
-        assert b == false
-            || this.currentNodeId()
-                .equals(
-                    other.relocatingNodeId
-                ) : "ShardRouting is a relocation source but current node isn't equal to other's relocating node."
-                    + " This ["
-                    + this
-                    + "], other ["
-                    + other
-                    + "]";
-        assert b == false
-            || other.currentNodeId()
-                .equals(
-                    this.relocatingNodeId
-                ) : "ShardRouting is a relocation source but relocating node isn't equal to other's current node."
-                    + " This ["
-                    + this
-                    + "], other ["
-                    + other
-                    + "]";
-        assert b == false
-            || this.shardId.equals(other.shardId) : "ShardRouting is a relocation source but both indexRoutings are not of the same shard."
+        assert b == false || this.state == ShardRoutingState.RELOCATING
+            : "ShardRouting is a relocation source but shard state isn't relocating. This [" + this + "], other [" + other + "]";
+        assert b == false || this.allocationId.getId().equals(other.allocationId.getRelocationId())
+            : "ShardRouting is a relocation source but the allocation id isn't equal to other.allocationId.getRelocationId."
+                + " This ["
+                + this
+                + "], other ["
+                + other
+                + "]";
+        assert b == false || this.currentNodeId().equals(other.relocatingNodeId)
+            : "ShardRouting is a relocation source but current node isn't equal to other's relocating node."
+                + " This ["
+                + this
+                + "], other ["
+                + other
+                + "]";
+        assert b == false || other.currentNodeId().equals(this.relocatingNodeId)
+            : "ShardRouting is a relocation source but relocating node isn't equal to other's current node."
+                + " This ["
+                + this
+                + "], other ["
+                + other
+                + "]";
+        assert b == false || this.shardId.equals(other.shardId)
+            : "ShardRouting is a relocation source but both indexRoutings are not of the same shard."
                 + " This ["
                 + this
                 + "], target ["


@@ -528,8 +528,8 @@ public class AllocationService {
     private void reroute(RoutingAllocation allocation) {
         assert hasDeadNodes(allocation) == false : "dead nodes should be explicitly cleaned up. See disassociateDeadNodes";
-        assert AutoExpandReplicas.getAutoExpandReplicaChanges(allocation.metadata(), allocation)
-            .isEmpty() : "auto-expand replicas out of sync with number of nodes in the cluster";
+        assert AutoExpandReplicas.getAutoExpandReplicaChanges(allocation.metadata(), allocation).isEmpty()
+            : "auto-expand replicas out of sync with number of nodes in the cluster";
         assert assertInitialized();
 
         removeDelayMarkers(allocation);
@@ -602,15 +602,13 @@ public class AllocationService {
         RoutingNodes routingNodes = routingAllocation.routingNodes();
         for (ShardRouting startedShard : startedShardEntries) {
             assert startedShard.initializing() : "only initializing shards can be started";
-            assert routingAllocation.metadata()
-                .index(startedShard.shardId().getIndex()) != null : "shard started for unknown index (shard entry: " + startedShard + ")";
-            assert startedShard == routingNodes.getByAllocationId(
-                startedShard.shardId(),
-                startedShard.allocationId().getId()
-            ) : "shard routing to start does not exist in routing table, expected: "
-                + startedShard
-                + " but was: "
-                + routingNodes.getByAllocationId(startedShard.shardId(), startedShard.allocationId().getId());
+            assert routingAllocation.metadata().index(startedShard.shardId().getIndex()) != null
+                : "shard started for unknown index (shard entry: " + startedShard + ")";
+            assert startedShard == routingNodes.getByAllocationId(startedShard.shardId(), startedShard.allocationId().getId())
+                : "shard routing to start does not exist in routing table, expected: "
+                    + startedShard
+                    + " but was: "
+                    + routingNodes.getByAllocationId(startedShard.shardId(), startedShard.allocationId().getId());
             routingNodes.startShard(logger, startedShard, routingAllocation.changes());
         }


@@ -86,14 +86,12 @@ public class IndexMetadataUpdater extends RoutingChangesObserver.AbstractRouting
     @Override
     public void shardStarted(ShardRouting initializingShard, ShardRouting startedShard) {
-        assert Objects.equals(
-            initializingShard.allocationId().getId(),
-            startedShard.allocationId().getId()
-        ) : "initializingShard.allocationId ["
-            + initializingShard.allocationId().getId()
-            + "] and startedShard.allocationId ["
-            + startedShard.allocationId().getId()
-            + "] have to have the same";
+        assert Objects.equals(initializingShard.allocationId().getId(), startedShard.allocationId().getId())
+            : "initializingShard.allocationId ["
+                + initializingShard.allocationId().getId()
+                + "] and startedShard.allocationId ["
+                + startedShard.allocationId().getId()
+                + "] have to have the same";
         Updates updates = changes(startedShard.shardId());
         updates.addedAllocationIds.add(startedShard.allocationId().getId());
         if (startedShard.primary()
@@ -171,13 +169,11 @@ public class IndexMetadataUpdater extends RoutingChangesObserver.AbstractRouting
         ShardId shardId,
         Updates updates
     ) {
-        assert Sets.haveEmptyIntersection(
-            updates.addedAllocationIds,
-            updates.removedAllocationIds
-        ) : "allocation ids cannot be both added and removed in the same allocation round, added ids: "
-            + updates.addedAllocationIds
-            + ", removed ids: "
-            + updates.removedAllocationIds;
+        assert Sets.haveEmptyIntersection(updates.addedAllocationIds, updates.removedAllocationIds)
+            : "allocation ids cannot be both added and removed in the same allocation round, added ids: "
+                + updates.addedAllocationIds
+                + ", removed ids: "
+                + updates.removedAllocationIds;
 
         Set<String> oldInSyncAllocationIds = oldIndexMetadata.inSyncAllocationIds(shardId.id());
@@ -217,9 +213,8 @@ public class IndexMetadataUpdater extends RoutingChangesObserver.AbstractRouting
             inSyncAllocationIds.removeAll(updates.removedAllocationIds);
 
             assert oldInSyncAllocationIds.contains(RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID) == false
-                || inSyncAllocationIds.contains(
-                    RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID
-                ) == false : "fake allocation id has to be removed, inSyncAllocationIds:" + inSyncAllocationIds;
+                || inSyncAllocationIds.contains(RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID) == false
+                : "fake allocation id has to be removed, inSyncAllocationIds:" + inSyncAllocationIds;
 
             // Prevent set of inSyncAllocationIds to grow unboundedly. This can happen for example if we don't write to a primary
             // but repeatedly shut down nodes that have active replicas.
@@ -258,9 +253,8 @@ public class IndexMetadataUpdater extends RoutingChangesObserver.AbstractRouting
                 inSyncAllocationIds.add(updates.firstFailedPrimary.allocationId().getId());
             }
 
-            assert inSyncAllocationIds.isEmpty() == false
-                || oldInSyncAllocationIds.isEmpty() : "in-sync allocations cannot become empty after they have been non-empty: "
-                    + oldInSyncAllocationIds;
+            assert inSyncAllocationIds.isEmpty() == false || oldInSyncAllocationIds.isEmpty()
+                : "in-sync allocations cannot become empty after they have been non-empty: " + oldInSyncAllocationIds;
 
             // be extra safe here and only update in-sync set if it is non-empty
             if (inSyncAllocationIds.isEmpty() == false) {
@@ -295,11 +289,8 @@ public class IndexMetadataUpdater extends RoutingChangesObserver.AbstractRouting
             int shardNumber = shardEntry.getKey().getId();
             Set<String> oldInSyncAllocations = oldIndexMetadata.inSyncAllocationIds(shardNumber);
             Set<String> idsToRemove = shardEntry.getValue().stream().map(e -> e.getAllocationId()).collect(Collectors.toSet());
-            assert idsToRemove.stream()
-                .allMatch(id -> oldRoutingTable.getByAllocationId(shardEntry.getKey(), id) == null) : "removing stale ids: "
-                    + idsToRemove
-                    + ", some of which have still a routing entry: "
-                    + oldRoutingTable;
+            assert idsToRemove.stream().allMatch(id -> oldRoutingTable.getByAllocationId(shardEntry.getKey(), id) == null)
+                : "removing stale ids: " + idsToRemove + ", some of which have still a routing entry: " + oldRoutingTable;
             Set<String> remainingInSyncAllocations = Sets.difference(oldInSyncAllocations, idsToRemove);
             assert remainingInSyncAllocations.isEmpty() == false : "Set of in-sync ids cannot become empty for shard "
                 + shardEntry.getKey()

View File

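Almost every hunk in this commit has the same shape: the upgraded formatter keeps an assert condition on as few lines as the line limit allows and moves the ": message" clause onto its own continuation line, instead of wrapping the condition across chained calls. A minimal, self-contained sketch of the new layout (hypothetical names, not code from this commit; run with java -ea so the assertion is active):

import java.util.Collections;
import java.util.Set;

public class AssertStyleDemo {
    public static void main(String[] args) {
        Set<String> added = Set.of("a1");
        Set<String> removed = Set.of("b1");
        // new layout: condition kept on one line, message clause indented below it
        assert Collections.disjoint(added, removed)
            : "allocation ids cannot be both added and removed in the same allocation round, added ids: "
                + added
                + ", removed ids: "
                + removed;
        System.out.println("invariant holds");
    }
}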
@@ -91,9 +91,8 @@ public class RoutingNodesChangedObserver implements RoutingChangesObserver {
     @Override
     public void relocationSourceRemoved(ShardRouting removedReplicaRelocationSource) {
-        assert removedReplicaRelocationSource.primary() == false
-            && removedReplicaRelocationSource.isRelocationTarget() : "expected replica relocation target shard "
-                + removedReplicaRelocationSource;
+        assert removedReplicaRelocationSource.primary() == false && removedReplicaRelocationSource.isRelocationTarget()
+            : "expected replica relocation target shard " + removedReplicaRelocationSource;
         setChanged();
     }
@@ -108,11 +107,8 @@ public class RoutingNodesChangedObserver implements RoutingChangesObserver {
         assert oldReplica.initializing() && oldReplica.primary() == false : "expected initializing replica shard " + oldReplica;
         assert reinitializedReplica.initializing() && reinitializedReplica.primary() == false : "expected reinitialized replica shard "
             + reinitializedReplica;
-        assert oldReplica.allocationId()
-            .getId()
-            .equals(
-                reinitializedReplica.allocationId().getId()
-            ) == false : "expected allocation id to change for reinitialized replica shard (old: "
+        assert oldReplica.allocationId().getId().equals(reinitializedReplica.allocationId().getId()) == false
+            : "expected allocation id to change for reinitialized replica shard (old: "
                 + oldReplica
                 + " new: "
                 + reinitializedReplica

View File
@@ -237,9 +237,8 @@ public class ClusterService extends AbstractLifecycleComponent {
     public static boolean assertClusterOrMasterStateThread() {
         assert Thread.currentThread().getName().contains(ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME)
-            || Thread.currentThread()
-                .getName()
-                .contains(MasterService.MASTER_UPDATE_THREAD_NAME) : "not called from the master/cluster state update thread";
+            || Thread.currentThread().getName().contains(MasterService.MASTER_UPDATE_THREAD_NAME)
+            : "not called from the master/cluster state update thread";
         return true;
     }

View File
@@ -539,8 +539,9 @@ public class MasterService extends AbstractLifecycleComponent {
      */
     public List<PendingClusterTask> pendingTasks() {
         return Arrays.stream(threadPoolExecutor.getPending()).map(pending -> {
-            assert pending.task instanceof SourcePrioritizedRunnable : "thread pool executor should only use SourcePrioritizedRunnable instances but found: "
-                + pending.task.getClass().getName();
+            assert pending.task instanceof SourcePrioritizedRunnable
+                : "thread pool executor should only use SourcePrioritizedRunnable instances but found: "
+                    + pending.task.getClass().getName();
             SourcePrioritizedRunnable task = (SourcePrioritizedRunnable) pending.task;
             return new PendingClusterTask(
                 pending.insertionOrder,

View File
@@ -71,9 +71,8 @@ public abstract class TaskBatcher {
             return;
         }
         final BatchedTask firstTask = tasks.get(0);
-        assert tasks.stream()
-            .allMatch(t -> t.batchingKey == firstTask.batchingKey) : "tasks submitted in a batch should share the same batching key: "
-                + tasks;
+        assert tasks.stream().allMatch(t -> t.batchingKey == firstTask.batchingKey)
+            : "tasks submitted in a batch should share the same batching key: " + tasks;
         // convert to an identity map to check for dups based on task identity
         final Map<Object, BatchedTask> tasksIdentity = tasks.stream()
             .collect(
@@ -124,8 +123,8 @@ public abstract class TaskBatcher {
         if (toRemove.isEmpty() == false) {
             BatchedTask firstTask = toRemove.get(0);
             Object batchingKey = firstTask.batchingKey;
-            assert tasks.stream()
-                .allMatch(t -> t.batchingKey == batchingKey) : "tasks submitted in a batch should share the same batching key: " + tasks;
+            assert tasks.stream().allMatch(t -> t.batchingKey == batchingKey)
+                : "tasks submitted in a batch should share the same batching key: " + tasks;
             synchronized (tasksPerBatchingKey) {
                 LinkedHashSet<BatchedTask> existingTasks = tasksPerBatchingKey.get(batchingKey);
                 if (existingTasks != null) {

View File
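Both TaskBatcher hunks assert the same invariant: every task submitted in one batch shares a single, identity-compared batching key. A hedged, stand-alone sketch of that check (illustrative names, not the OpenSearch classes themselves):

import java.util.List;

public class BatchKeyDemo {
    record BatchedTask(Object batchingKey, String source) {}

    public static void main(String[] args) {
        Object key = new Object();
        List<BatchedTask> tasks = List.of(new BatchedTask(key, "t1"), new BatchedTask(key, "t2"));
        BatchedTask firstTask = tasks.get(0);
        // == on purpose: the batching key is compared by identity, not equals()
        assert tasks.stream().allMatch(t -> t.batchingKey() == firstTask.batchingKey())
            : "tasks submitted in a batch should share the same batching key: " + tasks;
        System.out.println("batch of " + tasks.size() + " tasks shares one key");
    }
}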
@@ -570,12 +570,8 @@ public abstract class LocalTimeOffset {
             long utcStart = transition.toEpochSecond() * 1000;
             long offsetBeforeMillis = transition.getOffsetBefore().getTotalSeconds() * 1000;
             long offsetAfterMillis = transition.getOffsetAfter().getTotalSeconds() * 1000;
-            assert (false == previous instanceof Transition)
-                || ((Transition) previous).startUtcMillis < utcStart : "transition list out of order at ["
-                    + previous
-                    + "] and ["
-                    + transition
-                    + "]";
+            assert (false == previous instanceof Transition) || ((Transition) previous).startUtcMillis < utcStart
+                : "transition list out of order at [" + previous + "] and [" + transition + "]";
             assert previous.millis != offsetAfterMillis : "transition list is has a duplicate at ["
                 + previous
                 + "] and ["

View File
@@ -117,10 +117,8 @@ final class PerThreadIDVersionAndSeqNoLookup {
      * entirely for these readers.
      */
     public DocIdAndVersion lookupVersion(BytesRef id, boolean loadSeqNo, LeafReaderContext context) throws IOException {
-        assert context.reader()
-            .getCoreCacheHelper()
-            .getKey()
-            .equals(readerKey) : "context's reader is not the same as the reader class was initialized on.";
+        assert context.reader().getCoreCacheHelper().getKey().equals(readerKey)
+            : "context's reader is not the same as the reader class was initialized on.";
         int docID = getDocID(id, context);
         if (docID != DocIdSetIterator.NO_MORE_DOCS) {
@@ -174,10 +172,8 @@ final class PerThreadIDVersionAndSeqNoLookup {
     /** Return null if id is not found. */
     DocIdAndSeqNo lookupSeqNo(BytesRef id, LeafReaderContext context) throws IOException {
-        assert context.reader()
-            .getCoreCacheHelper()
-            .getKey()
-            .equals(readerKey) : "context's reader is not the same as the reader class was initialized on.";
+        assert context.reader().getCoreCacheHelper().getKey().equals(readerKey)
+            : "context's reader is not the same as the reader class was initialized on.";
         final int docID = getDocID(id, context);
         if (docID != DocIdSetIterator.NO_MORE_DOCS) {
             final long seqNo = readNumericDocValues(context.reader(), SeqNoFieldMapper.NAME, docID);

View File
@@ -173,9 +173,8 @@ public class Setting<T> implements ToXContentObject {
         Validator<T> validator,
         Property... properties
     ) {
-        assert this instanceof SecureSetting
-            || this.isGroupSetting()
-            || parser.apply(defaultValue.apply(Settings.EMPTY)) != null : "parser returned null";
+        assert this instanceof SecureSetting || this.isGroupSetting() || parser.apply(defaultValue.apply(Settings.EMPTY)) != null
+            : "parser returned null";
         this.key = key;
         this.fallbackSetting = fallbackSetting;
         this.defaultValue = defaultValue;

View File
@@ -455,12 +455,11 @@ public class BigArrays {
     private <T extends AbstractBigArray> T resizeInPlace(T array, long newSize) {
         final long oldMemSize = array.ramBytesUsed();
         final long oldSize = array.size();
-        assert oldMemSize == array.ramBytesEstimated(
-            oldSize
-        ) : "ram bytes used should equal that which was previously estimated: ramBytesUsed="
-            + oldMemSize
-            + ", ramBytesEstimated="
-            + array.ramBytesEstimated(oldSize);
+        assert oldMemSize == array.ramBytesEstimated(oldSize)
+            : "ram bytes used should equal that which was previously estimated: ramBytesUsed="
+                + oldMemSize
+                + ", ramBytesEstimated="
+                + array.ramBytesEstimated(oldSize);
         final long estimatedIncreaseInBytes = array.ramBytesEstimated(newSize) - oldMemSize;
         adjustBreaker(estimatedIncreaseInBytes, false);
         array.resize(newSize);

View File
@@ -178,12 +178,11 @@ public final class QueueResizingOpenSearchThreadPoolExecutor extends OpenSearchT
         final long totalNanos = totalTaskNanos.addAndGet(taskNanos);
         final long taskExecutionNanos = timedRunnable.getTotalExecutionNanos();
-        assert taskExecutionNanos >= 0
-            || (failedOrRejected
-                && taskExecutionNanos == -1) : "expected task to always take longer than 0 nanoseconds or have '-1' failure code, got: "
-                    + taskExecutionNanos
-                    + ", failedOrRejected: "
-                    + failedOrRejected;
+        assert taskExecutionNanos >= 0 || (failedOrRejected && taskExecutionNanos == -1)
+            : "expected task to always take longer than 0 nanoseconds or have '-1' failure code, got: "
+                + taskExecutionNanos
+                + ", failedOrRejected: "
+                + failedOrRejected;
         if (taskExecutionNanos != -1) {
             // taskExecutionNanos may be -1 if the task threw an exception
             executionEWMA.addValue(taskExecutionNanos);

View File
@@ -563,30 +563,25 @@ public final class NodeEnvironment implements Closeable {
     }
     private static boolean assertPathsDoNotExist(final Path[] paths) {
-        Set<Path> existingPaths = Stream.of(paths)
-            .filter(FileSystemUtils::exists)
-            .filter(
-                leftOver -> {
-                    // Relaxed assertion for the special case where only the empty state directory exists after deleting
-                    // the shard directory because it was created again as a result of a metadata read action concurrently.
-                    try (DirectoryStream<Path> children = Files.newDirectoryStream(leftOver)) {
-                        Iterator<Path> iter = children.iterator();
-                        if (iter.hasNext() == false) {
-                            return true;
-                        }
-                        Path maybeState = iter.next();
-                        if (iter.hasNext() || maybeState.equals(leftOver.resolve(MetadataStateFormat.STATE_DIR_NAME)) == false) {
-                            return true;
-                        }
-                        try (DirectoryStream<Path> stateChildren = Files.newDirectoryStream(maybeState)) {
-                            return stateChildren.iterator().hasNext();
-                        }
-                    } catch (IOException e) {
-                        throw new UncheckedIOException(e);
-                    }
-                }
-            )
-            .collect(Collectors.toSet());
+        Set<Path> existingPaths = Stream.of(paths).filter(FileSystemUtils::exists).filter(leftOver -> {
+            // Relaxed assertion for the special case where only the empty state directory exists after deleting
+            // the shard directory because it was created again as a result of a metadata read action concurrently.
+            try (DirectoryStream<Path> children = Files.newDirectoryStream(leftOver)) {
+                Iterator<Path> iter = children.iterator();
+                if (iter.hasNext() == false) {
+                    return true;
+                }
+                Path maybeState = iter.next();
+                if (iter.hasNext() || maybeState.equals(leftOver.resolve(MetadataStateFormat.STATE_DIR_NAME)) == false) {
+                    return true;
+                }
+                try (DirectoryStream<Path> stateChildren = Files.newDirectoryStream(maybeState)) {
+                    return stateChildren.iterator().hasNext();
+                }
+            } catch (IOException e) {
+                throw new UncheckedIOException(e);
+            }
+        }).collect(Collectors.toSet());
         assert existingPaths.size() == 0 : "Paths exist that should have been deleted: " + existingPaths;
         return existingPaths.size() == 0;
     }

View File
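The NodeEnvironment hunk only re-indents the filter lambda, but the check it performs is easy to miss in diff form: a leftover shard directory is tolerated only if it is empty, or contains nothing but an empty state directory. A simplified sketch of the DirectoryStream-based emptiness test (hypothetical helper, same java.nio.file calls):

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;

public class EmptyDirCheck {
    // true when the directory has no children at all
    static boolean isEmpty(Path dir) {
        try (DirectoryStream<Path> children = Files.newDirectoryStream(dir)) {
            return children.iterator().hasNext() == false;
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    public static void main(String[] args) throws IOException {
        Path tmp = Files.createTempDirectory("empty-dir-check");
        System.out.println(tmp + " empty? " + isEmpty(tmp));
    }
}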
@@ -125,8 +125,8 @@ public final class NodeMetadata {
         public NodeMetadata build() {
             final Version nodeVersion;
             if (this.nodeVersion == null) {
-                assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major
-                    + 1 : "version is required in the node metadata from v9 onwards";
+                assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major + 1
+                    : "version is required in the node metadata from v9 onwards";
                 nodeVersion = Version.V_EMPTY;
             } else {
                 nodeVersion = this.nodeVersion;

View File
@@ -134,8 +134,8 @@ public class GatewayMetaState implements Closeable {
         long currentTerm = onDiskState.currentTerm;
         if (onDiskState.empty()) {
-            assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major
-                + 1 : "legacy metadata loader is not needed anymore from v9 onwards";
+            assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major + 1
+                : "legacy metadata loader is not needed anymore from v9 onwards";
             final Tuple<Manifest, Metadata> legacyState = metaStateService.loadFullState();
             if (legacyState.v1().isEmpty() == false) {
                 metadata = legacyState.v2();

View File
@@ -368,11 +368,10 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
         }
         if (allocationId != null) {
-            assert nodeShardState.storeException() == null
-                || nodeShardState
-                    .storeException() instanceof ShardLockObtainFailedException : "only allow store that can be opened or that throws a ShardLockObtainFailedException while being opened but got a "
-                        + "store throwing "
-                        + nodeShardState.storeException();
+            assert nodeShardState.storeException() == null || nodeShardState.storeException() instanceof ShardLockObtainFailedException
+                : "only allow store that can be opened or that throws a ShardLockObtainFailedException while being opened but got a "
+                    + "store throwing "
+                    + nodeShardState.storeException();
             numberOfAllocationsFound++;
             if (matchAnyShard || inSyncAllocationIds.contains(nodeShardState.allocationId())) {
                 nodeShardStates.add(nodeShardState);

View File
@@ -128,8 +128,8 @@ public class DefaultRestChannel extends AbstractRestChannel implements RestChann
                 finalContent = BytesArray.EMPTY;
             }
         } catch (IllegalArgumentException ignored) {
-            assert restResponse
-                .status() == RestStatus.METHOD_NOT_ALLOWED : "request HTTP method is unsupported but HTTP status is not METHOD_NOT_ALLOWED(405)";
+            assert restResponse.status() == RestStatus.METHOD_NOT_ALLOWED
+                : "request HTTP method is unsupported but HTTP status is not METHOD_NOT_ALLOWED(405)";
         }
         final HttpResponse httpResponse = httpRequest.createResponse(restResponse.status(), finalContent);

View File
@@ -57,11 +57,8 @@ public class PerFieldMappingPostingFormatCodec extends Lucene87Codec {
     private final DocValuesFormat dvFormat = new Lucene80DocValuesFormat(Lucene80DocValuesFormat.Mode.BEST_COMPRESSION);
     static {
-        assert Codec.forName(Lucene.LATEST_CODEC)
-            .getClass()
-            .isAssignableFrom(PerFieldMappingPostingFormatCodec.class) : "PerFieldMappingPostingFormatCodec must subclass the latest "
-                + "lucene codec: "
-                + Lucene.LATEST_CODEC;
+        assert Codec.forName(Lucene.LATEST_CODEC).getClass().isAssignableFrom(PerFieldMappingPostingFormatCodec.class)
+            : "PerFieldMappingPostingFormatCodec must subclass the latest " + "lucene codec: " + Lucene.LATEST_CODEC;
     }
     public PerFieldMappingPostingFormatCodec(Mode compressionMode, MapperService mapperService, Logger logger) {

View File
@@ -1466,11 +1466,8 @@ public abstract class Engine implements Closeable {
         assert (origin == Origin.PRIMARY) == (versionType != null) : "invalid version_type=" + versionType + " for origin=" + origin;
         assert ifPrimaryTerm >= 0 : "ifPrimaryTerm [" + ifPrimaryTerm + "] must be non negative";
         assert ifSeqNo == UNASSIGNED_SEQ_NO || ifSeqNo >= 0 : "ifSeqNo [" + ifSeqNo + "] must be non negative or unset";
-        assert (origin == Origin.PRIMARY)
-            || (ifSeqNo == UNASSIGNED_SEQ_NO
-                && ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM) : "cas operations are only allowed if origin is primary. get ["
-                    + origin
-                    + "]";
+        assert (origin == Origin.PRIMARY) || (ifSeqNo == UNASSIGNED_SEQ_NO && ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM)
+            : "cas operations are only allowed if origin is primary. get [" + origin + "]";
         this.doc = doc;
         this.isRetry = isRetry;
         this.autoGeneratedIdTimestamp = autoGeneratedIdTimestamp;
@@ -1585,11 +1582,8 @@ public abstract class Engine implements Closeable {
         assert (origin == Origin.PRIMARY) == (versionType != null) : "invalid version_type=" + versionType + " for origin=" + origin;
         assert ifPrimaryTerm >= 0 : "ifPrimaryTerm [" + ifPrimaryTerm + "] must be non negative";
         assert ifSeqNo == UNASSIGNED_SEQ_NO || ifSeqNo >= 0 : "ifSeqNo [" + ifSeqNo + "] must be non negative or unset";
-        assert (origin == Origin.PRIMARY)
-            || (ifSeqNo == UNASSIGNED_SEQ_NO
-                && ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM) : "cas operations are only allowed if origin is primary. get ["
-                    + origin
-                    + "]";
+        assert (origin == Origin.PRIMARY) || (ifSeqNo == UNASSIGNED_SEQ_NO && ifPrimaryTerm == UNASSIGNED_PRIMARY_TERM)
+            : "cas operations are only allowed if origin is primary. get [" + origin + "]";
         this.type = Objects.requireNonNull(type);
         this.id = Objects.requireNonNull(id);
         this.ifSeqNo = ifSeqNo;

View File
@@ -480,8 +480,8 @@ public class InternalEngine extends Engine {
         }
         syncTranslog(); // to persist noops associated with the advancement of the local checkpoint
-        assert localCheckpointTracker
-            .getPersistedCheckpoint() == maxSeqNo : "persisted local checkpoint did not advance to max seq no; is ["
+        assert localCheckpointTracker.getPersistedCheckpoint() == maxSeqNo
+            : "persisted local checkpoint did not advance to max seq no; is ["
                 + localCheckpointTracker.getPersistedCheckpoint()
                 + "], max seq no ["
                 + maxSeqNo
@@ -1348,10 +1348,10 @@ public class InternalEngine extends Engine {
         int reservedDocs,
         IndexResult earlyResultOnPreFlightError
     ) {
-        assert useLuceneUpdateDocument == false
-            || indexIntoLucene : "use lucene update is set to true, but we're not indexing into lucene";
-        assert (indexIntoLucene
-            && earlyResultOnPreFlightError != null) == false : "can only index into lucene or have a preflight result but not both."
+        assert useLuceneUpdateDocument == false || indexIntoLucene
+            : "use lucene update is set to true, but we're not indexing into lucene";
+        assert (indexIntoLucene && earlyResultOnPreFlightError != null) == false
+            : "can only index into lucene or have a preflight result but not both."
                 + "indexIntoLucene: "
                 + indexIntoLucene
                 + " earlyResultOnPreFlightError:"
@@ -1699,8 +1699,8 @@ public class InternalEngine extends Engine {
         int reservedDocs,
         DeleteResult earlyResultOnPreflightError
     ) {
-        assert (deleteFromLucene
-            && earlyResultOnPreflightError != null) == false : "can only delete from lucene or have a preflight result but not both."
+        assert (deleteFromLucene && earlyResultOnPreflightError != null) == false
+            : "can only delete from lucene or have a preflight result but not both."
                 + "deleteFromLucene: "
                 + deleteFromLucene
                 + " earlyResultOnPreFlightError:"
@@ -1808,9 +1808,8 @@ public class InternalEngine extends Engine {
             tombstone.version().setLongValue(1L);
             assert tombstone.docs().size() == 1 : "Tombstone should have a single doc [" + tombstone + "]";
             final ParseContext.Document doc = tombstone.docs().get(0);
-            assert doc.getField(
-                SeqNoFieldMapper.TOMBSTONE_NAME
-            ) != null : "Noop tombstone document but _tombstone field is not set [" + doc + " ]";
+            assert doc.getField(SeqNoFieldMapper.TOMBSTONE_NAME) != null
+                : "Noop tombstone document but _tombstone field is not set [" + doc + " ]";
             doc.add(softDeletesField);
             indexWriter.addDocument(doc);
         } catch (final Exception ex) {
@@ -2367,9 +2366,8 @@ public class InternalEngine extends Engine {
     @Override
     protected final void closeNoLock(String reason, CountDownLatch closedLatch) {
         if (isClosed.compareAndSet(false, true)) {
-            assert rwl.isWriteLockedByCurrentThread()
-                || failEngineLock
-                    .isHeldByCurrentThread() : "Either the write lock must be held or the engine must be currently be failing itself";
+            assert rwl.isWriteLockedByCurrentThread() || failEngineLock.isHeldByCurrentThread()
+                : "Either the write lock must be held or the engine must be currently be failing itself";
             try {
                 this.versionMap.clear();
                 if (internalReaderManager != null) {

View File
@@ -140,8 +140,8 @@ public class GetResult implements Writeable, Iterable<DocumentField>, ToXContent
             + seqNo
             + " primaryTerm: "
             + primaryTerm;
-        assert exists
-            || (seqNo == UNASSIGNED_SEQ_NO && primaryTerm == UNASSIGNED_PRIMARY_TERM) : "doc not found but seqNo/primaryTerm are set";
+        assert exists || (seqNo == UNASSIGNED_SEQ_NO && primaryTerm == UNASSIGNED_PRIMARY_TERM)
+            : "doc not found but seqNo/primaryTerm are set";
         this.version = version;
         this.exists = exists;
         this.source = source;

View File
@@ -230,9 +230,8 @@ public class LocalCheckpointTracker {
     @SuppressForbidden(reason = "Object#notifyAll")
     private void updateCheckpoint(AtomicLong checkPoint, LongObjectHashMap<CountedBitSet> bitSetMap) {
         assert Thread.holdsLock(this);
-        assert getBitSetForSeqNo(bitSetMap, checkPoint.get() + 1).get(
-            seqNoToBitSetOffset(checkPoint.get() + 1)
-        ) : "updateCheckpoint is called but the bit following the checkpoint is not set";
+        assert getBitSetForSeqNo(bitSetMap, checkPoint.get() + 1).get(seqNoToBitSetOffset(checkPoint.get() + 1))
+            : "updateCheckpoint is called but the bit following the checkpoint is not set";
         try {
             // keep it simple for now, get the checkpoint one by one; in the future we can optimize and read words
             long bitSetKey = getBitSetKey(checkPoint.get());

View File
@@ -845,23 +845,15 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
         assert pendingInSync.isEmpty() || (primaryMode && !handoffInProgress);
         // the computed global checkpoint is always up-to-date
-        assert !primaryMode
-            || globalCheckpoint == computeGlobalCheckpoint(
-                pendingInSync,
-                checkpoints.values(),
-                globalCheckpoint
-            ) : "global checkpoint is not up-to-date, expected: "
+        assert !primaryMode || globalCheckpoint == computeGlobalCheckpoint(pendingInSync, checkpoints.values(), globalCheckpoint)
+            : "global checkpoint is not up-to-date, expected: "
                 + computeGlobalCheckpoint(pendingInSync, checkpoints.values(), globalCheckpoint)
                 + " but was: "
                 + globalCheckpoint;
         // when in primary mode, the global checkpoint is at most the minimum local checkpoint on all in-sync shard copies
-        assert !primaryMode
-            || globalCheckpoint <= inSyncCheckpointStates(
-                checkpoints,
-                CheckpointState::getLocalCheckpoint,
-                LongStream::min
-            ) : "global checkpoint ["
+        assert !primaryMode || globalCheckpoint <= inSyncCheckpointStates(checkpoints, CheckpointState::getLocalCheckpoint, LongStream::min)
+            : "global checkpoint ["
                 + globalCheckpoint
                 + "] "
                 + "for primary mode allocation ID ["
@@ -877,11 +869,8 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
             + " but replication group is "
             + replicationGroup;
-        assert replicationGroup == null
-            || replicationGroup.equals(calculateReplicationGroup()) : "cached replication group out of sync: expected: "
-                + calculateReplicationGroup()
-                + " but was: "
-                + replicationGroup;
+        assert replicationGroup == null || replicationGroup.equals(calculateReplicationGroup())
+            : "cached replication group out of sync: expected: " + calculateReplicationGroup() + " but was: " + replicationGroup;
         // all assigned shards from the routing table are tracked
         assert routingTable == null || checkpoints.keySet().containsAll(routingTable.getAllAllocationIds()) : "local checkpoints "
@@ -907,9 +896,8 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
         // all tracked shard copies have a corresponding peer-recovery retention lease
         for (final ShardRouting shardRouting : routingTable.assignedShards()) {
             if (checkpoints.get(shardRouting.allocationId().getId()).tracked) {
-                assert retentionLeases.contains(
-                    getPeerRecoveryRetentionLeaseId(shardRouting)
-                ) : "no retention lease for tracked shard [" + shardRouting + "] in " + retentionLeases;
+                assert retentionLeases.contains(getPeerRecoveryRetentionLeaseId(shardRouting))
+                    : "no retention lease for tracked shard [" + shardRouting + "] in " + retentionLeases;
                 assert PEER_RECOVERY_RETENTION_LEASE_SOURCE.equals(
                     retentionLeases.get(getPeerRecoveryRetentionLeaseId(shardRouting)).source()
                 ) : "incorrect source ["
@@ -1190,13 +1178,11 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
         if (applyingClusterStateVersion > appliedClusterStateVersion) {
             // check that the master does not fabricate new in-sync entries out of thin air once we are in primary mode
             assert !primaryMode
-                || inSyncAllocationIds.stream()
-                    .allMatch(
-                        inSyncId -> checkpoints.containsKey(inSyncId) && checkpoints.get(inSyncId).inSync
-                    ) : "update from master in primary mode contains in-sync ids "
-                        + inSyncAllocationIds
-                        + " that have no matching entries in "
-                        + checkpoints;
+                || inSyncAllocationIds.stream().allMatch(inSyncId -> checkpoints.containsKey(inSyncId) && checkpoints.get(inSyncId).inSync)
+                : "update from master in primary mode contains in-sync ids "
+                    + inSyncAllocationIds
+                    + " that have no matching entries in "
+                    + checkpoints;
             // remove entries which don't exist on master
             Set<String> initializingAllocationIds = routingTable.getAllInitializingShards()
                 .stream()

View File
@@ -516,9 +516,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
             assert currentRouting.active() == false : "we are in POST_RECOVERY, but our shard routing is active " + currentRouting;
             assert currentRouting.isRelocationTarget() == false
                 || currentRouting.primary() == false
-                || replicationTracker
-                    .isPrimaryMode() : "a primary relocation is completed by the master, but primary mode is not active "
-                        + currentRouting;
+                || replicationTracker.isPrimaryMode()
+                : "a primary relocation is completed by the master, but primary mode is not active " + currentRouting;
             changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
         } else if (currentRouting.primary()
@@ -533,12 +532,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                 "Shard is marked as relocated, cannot safely move to state " + newRouting.state()
             );
         }
-        assert newRouting.active() == false
-            || state == IndexShardState.STARTED
-            || state == IndexShardState.CLOSED : "routing is active, but local shard state isn't. routing: "
-                + newRouting
-                + ", local state: "
-                + state;
+        assert newRouting.active() == false || state == IndexShardState.STARTED || state == IndexShardState.CLOSED
+            : "routing is active, but local shard state isn't. routing: " + newRouting + ", local state: " + state;
         persistMetadata(path, indexSettings, newRouting, currentRouting, logger);
         final CountDownLatch shardStateUpdated = new CountDownLatch(1);
@@ -726,8 +721,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> {
             forceRefreshes.close();
             // no shard operation permits are being held here, move state from started to relocated
-            assert indexShardOperationPermits
-                .getActiveOperationsCount() == OPERATIONS_BLOCKED : "in-flight operations in progress while moving shard state to relocated";
+            assert indexShardOperationPermits.getActiveOperationsCount() == OPERATIONS_BLOCKED
+                : "in-flight operations in progress while moving shard state to relocated";
             /*
              * We should not invoke the runnable under the mutex as the expected implementation is to handoff the primary context via a
              * network operation. Doing this under the mutex can implicitly block the cluster state update thread on network operations.
@@ -1516,9 +1511,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }
     private Engine.Searcher wrapSearcher(Engine.Searcher searcher) {
-        assert OpenSearchDirectoryReader.unwrap(
-            searcher.getDirectoryReader()
-        ) != null : "DirectoryReader must be an instance or OpenSearchDirectoryReader";
+        assert OpenSearchDirectoryReader.unwrap(searcher.getDirectoryReader()) != null
+            : "DirectoryReader must be an instance or OpenSearchDirectoryReader";
         boolean success = false;
         try {
             final Engine.Searcher newSearcher = readerWrapper == null ? searcher : wrapSearcher(searcher, readerWrapper);
@@ -1945,8 +1939,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         // but we need to make sure we don't loose deletes until we are done recovering
         config.setEnableGcDeletes(false);
         updateRetentionLeasesOnReplica(loadRetentionLeases());
-        assert recoveryState.getRecoverySource().expectEmptyRetentionLeases() == false
-            || getRetentionLeases().leases().isEmpty() : "expected empty set of retention leases with recovery source ["
+        assert recoveryState.getRecoverySource().expectEmptyRetentionLeases() == false || getRetentionLeases().leases().isEmpty()
+            : "expected empty set of retention leases with recovery source ["
                 + recoveryState.getRecoverySource()
                 + "] but got "
                 + getRetentionLeases();
@@ -2085,9 +2079,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
             assert assertReplicationTarget();
         } else {
             assert origin == Engine.Operation.Origin.LOCAL_RESET;
-            assert getActiveOperationsCount() == OPERATIONS_BLOCKED : "locally resetting without blocking operations, active operations are ["
-                + getActiveOperations()
-                + "]";
+            assert getActiveOperationsCount() == OPERATIONS_BLOCKED
+                : "locally resetting without blocking operations, active operations are [" + getActiveOperations() + "]";
         }
         if (writeAllowedStates.contains(state) == false) {
             throw new IllegalIndexShardStateException(
@@ -2793,8 +2786,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         * while the global checkpoint update may have emanated from the primary when we were in that state, we could subsequently move
         * to recovery finalization, or even finished recovery before the update arrives here.
         */
-        assert state() != IndexShardState.POST_RECOVERY
-            && state() != IndexShardState.STARTED : "supposedly in-sync shard copy received a global checkpoint ["
+        assert state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.STARTED
+            : "supposedly in-sync shard copy received a global checkpoint ["
                 + globalCheckpoint
                 + "] "
                 + "that is higher than its local checkpoint ["
@@ -2811,9 +2804,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         * @param primaryContext the sequence number context
         */
     public void activateWithPrimaryContext(final ReplicationTracker.PrimaryContext primaryContext) {
-        assert shardRouting.primary()
-            && shardRouting.isRelocationTarget() : "only primary relocation target can update allocation IDs from primary context: "
-                + shardRouting;
+        assert shardRouting.primary() && shardRouting.isRelocationTarget()
+            : "only primary relocation target can update allocation IDs from primary context: " + shardRouting;
         assert primaryContext.getCheckpointStates().containsKey(routingEntry().allocationId().getId()) : "primary context ["
             + primaryContext
             + "] does not contain relocation target ["

View File
@@ -85,9 +85,8 @@ public class ReplicationGroup {
                 replicationTargets.add(relocationTarget);
             } else {
                 skippedShards.add(relocationTarget);
-                assert inSyncAllocationIds.contains(
-                    relocationTarget.allocationId().getId()
-                ) == false : "in-sync shard copy but not tracked: " + shard;
+                assert inSyncAllocationIds.contains(relocationTarget.allocationId().getId()) == false
+                    : "in-sync shard copy but not tracked: " + shard;
             }
         }
     }

View File
@@ -62,17 +62,12 @@ public final class ShardPath {
     public ShardPath(boolean isCustomDataPath, Path dataPath, Path shardStatePath, ShardId shardId) {
         assert dataPath.getFileName().toString().equals(Integer.toString(shardId.id())) : "dataPath must end with the shard ID but didn't: "
             + dataPath.toString();
-        assert shardStatePath.getFileName()
-            .toString()
-            .equals(Integer.toString(shardId.id())) : "shardStatePath must end with the shard ID but didn't: " + dataPath.toString();
-        assert dataPath.getParent()
-            .getFileName()
-            .toString()
-            .equals(shardId.getIndex().getUUID()) : "dataPath must end with index path id but didn't: " + dataPath.toString();
-        assert shardStatePath.getParent()
-            .getFileName()
-            .toString()
-            .equals(shardId.getIndex().getUUID()) : "shardStatePath must end with index path id but didn't: " + dataPath.toString();
+        assert shardStatePath.getFileName().toString().equals(Integer.toString(shardId.id()))
+            : "shardStatePath must end with the shard ID but didn't: " + dataPath.toString();
+        assert dataPath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID())
+            : "dataPath must end with index path id but didn't: " + dataPath.toString();
+        assert shardStatePath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID())
+            : "shardStatePath must end with index path id but didn't: " + dataPath.toString();
         if (isCustomDataPath && dataPath.equals(shardStatePath)) {
             throw new IllegalArgumentException("shard state path must be different to the data path when using custom data paths");
         }

View File
@@ -103,8 +103,8 @@ final class StoreRecovery {
     void recoverFromStore(final IndexShard indexShard, ActionListener<Boolean> listener) {
         if (canRecover(indexShard)) {
             RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType();
-            assert recoveryType == RecoverySource.Type.EMPTY_STORE
-                || recoveryType == RecoverySource.Type.EXISTING_STORE : "expected store recovery type but was: " + recoveryType;
+            assert recoveryType == RecoverySource.Type.EMPTY_STORE || recoveryType == RecoverySource.Type.EXISTING_STORE
+                : "expected store recovery type but was: " + recoveryType;
             ActionListener.completeWith(recoveryListener(indexShard, listener), () -> {
                 logger.debug("starting recovery from store ...");
                 internalRecoverFromStore(indexShard);

View File
@@ -1095,9 +1095,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
             Collections.unmodifiableList(different),
             Collections.unmodifiableList(missing)
         );
-        assert recoveryDiff.size() == this.metadata.size() - (metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN)
-            ? 1
-            : 0) : "some files are missing recoveryDiff size: ["
+        assert recoveryDiff.size() == this.metadata.size() - (metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) ? 1 : 0)
+            : "some files are missing recoveryDiff size: ["
                 + recoveryDiff.size()
                 + "] metadata size: ["
                 + this.metadata.size()

View File
@@ -199,10 +199,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
             //
             // For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that
             // file exists. If not we don't even try to clean it up and wait until we fail creating it
-            assert Files.exists(nextTranslogFile) == false
-                || Files.size(nextTranslogFile) <= TranslogHeader.headerSizeInBytes(translogUUID) : "unexpected translog file: ["
-                    + nextTranslogFile
-                    + "]";
+            assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogHeader.headerSizeInBytes(translogUUID)
+                : "unexpected translog file: [" + nextTranslogFile + "]";
             if (Files.exists(currentCheckpointFile) // current checkpoint is already copied
                 && Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning
                 logger.warn(
@@ -399,7 +397,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
     @Override
     public void close() throws IOException {
-        assert calledFromOutsideOrViaTragedyClose() : "Translog.close method is called from inside Translog, but not via closeOnTragicEvent method";
+        assert calledFromOutsideOrViaTragedyClose()
+            : "Translog.close method is called from inside Translog, but not via closeOnTragicEvent method";
         if (closed.compareAndSet(false, true)) {
             try (ReleasableLock lock = writeLock.acquire()) {
                 try {
@@ -439,11 +438,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
         if (readers.isEmpty()) {
            return current.getGeneration();
         } else {
-            assert readers.stream()
-                .map(TranslogReader::getGeneration)
-                .min(Long::compareTo)
-                .get()
-                .equals(readers.get(0).getGeneration()) : "the first translog isn't the one with the minimum generation:" + readers;
+            assert readers.stream().map(TranslogReader::getGeneration).min(Long::compareTo).get().equals(readers.get(0).getGeneration())
+                : "the first translog isn't the one with the minimum generation:" + readers;
             return readers.get(0).getGeneration();
         }
     }
@@ -740,10 +736,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
         if (snapshots.length == 0) {
             onClose = () -> {};
         } else {
-            assert Arrays.stream(snapshots)
-                .map(BaseTranslogReader::getGeneration)
-                .min(Long::compareTo)
-                .get() == snapshots[0].generation : "first reader generation of " + snapshots + " is not the smallest";
+            assert Arrays.stream(snapshots).map(BaseTranslogReader::getGeneration).min(Long::compareTo).get() == snapshots[0].generation
+                : "first reader generation of " + snapshots + " is not the smallest";
             onClose = acquireTranslogGenFromDeletionPolicy(snapshots[0].generation);
         }
         boolean success = false;
@@ -759,8 +753,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
     }
     private Stream<? extends BaseTranslogReader> readersAboveMinSeqNo(long minSeqNo) {
-        assert readLock.isHeldByCurrentThread()
-            || writeLock.isHeldByCurrentThread() : "callers of readersAboveMinSeqNo must hold a lock: readLock ["
+        assert readLock.isHeldByCurrentThread() || writeLock.isHeldByCurrentThread()
+            : "callers of readersAboveMinSeqNo must hold a lock: readLock ["
                 + readLock.isHeldByCurrentThread()
                 + "], writeLock ["
                 + readLock.isHeldByCurrentThread()
@@ -1806,8 +1800,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
                 current.sync();
                 deleteReaderFiles(reader);
             }
-            assert readers.isEmpty() == false
-                || current.generation == minReferencedGen : "all readers were cleaned but the minReferenceGen ["
+            assert readers.isEmpty() == false || current.generation == minReferencedGen
+                : "all readers were cleaned but the minReferenceGen ["
                 + minReferencedGen
                 + "] is not the current writer's gen ["
                 + current.generation

View File
@@ -850,8 +850,8 @@ public class IndicesService extends AbstractLifecycleComponent
         IndexShard indexShard = indexService.createShard(shardRouting, globalCheckpointSyncer, retentionLeaseSyncer);
         indexShard.addShardFailureCallback(onShardFailure);
         indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService, (type, mapping) -> {
-            assert recoveryState.getRecoverySource()
-                .getType() == RecoverySource.Type.LOCAL_SHARDS : "mapping update consumer only required by local shards recovery";
+            assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS
+                : "mapping update consumer only required by local shards recovery";
             client.admin()
                 .indices()
                 .preparePutMapping()

View File
@@ -636,13 +636,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple
         ClusterState clusterState
     ) {
         final ShardRouting currentRoutingEntry = shard.routingEntry();
-        assert currentRoutingEntry.isSameAllocation(
-            shardRouting
-        ) : "local shard has a different allocation id but wasn't cleaned by removeShards. "
-            + "cluster state: "
-            + shardRouting
-            + " local: "
-            + currentRoutingEntry;
+        assert currentRoutingEntry.isSameAllocation(shardRouting)
+            : "local shard has a different allocation id but wasn't cleaned by removeShards. "
+                + "cluster state: "
+                + shardRouting
+                + " local: "
+                + currentRoutingEntry;
         final long primaryTerm;
         try {

View File
@@ -260,8 +260,8 @@ public class PeerRecoverySourceService extends AbstractLifecycleComponent implem
         if (removed != null) {
             shard.recoveryStats().decCurrentAsSource();
             removed.cancel();
-            assert nodeToHandlers.getOrDefault(removed.targetNode(), Collections.emptySet())
-                .contains(removed) : "Remote recovery was not properly tracked [" + removed + "]";
+            assert nodeToHandlers.getOrDefault(removed.targetNode(), Collections.emptySet()).contains(removed)
+                : "Remote recovery was not properly tracked [" + removed + "]";
             nodeToHandlers.computeIfPresent(removed.targetNode(), (k, handlersForNode) -> {
                 handlersForNode.remove(removed);
                 if (handlersForNode.isEmpty()) {

View File
@@ -233,12 +233,8 @@ public class PeerRecoveryTargetService implements IndexEventListener {
             logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId());
             indexShard.prepareForIndexRecovery();
             final long startingSeqNo = indexShard.recoverLocallyUpToGlobalCheckpoint();
-            assert startingSeqNo == UNASSIGNED_SEQ_NO
-                || recoveryTarget.state().getStage() == RecoveryState.Stage.TRANSLOG : "unexpected recovery stage ["
-                    + recoveryTarget.state().getStage()
-                    + "] starting seqno [ "
-                    + startingSeqNo
-                    + "]";
+            assert startingSeqNo == UNASSIGNED_SEQ_NO || recoveryTarget.state().getStage() == RecoveryState.Stage.TRANSLOG
+                : "unexpected recovery stage [" + recoveryTarget.state().getStage() + "] starting seqno [ " + startingSeqNo + "]";
             startRequest = getStartRecoveryRequest(logger, clusterService.localNode(), recoveryTarget, startingSeqNo);
             requestToSend = startRequest;
             actionName = PeerRecoverySourceService.Actions.START_RECOVERY;
@@ -469,18 +465,15 @@ public class PeerRecoveryTargetService implements IndexEventListener {
                 request.maxSeqNoOfUpdatesOrDeletesOnPrimary(),
                 request.retentionLeases(),
                 request.mappingVersionOnPrimary(),
-                ActionListener.wrap(
-                    checkpoint -> listener.onResponse(null),
-                    e -> {
-                        // do not retry if the mapping on replica is at least as recent as the mapping
-                        // that the primary used to index the operations in the request.
-                        if (mappingVersionOnTarget < request.mappingVersionOnPrimary() && e instanceof MapperException) {
-                            retryOnMappingException.accept(e);
-                        } else {
-                            listener.onFailure(e);
-                        }
-                    }
-                )
+                ActionListener.wrap(checkpoint -> listener.onResponse(null), e -> {
+                    // do not retry if the mapping on replica is at least as recent as the mapping
+                    // that the primary used to index the operations in the request.
+                    if (mappingVersionOnTarget < request.mappingVersionOnPrimary() && e instanceof MapperException) {
+                        retryOnMappingException.accept(e);
+                    } else {
+                        listener.onFailure(e);
+                    }
+                })
             );
         }
     }

View File
@@ -644,10 +644,8 @@ public class RecoverySourceHandler {
         createRetentionLeaseStep.whenComplete(retentionLease -> {
             final long lastKnownGlobalCheckpoint = shard.getLastKnownGlobalCheckpoint();
-            assert retentionLease == null
-                || retentionLease.retainingSequenceNumber() - 1 <= lastKnownGlobalCheckpoint : retentionLease
-                    + " vs "
-                    + lastKnownGlobalCheckpoint;
+            assert retentionLease == null || retentionLease.retainingSequenceNumber() - 1 <= lastKnownGlobalCheckpoint
+                : retentionLease + " vs " + lastKnownGlobalCheckpoint;
             // Establishes new empty translog on the replica with global checkpoint set to lastKnownGlobalCheckpoint. We want
             // the commit we just copied to be a safe commit on the replica, so why not set the global checkpoint on the replica
             // to the max seqno of this commit? Because (in rare corner cases) this commit might not be a safe commit here on

View File
@@ -138,11 +138,8 @@ public class RecoveryState implements ToXContentFragment, Writeable {
     public RecoveryState(ShardRouting shardRouting, DiscoveryNode targetNode, @Nullable DiscoveryNode sourceNode, Index index) {
         assert shardRouting.initializing() : "only allow initializing shard routing to be recovered: " + shardRouting;
         RecoverySource recoverySource = shardRouting.recoverySource();
-        assert (recoverySource
-            .getType() == RecoverySource.Type.PEER) == (sourceNode != null) : "peer recovery requires source node, recovery type: "
-                + recoverySource.getType()
-                + " source node: "
-                + sourceNode;
+        assert (recoverySource.getType() == RecoverySource.Type.PEER) == (sourceNode != null)
+            : "peer recovery requires source node, recovery type: " + recoverySource.getType() + " source node: " + sourceNode;
         this.shardId = shardRouting.shardId();
         this.primary = shardRouting.primary();
         this.recoverySource = recoverySource;

View File
@@ -98,8 +98,8 @@ public class StartRecoveryRequest extends TransportRequest {
         this.metadataSnapshot = metadataSnapshot;
         this.primaryRelocation = primaryRelocation;
         this.startingSeqNo = startingSeqNo;
-        assert startingSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO
-            || metadataSnapshot.getHistoryUUID() != null : "starting seq no is set but not history uuid";
+        assert startingSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO || metadataSnapshot.getHistoryUUID() != null
+            : "starting seq no is set but not history uuid";
     }
     public long recoveryId() {

View File
@@ -100,59 +100,56 @@ public final class TrackingResultProcessor implements Processor {
                 + ']'
             );
         }
-        ingestDocumentCopy.executePipeline(
-            pipelineToCall,
-            (result, e) -> {
+        ingestDocumentCopy.executePipeline(pipelineToCall, (result, e) -> {
             // special handling for pipeline cycle errors
             if (e instanceof OpenSearchException
                 && e.getCause() instanceof IllegalStateException
                 && e.getCause().getMessage().startsWith(PIPELINE_CYCLE_ERROR_MESSAGE)) {
                 if (ignoreFailure) {
                     processorResultList.add(
                         new SimulateProcessorResult(
                             pipelineProcessor.getType(),
                             pipelineProcessor.getTag(),
                             pipelineProcessor.getDescription(),
                             new IngestDocument(ingestDocument),
                             e,
                             conditionalWithResult
                         )
                     );
                 } else {
                     processorResultList.add(
                         new SimulateProcessorResult(
                             pipelineProcessor.getType(),
                             pipelineProcessor.getTag(),
                             pipelineProcessor.getDescription(),
                             e,
                             conditionalWithResult
                         )
                     );
                 }
                 handler.accept(null, e);
             } else {
                 // now that we know that there are no cycles between pipelines, decorate the processors for this pipeline and
                 // execute it
                 CompoundProcessor verbosePipelineProcessor = decorate(pipeline.getCompoundProcessor(), null, processorResultList);
                 // add the pipeline process to the results
                 processorResultList.add(
                     new SimulateProcessorResult(
                         actualProcessor.getType(),
                         actualProcessor.getTag(),
                         actualProcessor.getDescription(),
                         conditionalWithResult
                     )
                 );
                 Pipeline verbosePipeline = new Pipeline(
                     pipeline.getId(),
                     pipeline.getDescription(),
                     pipeline.getVersion(),
                     verbosePipelineProcessor
                 );
                 ingestDocument.executePipeline(verbosePipeline, handler);
             }
-            }
-        );
+        });
         return;
     }
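Most of the larger hunks in this commit, like the one above, follow a second rule: when the final argument of a call is a lambda, the leading arguments stay on the call line and the lambda body is formatted as a block that closes with `});`. A minimal runnable sketch of that layout, with all names invented for illustration (none come from the commit):

import java.util.function.BiConsumer;

// Hypothetical sketch of the trailing-lambda layout Spotless 6.2.0 applies.
public class TrailingLambdaStyle {
    // stand-in for calls like executePipeline(...) or assertBusy(...) in the hunks
    static void executePipeline(String id, BiConsumer<String, Exception> handler) {
        handler.accept(id, null); // invoke the callback synchronously for the demo
    }

    public static void main(String[] args) {
        // 5.6.1 put `id` and the lambda on separate lines inside the parentheses;
        // 6.2.0 collapses the call header and dedents the body one level.
        executePipeline("demo-pipeline", (result, e) -> {
            if (e != null) {
                throw new IllegalStateException(e);
            }
            System.out.println("handled " + result);
        });
    }
}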

View File

@@ -1059,8 +1059,8 @@ public class Node implements Closeable {
     transportService.getTaskManager().setTaskCancellationService(new TaskCancellationService(transportService));
     transportService.start();
     assert localNodeFactory.getNode() != null;
-    assert transportService.getLocalNode()
-        .equals(localNodeFactory.getNode()) : "transportService has a different local node than the factory provided";
+    assert transportService.getLocalNode().equals(localNodeFactory.getNode())
+        : "transportService has a different local node than the factory provided";
     injector.getInstance(PeerRecoverySourceService.class).start();

     // Load (and maybe upgrade) the metadata stored on disk
@@ -1103,8 +1103,8 @@ public class Node implements Closeable {
     // start after transport service so the local disco is known
     discovery.start(); // start before cluster service so that it can set initial state on ClusterApplierService
     clusterService.start();
-    assert clusterService.localNode()
-        .equals(localNodeFactory.getNode()) : "clusterService has a different local node than the factory provided";
+    assert clusterService.localNode().equals(localNodeFactory.getNode())
+        : "clusterService has a different local node than the factory provided";
     transportService.acceptIncomingRequests();
     discovery.startInitialJoin();
     final TimeValue initialStateTimeout = DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings());

View File

@@ -66,11 +66,8 @@ public final class IndexMetaDataGenerations {
 final Map<String, String> identifiers;

 IndexMetaDataGenerations(Map<SnapshotId, Map<IndexId, String>> lookup, Map<String, String> identifiers) {
-    assert identifiers.keySet()
-        .equals(lookup.values().stream().flatMap(m -> m.values().stream()).collect(Collectors.toSet())) : "identifier mappings "
-            + identifiers
-            + " don't track the same blob ids as the lookup map "
-            + lookup;
+    assert identifiers.keySet().equals(lookup.values().stream().flatMap(m -> m.values().stream()).collect(Collectors.toSet()))
+        : "identifier mappings " + identifiers + " don't track the same blob ids as the lookup map " + lookup;
     assert lookup.values().stream().noneMatch(Map::isEmpty) : "Lookup contained empty map [" + lookup + "]";
     this.lookup = Collections.unmodifiableMap(lookup);
     this.identifiers = Collections.unmodifiableMap(identifiers);

View File

@@ -168,11 +168,8 @@ public final class RepositoryData {
         + shardGenerations.indices()
         + " but snapshots only reference indices "
         + indices.values();
-    assert indexSnapshots.values()
-        .stream()
-        .noneMatch(
-            snapshotIdList -> new HashSet<>(snapshotIdList).size() != snapshotIdList.size()
-        ) : "Found duplicate snapshot ids per index in [" + indexSnapshots + "]";
+    assert indexSnapshots.values().stream().noneMatch(snapshotIdList -> new HashSet<>(snapshotIdList).size() != snapshotIdList.size())
+        : "Found duplicate snapshot ids per index in [" + indexSnapshots + "]";
 }

 protected RepositoryData copy() {
@@ -355,8 +352,8 @@ public final class RepositoryData {
         + "]";
     newIndexMetaGenerations = IndexMetaDataGenerations.EMPTY;
 } else {
-    assert indexMetaBlobs.isEmpty()
-        || shardGenerations.indices().equals(indexMetaBlobs.keySet()) : "Shard generations contained indices "
+    assert indexMetaBlobs.isEmpty() || shardGenerations.indices().equals(indexMetaBlobs.keySet())
+        : "Shard generations contained indices "
         + shardGenerations.indices()
         + " but indexMetaData was given for "
         + indexMetaBlobs.keySet();

View File

@@ -1909,13 +1909,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
         meta.pendingGeneration()
     );
 }
-assert expectedGen == RepositoryData.EMPTY_REPO_GEN
-    || uninitializedMeta
-    || expectedGen == meta.generation() : "Expected non-empty generation ["
-        + expectedGen
-        + "] does not match generation tracked in ["
-        + meta
-        + "]";
+assert expectedGen == RepositoryData.EMPTY_REPO_GEN || uninitializedMeta || expectedGen == meta.generation()
+    : "Expected non-empty generation [" + expectedGen + "] does not match generation tracked in [" + meta + "]";
 // If we run into the empty repo generation for the expected gen, the repo is assumed to have been cleared of
 // all contents by an external process so we reset the safe generation to the empty generation.
 final long safeGeneration = expectedGen == RepositoryData.EMPTY_REPO_GEN

View File

@@ -448,8 +448,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
     SearchShardTask task,
     ActionListener<SearchPhaseResult> listener
 ) {
-    assert request.canReturnNullResponseIfMatchNoDocs() == false
-        || request.numberOfShards() > 1 : "empty responses require more than one shard";
+    assert request.canReturnNullResponseIfMatchNoDocs() == false || request.numberOfShards() > 1
+        : "empty responses require more than one shard";
     final IndexShard shard = getShard(request);
     rewriteAndFetchShardRequest(shard, request, new ActionListener<ShardSearchRequest>() {
         @Override

View File

@@ -226,7 +226,8 @@ public abstract class InternalAggregation implements Aggregation, NamedWriteable
 out.writeString(name);
 out.writeGenericValue(metadata);
 if (out.getVersion().before(LegacyESVersion.V_7_8_0)) {
-    assert pipelineAggregatorsForBwcSerialization != null : "serializing to pre-7.8.0 versions should have called mergePipelineTreeForBWCSerialization";
+    assert pipelineAggregatorsForBwcSerialization != null
+        : "serializing to pre-7.8.0 versions should have called mergePipelineTreeForBWCSerialization";
     out.writeNamedWriteableList(pipelineAggregatorsForBwcSerialization);
 }
 doWriteTo(out);
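The InternalAggregation hunk just above is one of the few places the change runs the other way: a single-line assert that exceeds the configured line width is now split before the ':' rather than left long. A small sketch of that case (hypothetical class and message, not from the commit):

// Hypothetical sketch: an over-long assert is broken before the ':' message clause.
public class LongAssertStyle {
    public static void main(String[] args) {
        Object pipelineAggregators = new Object();
        assert pipelineAggregators != null
            : "serializing to an older wire format should have merged the pipeline tree before writing this aggregation";
        System.out.println("assertion held");
    }
}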

View File

@@ -228,8 +228,8 @@ public class AggregationPath {
 AggregationPath.PathElement token = pathElements.get(0);
 // TODO both unwrap and subAggregator are only used here!
 Aggregator aggregator = ProfilingAggregator.unwrap(root.subAggregator(token.name));
-assert (aggregator instanceof SingleBucketAggregator)
-    || (aggregator instanceof NumericMetricsAggregator) : "this should be picked up before aggregation execution - on validate";
+assert (aggregator instanceof SingleBucketAggregator) || (aggregator instanceof NumericMetricsAggregator)
+    : "this should be picked up before aggregation execution - on validate";
 return aggregator;
 }

View File

@@ -96,8 +96,8 @@ public final class InFlightShardSnapshotStates {
     busyIds.computeIfAbsent(indexName, k -> new HashSet<>()).add(shardId);
     assert assertGenerationConsistency(generations, indexName, shardId, shardState.generation());
 } else if (shardState.state() == SnapshotsInProgress.ShardState.SUCCESS) {
-    assert busyIds.getOrDefault(indexName, Collections.emptySet())
-        .contains(shardId) == false : "Can't have a successful operation queued after an in-progress operation";
+    assert busyIds.getOrDefault(indexName, Collections.emptySet()).contains(shardId) == false
+        : "Can't have a successful operation queued after an in-progress operation";
     generations.computeIfAbsent(indexName, k -> new HashMap<>()).put(shardId, shardState.generation());
 }
 }

View File

@@ -274,9 +274,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
 final IndexId indexId = indicesMap.get(shardId.getIndexName());
 assert indexId != null;
 assert SnapshotsService.useShardGenerations(entry.version())
-    || ShardGenerations.fixShardGeneration(
-        snapshotStatus.generation()
-    ) == null : "Found non-null, non-numeric shard generation ["
+    || ShardGenerations.fixShardGeneration(snapshotStatus.generation()) == null
+    : "Found non-null, non-numeric shard generation ["
     + snapshotStatus.generation()
     + "] for snapshot with old-format compatibility";
 snapshot(shardId, snapshot, indexId, entry.userMetadata(), snapshotStatus, entry.version(), new ActionListener<String>() {

View File

@@ -1924,8 +1924,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
 @Override
 public ClusterState execute(ClusterState currentState) {
-    assert readyDeletions(currentState)
-        .v1() == currentState : "Deletes should have been set to ready by finished snapshot deletes and finalizations";
+    assert readyDeletions(currentState).v1() == currentState
+        : "Deletes should have been set to ready by finished snapshot deletes and finalizations";
     for (SnapshotDeletionsInProgress.Entry entry : currentState.custom(
         SnapshotDeletionsInProgress.TYPE,
         SnapshotDeletionsInProgress.EMPTY
@@ -2667,8 +2667,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
 repositoriesService.getRepositoryData(deleteEntry.repository(), new ActionListener<RepositoryData>() {
     @Override
     public void onResponse(RepositoryData repositoryData) {
-        assert repositoryData
-            .getGenId() == expectedRepoGen : "Repository generation should not change as long as a ready delete is found in the cluster state but found ["
+        assert repositoryData.getGenId() == expectedRepoGen
+            : "Repository generation should not change as long as a ready delete is found in the cluster state but found ["
             + expectedRepoGen
             + "] in cluster state and ["
             + repositoryData.getGenId()
@@ -2746,9 +2746,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
 @Override
 protected void handleListeners(List<ActionListener<Void>> deleteListeners) {
-    assert repositoryData.getSnapshotIds()
-        .stream()
-        .noneMatch(deleteEntry.getSnapshots()::contains) : "Repository data contained snapshot ids "
+    assert repositoryData.getSnapshotIds().stream().noneMatch(deleteEntry.getSnapshots()::contains)
+        : "Repository data contained snapshot ids "
     + repositoryData.getSnapshotIds()
     + " that should should been deleted by ["
     + deleteEntry
@@ -2866,12 +2865,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
     }
 } else {
     leaveRepoLoop(deleteEntry.repository());
-    assert readyDeletions.stream()
-        .noneMatch(entry -> entry.repository().equals(deleteEntry.repository())) : "New finalizations "
-        + newFinalizations
-        + " added even though deletes "
-        + readyDeletions
-        + " are ready";
+    assert readyDeletions.stream().noneMatch(entry -> entry.repository().equals(deleteEntry.repository()))
+        : "New finalizations " + newFinalizations + " added even though deletes " + readyDeletions + " are ready";
     for (SnapshotsInProgress.Entry entry : newFinalizations) {
         endSnapshot(entry, newState.metadata(), repositoryData);
     }
@@ -3837,8 +3832,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
 synchronized boolean assertConsistent() {
     assert (latestKnownMetaData == null && snapshotsToFinalize.isEmpty())
-        || (latestKnownMetaData != null
-            && snapshotsToFinalize.isEmpty() == false) : "Should not hold on to metadata if there are no more queued snapshots";
+        || (latestKnownMetaData != null && snapshotsToFinalize.isEmpty() == false)
+        : "Should not hold on to metadata if there are no more queued snapshots";
     assert snapshotsToFinalize.values().stream().noneMatch(Collection::isEmpty) : "Found empty queue in " + snapshotsToFinalize;
     return true;
 }

View File

@@ -77,11 +77,8 @@ public enum Transports {
 public static boolean assertDefaultThreadContext(ThreadContext threadContext) {
     assert threadContext.getRequestHeadersOnly().isEmpty()
-        || threadContext.getRequestHeadersOnly().size() == 1
-        && threadContext.getRequestHeadersOnly().containsKey(Task.X_OPAQUE_ID) : "expected empty context but was "
-        + threadContext.getRequestHeadersOnly()
-        + " on "
-        + Thread.currentThread().getName();
+        || threadContext.getRequestHeadersOnly().size() == 1 && threadContext.getRequestHeadersOnly().containsKey(Task.X_OPAQUE_ID)
+        : "expected empty context but was " + threadContext.getRequestHeadersOnly() + " on " + Thread.currentThread().getName();
     return true;
 }
 }

View File

@@ -66,8 +66,8 @@ public class JavaJodaTimeDuellingTests extends OpenSearchTestCase {
 public static void checkJvmProperties() {
     boolean runtimeJdk8 = JavaVersion.current().getVersion().get(0) == 8;
     assert (runtimeJdk8 && ("SPI,JRE".equals(System.getProperty("java.locale.providers"))))
-        || (false == runtimeJdk8
-            && ("SPI,COMPAT".equals(System.getProperty("java.locale.providers")))) : "`-Djava.locale.providers` needs to be set";
+        || (false == runtimeJdk8 && ("SPI,COMPAT".equals(System.getProperty("java.locale.providers"))))
+        : "`-Djava.locale.providers` needs to be set";
     assumeFalse(
         "won't work in jdk8 " + "because SPI mechanism is not looking at classpath - needs ISOCalendarDataProvider in jre's ext/libs",
         runtimeJdk8

View File

@@ -311,16 +311,14 @@ public class IndexServiceTests extends OpenSearchSingleNodeTestCase {
 // before that this is why we need to wait for the refresh task to be unscheduled and the first doc to be visible
 assertTrue(refreshTask.isClosed());
 refreshTask = indexService.getRefreshTask();
-assertBusy(
-    () -> {
+assertBusy(() -> {
     // this one either becomes visible due to a concurrently running scheduled refresh OR due to the force refresh
     // we are running on updateMetadata if the interval changes
     try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
         TopDocs search = searcher.search(new MatchAllDocsQuery(), 10);
         assertEquals(1, search.totalHits.value);
     }
-    }
-);
+});
 assertFalse(refreshTask.isClosed());
 // refresh every millisecond
 client().prepareIndex("test", "test", "1").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get();
@@ -330,25 +328,21 @@ public class IndexServiceTests extends OpenSearchSingleNodeTestCase {
     .setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "1ms"))
     .get();
 assertTrue(refreshTask.isClosed());
-assertBusy(
-    () -> {
+assertBusy(() -> {
     // this one becomes visible due to the force refresh we are running on updateMetadata if the interval changes
     try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
         TopDocs search = searcher.search(new MatchAllDocsQuery(), 10);
         assertEquals(2, search.totalHits.value);
     }
-    }
-);
+});
 client().prepareIndex("test", "test", "2").setSource("{\"foo\": \"bar\"}", XContentType.JSON).get();
-assertBusy(
-    () -> {
+assertBusy(() -> {
     // this one becomes visible due to the scheduled refresh
     try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
         TopDocs search = searcher.search(new MatchAllDocsQuery(), 10);
         assertEquals(3, search.totalHits.value);
     }
-    }
-);
+});
 }

 public void testAsyncFsyncActuallyWorks() throws Exception {

View File

@@ -5837,12 +5837,10 @@ public class InternalEngineTests extends EngineTestCase {
 assertThat(engine.getTranslog().stats().getUncommittedOperations(), equalTo(2));
 engine.refresh("test");
 engine.forceMerge(false, 1, false, false, false, UUIDs.randomBase64UUID());
-assertBusy(
-    () -> {
-        // the merge listner runs concurrently after the force merge returned
-        assertThat(engine.shouldPeriodicallyFlush(), equalTo(true));
-    }
-);
+assertBusy(() -> {
+    // the merge listner runs concurrently after the force merge returned
+    assertThat(engine.shouldPeriodicallyFlush(), equalTo(true));
+});
 engine.flush();
 assertThat(engine.shouldPeriodicallyFlush(), equalTo(false));
 }

View File

@@ -209,14 +209,10 @@ public class QueryShardContextTests extends OpenSearchTestCase {
 }

 public void testFielddataLookupSelfReference() {
-    QueryShardContext queryShardContext = createQueryShardContext(
-        "uuid",
-        null,
-        (field, leafLookup, docId) -> {
-            // simulate a runtime field that depends on itself e.g. field: doc['field']
-            return leafLookup.doc().get(field).toString();
-        }
-    );
+    QueryShardContext queryShardContext = createQueryShardContext("uuid", null, (field, leafLookup, docId) -> {
+        // simulate a runtime field that depends on itself e.g. field: doc['field']
+        return leafLookup.doc().get(field).toString();
+    });
     IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> collect("field", queryShardContext));
     assertEquals("Cyclic dependency detected while resolving runtime fields: field -> field", iae.getMessage());
 }

View File

@@ -187,14 +187,12 @@ public class RetentionLeasesReplicationTests extends OpenSearchIndexLevelReplica
 }
 group.syncGlobalCheckpoint();
 group.flush();
-assertBusy(
-    () -> {
-        // we turn off the translog retention policy using the generic threadPool
-        for (IndexShard shard : group) {
-            assertThat(shard.translogStats().estimatedNumberOfOperations(), equalTo(0));
-        }
-    }
-);
+assertBusy(() -> {
+    // we turn off the translog retention policy using the generic threadPool
+    for (IndexShard shard : group) {
+        assertThat(shard.translogStats().estimatedNumberOfOperations(), equalTo(0));
+    }
+});
 }
 }

View File

@@ -1160,23 +1160,21 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase {
     equalTo(expectedLeaseIds)
 );
-assertAsTimePasses.accept(
-    () -> {
+assertAsTimePasses.accept(() -> {
     // Leases still don't expire
     assertThat(
         tracker.getRetentionLeases().leases().stream().map(RetentionLease::id).collect(Collectors.toSet()),
         equalTo(expectedLeaseIds)
     );
     // Also leases are renewed before reaching half the expiry time
     // noinspection OptionalGetWithoutIsPresent
     assertThat(
         tracker.getRetentionLeases() + " renewed before too long",
         tracker.getRetentionLeases().leases().stream().mapToLong(RetentionLease::timestamp).min().getAsLong(),
         greaterThanOrEqualTo(currentTimeMillis.get() - peerRecoveryRetentionLeaseRenewalTimeMillis)
     );
-    }
-);
+});

 IndexShardRoutingTable.Builder routingTableBuilder = new IndexShardRoutingTable.Builder(routingTable);
 for (ShardRouting replicaShard : routingTable.replicaShards()) {
@@ -1188,17 +1186,15 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase {
 tracker.updateFromMaster(initialClusterStateVersion + randomLongBetween(1, 10), ids(activeAllocationIds), routingTable);
-assertAsTimePasses.accept(
-    () -> {
+assertAsTimePasses.accept(() -> {
     // Leases still don't expire
     assertThat(
         tracker.getRetentionLeases().leases().stream().map(RetentionLease::id).collect(Collectors.toSet()),
         equalTo(expectedLeaseIds)
     );
     // ... and any extra peer recovery retention leases are expired immediately since the shard is fully active
     tracker.addPeerRecoveryRetentionLease(randomAlphaOfLength(10), randomNonNegativeLong(), ActionListener.wrap(() -> {}));
-    }
-);
+});

 tracker.renewPeerRecoveryRetentionLeases();
 assertTrue("expired extra lease", tracker.getRetentionLeases(true).v1());

View File

@@ -131,21 +131,12 @@ public class RetentionLeaseBackgroundSyncActionTests extends OpenSearchTestCase
 );
 final CountDownLatch latch = new CountDownLatch(1);
-action.shardOperationOnPrimary(
-    request,
-    indexShard,
-    new LatchedActionListener<>(
-        ActionTestUtils.assertNoFailureListener(
-            result -> {
-                // the retention leases on the shard should be persisted
-                verify(indexShard).persistRetentionLeases();
-                // we should forward the request containing the current retention leases to the replica
-                assertThat(result.replicaRequest(), sameInstance(request));
-            }
-        ),
-        latch
-    )
-);
+action.shardOperationOnPrimary(request, indexShard, new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(result -> {
+    // the retention leases on the shard should be persisted
+    verify(indexShard).persistRetentionLeases();
+    // we should forward the request containing the current retention leases to the replica
+    assertThat(result.replicaRequest(), sameInstance(request));
+}), latch));
 latch.await();
 }

View File

@@ -126,20 +126,14 @@ public class RetentionLeaseSyncActionTests extends OpenSearchTestCase {
 );
 final RetentionLeases retentionLeases = mock(RetentionLeases.class);
 final RetentionLeaseSyncAction.Request request = new RetentionLeaseSyncAction.Request(indexShard.shardId(), retentionLeases);
-action.dispatchedShardOperationOnPrimary(
-    request,
-    indexShard,
-    ActionTestUtils.assertNoFailureListener(
-        result -> {
-            // the retention leases on the shard should be persisted
-            verify(indexShard).persistRetentionLeases();
-            // we should forward the request containing the current retention leases to the replica
-            assertThat(result.replicaRequest(), sameInstance(request));
-            // we should start with an empty replication response
-            assertNull(result.finalResponseIfSuccessful.getShardInfo());
-        }
-    )
-);
+action.dispatchedShardOperationOnPrimary(request, indexShard, ActionTestUtils.assertNoFailureListener(result -> {
+    // the retention leases on the shard should be persisted
+    verify(indexShard).persistRetentionLeases();
+    // we should forward the request containing the current retention leases to the replica
+    assertThat(result.replicaRequest(), sameInstance(request));
+    // we should start with an empty replication response
+    assertNull(result.finalResponseIfSuccessful.getShardInfo());
+}));
 }

 public void testRetentionLeaseSyncActionOnReplica() throws Exception {

View File

@@ -208,31 +208,25 @@ public class SumAggregatorTests extends AggregatorTestCase {
 }

 private void verifySummationOfDoubles(double[] values, double expected, double delta) throws IOException {
-    testAggregation(
-        sum("_name").field(FIELD_NAME),
-        new MatchAllDocsQuery(),
-        iw -> {
+    testAggregation(sum("_name").field(FIELD_NAME), new MatchAllDocsQuery(), iw -> {
         /*
          * The sum agg uses a Kahan sumation on the shard to limit
         * floating point errors. But it doesn't ship the sums to the
          * coordinating node, so floaing point error can creep in when
          * reducing many sums. The test framework aggregates each
          * segment as though it were a separate shard, then reduces
          * those togther. Fun. But it means we don't get the full
          * accuracy of the Kahan sumation. And *that* accuracy is
          * what this method is trying to test. So we have to stick
          * all the documents on the same leaf. `addDocuments` does
          * that.
          */
         iw.addDocuments(
             Arrays.stream(values)
                 .mapToObj(value -> singleton(new NumericDocValuesField(FIELD_NAME, NumericUtils.doubleToSortableLong(value))))
                 .collect(toList())
         );
-        },
-        result -> assertEquals(expected, result.getValue(), delta),
-        defaultFieldType(NumberType.DOUBLE)
-    );
+    }, result -> assertEquals(expected, result.getValue(), delta), defaultFieldType(NumberType.DOUBLE));
 }

 public void testUnmapped() throws IOException {

View File

@@ -80,10 +80,8 @@ public abstract class OpenSearchAllocationTestCase extends OpenSearchTestCase {
 ) {
     @Override
     public Long getShardSize(ShardRouting shardRouting) {
-        assert shardRouting.recoverySource()
-            .getType() == RecoverySource.Type.SNAPSHOT : "Expecting a recovery source of type [SNAPSHOT] but got ["
-            + shardRouting.recoverySource().getType()
-            + ']';
+        assert shardRouting.recoverySource().getType() == RecoverySource.Type.SNAPSHOT
+            : "Expecting a recovery source of type [SNAPSHOT] but got [" + shardRouting.recoverySource().getType() + ']';
         throw new UnsupportedOperationException();
     }
 };

View File

@@ -351,10 +351,8 @@ public abstract class OpenSearchIndexLevelReplicationTestCase extends IndexShard
 }

 public synchronized void addReplica(IndexShard replica) throws IOException {
-    assert shardRoutings().stream()
-        .anyMatch(shardRouting -> shardRouting.isSameAllocation(replica.routingEntry())) == false : "replica with aId ["
-        + replica.routingEntry().allocationId()
-        + "] already exists";
+    assert shardRoutings().stream().anyMatch(shardRouting -> shardRouting.isSameAllocation(replica.routingEntry())) == false
+        : "replica with aId [" + replica.routingEntry().allocationId() + "] already exists";
     replicas.add(replica);
     if (replicationTargets != null) {
         replicationTargets.addReplica(replica);

View File

@@ -454,9 +454,8 @@ public final class InternalTestCluster extends TestCluster {
  * It's only possible to change {@link #bootstrapMasterNodeIndex} value if autoManageMasterNodes is false.
  */
 public void setBootstrapMasterNodeIndex(int bootstrapMasterNodeIndex) {
-    assert autoManageMasterNodes == false
-        || bootstrapMasterNodeIndex == -1 : "bootstrapMasterNodeIndex should be -1 if autoManageMasterNodes is true, but was "
-        + bootstrapMasterNodeIndex;
+    assert autoManageMasterNodes == false || bootstrapMasterNodeIndex == -1
+        : "bootstrapMasterNodeIndex should be -1 if autoManageMasterNodes is true, but was " + bootstrapMasterNodeIndex;
     this.bootstrapMasterNodeIndex = bootstrapMasterNodeIndex;
 }

View File

@@ -284,8 +284,8 @@ public final class XContentTestUtils {
  * </ul>
  */
 static List<String> getInsertPaths(XContentParser parser, Stack<String> currentPath) throws IOException {
-    assert parser.currentToken() == XContentParser.Token.START_OBJECT
-        || parser.currentToken() == XContentParser.Token.START_ARRAY : "should only be called when new objects or arrays start";
+    assert parser.currentToken() == XContentParser.Token.START_OBJECT || parser.currentToken() == XContentParser.Token.START_ARRAY
+        : "should only be called when new objects or arrays start";
     List<String> validPaths = new ArrayList<>();
     // parser.currentName() can be null for root object and unnamed objects in arrays
     if (parser.currentName() != null) {

View File

@@ -2485,31 +2485,26 @@ public abstract class AbstractSimpleTransportTestCase extends OpenSearchTestCase
 MockTransportService serviceC = buildService("TS_C", version0, Settings.EMPTY);
 CountDownLatch receivedLatch = new CountDownLatch(1);
 CountDownLatch sendResponseLatch = new CountDownLatch(1);
-serviceC.registerRequestHandler(
-    "internal:action",
-    ThreadPool.Names.SAME,
-    TestRequest::new,
-    (request, channel, task) -> {
+serviceC.registerRequestHandler("internal:action", ThreadPool.Names.SAME, TestRequest::new, (request, channel, task) -> {
     // don't block on a network thread here
     threadPool.generic().execute(new AbstractRunnable() {
         @Override
         public void onFailure(Exception e) {
             try {
                 channel.sendResponse(e);
             } catch (IOException e1) {
                 throw new UncheckedIOException(e1);
             }
         }

         @Override
         protected void doRun() throws Exception {
             receivedLatch.countDown();
             sendResponseLatch.await();
             channel.sendResponse(TransportResponse.Empty.INSTANCE);
         }
     });
-    }
-);
+});
 serviceC.start();
 serviceC.acceptIncomingRequests();
 CountDownLatch responseLatch = new CountDownLatch(1);
@@ -2564,31 +2559,26 @@ public abstract class AbstractSimpleTransportTestCase extends OpenSearchTestCase
 MockTransportService serviceC = buildService("TS_C", version0, Settings.EMPTY);
 CountDownLatch receivedLatch = new CountDownLatch(1);
 CountDownLatch sendResponseLatch = new CountDownLatch(1);
-serviceB.registerRequestHandler(
-    "internal:action",
-    ThreadPool.Names.SAME,
-    TestRequest::new,
-    (request, channel, task) -> {
+serviceB.registerRequestHandler("internal:action", ThreadPool.Names.SAME, TestRequest::new, (request, channel, task) -> {
     // don't block on a network thread here
     threadPool.generic().execute(new AbstractRunnable() {
         @Override
         public void onFailure(Exception e) {
             try {
                 channel.sendResponse(e);
             } catch (IOException e1) {
                 throw new UncheckedIOException(e1);
             }
         }

         @Override
         protected void doRun() throws Exception {
             receivedLatch.countDown();
             sendResponseLatch.await();
             channel.sendResponse(TransportResponse.Empty.INSTANCE);
         }
     });
-    }
-);
+});
 serviceC.start();
 serviceC.acceptIncomingRequests();
 CountDownLatch responseLatch = new CountDownLatch(1);
@@ -2688,31 +2678,26 @@ public abstract class AbstractSimpleTransportTestCase extends OpenSearchTestCase
 CountDownLatch sendResponseLatch = new CountDownLatch(1);
 Exception ex = new RuntimeException("boom");
 ex.setStackTrace(new StackTraceElement[0]);
-serviceB.registerRequestHandler(
-    "internal:action",
-    ThreadPool.Names.SAME,
-    TestRequest::new,
-    (request, channel, task) -> {
+serviceB.registerRequestHandler("internal:action", ThreadPool.Names.SAME, TestRequest::new, (request, channel, task) -> {
     // don't block on a network thread here
     threadPool.generic().execute(new AbstractRunnable() {
         @Override
         public void onFailure(Exception e) {
             try {
                 channel.sendResponse(e);
             } catch (IOException e1) {
                 throw new UncheckedIOException(e1);
             }
         }

         @Override
         protected void doRun() throws Exception {
             receivedLatch.countDown();
             sendResponseLatch.await();
             onFailure(ex);
         }
     });
-    }
-);
+});
 serviceC.start();
 serviceC.acceptIncomingRequests();
 CountDownLatch responseLatch = new CountDownLatch(1);