Speed-ups to test suite and precommit tasks. (#580)

Use only the C1 JIT compiler (-XX:TieredStopAtLevel=1) for short-lived tasks and unit test execution. Tone
down some of the slowest unit tests. (A short illustrative sketch of how the flag is applied follows the change summary below.)

Signed-off-by: Robert Muir <rmuir@apache.org>
Authored by Robert Muir on 2021-04-20 10:02:45 -04:00; committed by GitHub.
Parent: feb435aeff
Commit: 5474e8d094
15 changed files with 39 additions and 13 deletions
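
For orientation before the per-file diffs: a minimal sketch, not code from this commit, of how the C1-only flag is typically passed to a forked Gradle JVM (the same flag the new LoggedExec.shortLivedArgs() helper returns). The main class and configuration name below are hypothetical placeholders.

import org.gradle.api.Project;

public final class ShortLivedForkSketch {
    // Sketch only: fork a short-lived tool JVM with -XX:TieredStopAtLevel=1 so the
    // JIT stops at C1 and skips C2 warm-up (fast startup, lower peak throughput).
    static void runTool(Project project) {
        project.javaexec(spec -> {
            spec.setMain("com.example.SomeShortLivedTool");                             // hypothetical main class
            spec.classpath(project.getConfigurations().getByName("runtimeClasspath"));  // hypothetical configuration
            spec.jvmArgs("-XX:TieredStopAtLevel=1");                                    // C1 only, as in LoggedExec.shortLivedArgs()
        });
    }
}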

@@ -55,6 +55,8 @@ import java.io.UncheckedIOException;
 import java.io.UnsupportedEncodingException;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
+import java.util.Arrays;
+import java.util.List;
 import java.util.function.Consumer;
 import java.util.function.Function;
 import java.util.regex.Pattern;
@@ -142,6 +144,11 @@ public class LoggedExec extends Exec implements FileSystemOperationsAware {
         return genericExec(project::javaexec, action);
     }
 
+    /** Returns JVM arguments suitable for a short-lived forked task */
+    public static final List<String> shortLivedArgs() {
+        return Arrays.asList(new String[] { "-XX:TieredStopAtLevel=1" });
+    }
+
     private static final Pattern NEWLINE = Pattern.compile(System.lineSeparator());
 
     private static <T extends BaseExecSpec> ExecResult genericExec(Function<Action<T>, ExecResult> function, Action<T> action) {
@@ -153,6 +160,10 @@ public class LoggedExec extends Exec implements FileSystemOperationsAware {
         return function.apply(spec -> {
             spec.setStandardOutput(output);
             spec.setErrorOutput(output);
+            // optimize for short-lived process
+            if (spec instanceof JavaExecSpec) {
+                ((JavaExecSpec) spec).setJvmArgs(shortLivedArgs());
+            }
             action.execute(spec);
             try {
                 output.write(("Output for " + spec.getExecutable() + ":").getBytes(StandardCharsets.UTF_8));

@@ -33,6 +33,7 @@ package org.opensearch.gradle.precommit;
 import de.thetaphi.forbiddenapis.cli.CliMain;
 import org.apache.commons.io.output.NullOutputStream;
+import org.opensearch.gradle.LoggedExec;
 import org.opensearch.gradle.OS;
 import org.opensearch.gradle.dependencies.CompileOnlyResolvePlugin;
 import org.gradle.api.DefaultTask;
@@ -358,6 +359,7 @@ public class ThirdPartyAuditTask extends DefaultTask {
                 getProject().getConfigurations().getByName(CompileOnlyResolvePlugin.RESOLVEABLE_COMPILE_ONLY_CONFIGURATION_NAME)
             );
             spec.jvmArgs("-Xmx1g");
+            spec.jvmArgs(LoggedExec.shortLivedArgs());
             spec.setMain("de.thetaphi.forbiddenapis.cli.CliMain");
             spec.args("-f", getSignatureFile().getAbsolutePath(), "-d", getJarExpandDir(), "--allowmissingclasses");
             spec.setErrorOutput(errorOut);

@@ -24,3 +24,6 @@ systemProp.org.gradle.warning.mode=fail
 # forcing to use TLS1.2 to avoid failure in vault
 # see https://github.com/hashicorp/vault/issues/8750#issuecomment-631236121
 systemProp.jdk.tls.client.protocols=TLSv1.2
+
+# jvm args for faster test execution by default
+systemProp.tests.jvm.argline=-XX:TieredStopAtLevel=1

@@ -68,6 +68,8 @@ restResources {
 tasks.named("test").configure {
   // in WhenThingsGoWrongTests we intentionally generate an out of memory error, this prevents the heap from being dumped to disk
   jvmArgs '-XX:-OmitStackTraceInFastThrow', '-XX:-HeapDumpOnOutOfMemoryError'
+  // TODO: painless tests unexpectedly run extremely slow without C2
+  jvmArgs -= '-XX:TieredStopAtLevel=1'
 }
 
 /* Build Javadoc for the Java classes in Painless's public API that are in the

@@ -50,8 +50,8 @@ import static org.hamcrest.Matchers.equalTo;
  * always sticks. Update-by-query should never revert documents.
  */
 public class UpdateByQueryWhileModifyingTests extends ReindexTestCase {
-    private static final int MAX_MUTATIONS = 50;
-    private static final int MAX_ATTEMPTS = 50;
+    private static final int MAX_MUTATIONS = 10;
+    private static final int MAX_ATTEMPTS = 10;
 
     public void testUpdateWhileReindexing() throws Exception {
         AtomicReference<String> value = new AtomicReference<>(randomSimpleString(random()));

@@ -147,6 +147,11 @@ tasks.withType(JavaCompile).configureEach {
   options.compilerArgs << "-Xlint:-cast,-rawtypes,-unchecked"
 }
 
+tasks.named("internalClusterTest").configure {
+  // TODO: these run faster with C2 only because they run for so, so long
+  jvmArgs -= '-XX:TieredStopAtLevel=1'
+}
+
 // Until this project is always being formatted with spotless, we need to
 // guard against `spotless()` not existing.
 try {

@@ -409,7 +409,7 @@ public class RoundingTests extends OpenSearchTestCase {
     }
 
     public void testRandomTimeIntervalRounding() {
-        for (int i = 0; i < 1000; i++) {
+        for (int i = 0; i < 100; i++) {
             int unitCount = randomIntBetween(1, 365);
             TimeUnit unit = randomFrom(TimeUnit.MINUTES, TimeUnit.HOURS, TimeUnit.DAYS);
             long interval = unit.toMillis(unitCount);
@@ -433,7 +433,7 @@ public class RoundingTests extends OpenSearchTestCase {
             // Round a whole bunch of dates and make sure they line up with the known good java time implementation
             Rounding.Prepared javaTimeRounding = rounding.prepareJavaTime();
-            for (int d = 0; d < 1000; d++) {
+            for (int d = 0; d < 100; d++) {
                 long date = dateBetween(min, max);
                 long javaRounded = javaTimeRounding.round(date);
                 long esRounded = prepared.round(date);

@@ -600,6 +600,7 @@ public class NodeJoinControllerTests extends OpenSearchTestCase {
             assertThat(e.getMessage(), containsString("found existing node"));
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/577")
     public void testRejectingJoinWithIncompatibleVersion() throws InterruptedException, ExecutionException {
         addNodes(randomInt(5));
         final Version badVersion;

@@ -116,7 +116,7 @@ public class CombinedDeletionPolicyTests extends OpenSearchTestCase {
         int safeIndex = 0;
         List<IndexCommit> commitList = new ArrayList<>();
         List<IndexCommit> snapshottingCommits = new ArrayList<>();
-        final int iters = between(10, 100);
+        final int iters = between(5, 10);
         for (int i = 0; i < iters; i++) {
             int newCommits = between(1, 10);
             for (int n = 0; n < newCommits; n++) {

@@ -61,7 +61,7 @@ public class LiveVersionMapTests extends OpenSearchTestCase {
     public void testRamBytesUsed() throws Exception {
        LiveVersionMap map = new LiveVersionMap();
-        for (int i = 0; i < 100000; ++i) {
+        for (int i = 0; i < 10000; ++i) {
            BytesRefBuilder uid = new BytesRefBuilder();
            uid.copyChars(TestUtil.randomSimpleString(random(), 10, 20));
            try (Releasable r = map.acquireLock(uid.toBytesRef())) {
@@ -77,7 +77,7 @@ public class LiveVersionMapTests extends OpenSearchTestCase {
        map.beforeRefresh();
        map.afterRefresh(true);
-        for (int i = 0; i < 100000; ++i) {
+        for (int i = 0; i < 10000; ++i) {
            BytesRefBuilder uid = new BytesRefBuilder();
            uid.copyChars(TestUtil.randomSimpleString(random(), 10, 20));
            try (Releasable r = map.acquireLock(uid.toBytesRef())) {

@@ -3241,7 +3241,7 @@ public class IndexShardTests extends IndexShardTestCase {
     public void testReadSnapshotAndCheckIndexConcurrently() throws Exception {
         final boolean isPrimary = randomBoolean();
         IndexShard indexShard = newStartedShard(isPrimary);
-        final long numDocs = between(10, 100);
+        final long numDocs = between(10, 20);
         for (long i = 0; i < numDocs; i++) {
             indexDoc(indexShard, "_doc", Long.toString(i), "{}");
             if (randomBoolean()) {
@@ -3289,7 +3289,7 @@ public class IndexShardTests extends IndexShardTestCase {
             newShard.markAsRecovering("peer", new RecoveryState(newShard.routingEntry(),
                 getFakeDiscoNode(newShard.routingEntry().currentNodeId()), getFakeDiscoNode(newShard.routingEntry().currentNodeId())));
         }
-        int iters = iterations(10, 100);
+        int iters = iterations(5, 10);
         latch.await();
         for (int i = 0; i < iters; i++) {
             newShard.checkIndex();
@@ -3584,9 +3584,9 @@ public class IndexShardTests extends IndexShardTestCase {
         IndexShard primary = newShard(new ShardId(metadata.getIndex(), 0), true, "n1", metadata, null);
         recoverShardFromStore(primary);
-        int threadCount = randomIntBetween(2, 6);
+        int threadCount = randomIntBetween(2, 4);
         List<Thread> threads = new ArrayList<>(threadCount);
-        int iterations = randomIntBetween(50, 100);
+        int iterations = randomIntBetween(10, 20);
         List<Engine.Searcher> searchers = Collections.synchronizedList(new ArrayList<>());
         logger.info("--> running with {} threads and {} iterations each", threadCount, iterations);

@@ -307,7 +307,7 @@ public class RefreshListenersTests extends OpenSearchTestCase {
         });
         refresher.start();
         try {
-            for (int i = 0; i < 1000; i++) {
+            for (int i = 0; i < 100; i++) {
                 Engine.IndexResult index = index("1");
                 DummyRefreshListener listener = new DummyRefreshListener();
                 boolean immediate = listeners.addOrNotify(index.getTranslogLocation(), listener);

@@ -269,6 +269,7 @@ public class SearchServiceTests extends OpenSearchSingleNodeTestCase {
             assertEquals(activeRefs, indexShard.store().refCount());
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/578")
     public void testSearchWhileIndexDeleted() throws InterruptedException {
         createIndex("index");
         client().prepareIndex("index", "type", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();

@@ -873,6 +873,7 @@ public class SnapshotResiliencyTests extends OpenSearchTestCase {
      * Simulates concurrent restarts of data and master nodes as well as relocating a primary shard, while starting and subsequently
      * deleting a snapshot.
      */
+    @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/579")
     public void testSnapshotPrimaryRelocations() {
         final int masterNodeCount = randomFrom(1, 3, 5);
         setupTestCluster(masterNodeCount, randomIntBetween(2, 10));

@@ -107,7 +107,7 @@ public class InboundPipelineTests extends OpenSearchTestCase {
         final InboundPipeline pipeline = new InboundPipeline(statsTracker, millisSupplier, decoder, aggregator, messageHandler);
         final FakeTcpChannel channel = new FakeTcpChannel();
-        final int iterations = randomIntBetween(100, 500);
+        final int iterations = randomIntBetween(5, 10);
         long totalMessages = 0;
         long bytesReceived = 0;