commit 4605c323cc
parent 5f55ff7578
Author: Duo Zhang
Date:   2022-11-07 23:14:26 +08:00

    HBASE-27443 Use java11 in the general check of our jenkins job (#4845)

    Signed-off-by: Guanghao Zhang <zghao@apache.org>
    (cherry picked from commit 5c6ff7dc47)

12 changed files with 63 additions and 81 deletions


@@ -202,7 +202,9 @@ pipeline {
       environment {
         BASEDIR = "${env.WORKSPACE}/component"
         TESTS = "${env.SHALLOW_CHECKS}"
-        SET_JAVA_HOME = '/usr/lib/jvm/java-8'
+        SET_JAVA_HOME = "/usr/lib/jvm/java-11"
+        // Activates hadoop 3.0 profile in maven runs.
+        HADOOP_PROFILE = '3.0'
         OUTPUT_DIR_RELATIVE = "${env.OUTPUT_DIR_RELATIVE_GENERAL}"
         OUTPUT_DIR = "${env.WORKSPACE}/${env.OUTPUT_DIR_RELATIVE_GENERAL}"
         ASF_NIGHTLIES_GENERAL_CHECK_BASE="${ASF_NIGHTLIES_BASE}/${OUTPUT_DIR_RELATIVE}"


@@ -78,7 +78,7 @@ pipeline {
           environment {
             // customized per parallel stage
            PLUGINS = "${GENERAL_CHECK_PLUGINS}"
-           SET_JAVA_HOME = '/usr/lib/jvm/java-8'
+           SET_JAVA_HOME = "/usr/lib/jvm/java-11"
            WORKDIR_REL = "${WORKDIR_REL_GENERAL_CHECK}"
            // identical for all parallel stages
            WORKDIR = "${WORKSPACE}/${WORKDIR_REL}"


@@ -69,31 +69,6 @@
       <activation>
         <activeByDefault>false</activeByDefault>
       </activation>
-      <properties>
-        <!-- https://errorprone.info/docs/installation Maven section has details -->
-        <!-- required when compiling with JDK 8 -->
-        <javac.version>9+181-r4173-1</javac.version>
-      </properties>
-      <dependencies>
-        <dependency>
-          <groupId>com.google.errorprone</groupId>
-          <artifactId>error_prone_core</artifactId>
-          <version>${error-prone.version}</version>
-          <scope>provided</scope>
-          <exclusions>
-            <exclusion>
-              <groupId>com.google.code.findbugs</groupId>
-              <artifactId>jsr305</artifactId>
-            </exclusion>
-          </exclusions>
-        </dependency>
-        <dependency>
-          <groupId>com.google.errorprone</groupId>
-          <artifactId>javac</artifactId>
-          <version>${javac.version}</version>
-          <scope>provided</scope>
-        </dependency>
-      </dependencies>
       <build>
         <plugins>
           <!-- Turn on error-prone -->
@@ -101,17 +76,12 @@
             <groupId>org.apache.maven.plugins</groupId>
             <artifactId>maven-compiler-plugin</artifactId>
             <configuration>
-              <source>${compileSource}</source>
-              <target>${compileSource}</target>
-              <!-- required when compiling with JDK 8 -->
-              <fork>true</fork>
+              <release>${releaseTarget}</release>
               <showWarnings>true</showWarnings>
               <compilerArgs>
                 <arg>-XDcompilePolicy=simple</arg>
                 <!-- All -Xep need to be on single line see: https://github.com/google/error-prone/pull/1115 -->
-                <arg>-Xplugin:ErrorProne -XepDisableWarningsInGeneratedCode -Xep:FallThrough:OFF -Xep:MutablePublicArray:OFF -Xep:ClassNewInstance:ERROR -Xep:MissingDefault:ERROR</arg>
-                <!-- Required when compiling with JDK 8 -->
-                <arg>-J-Xbootclasspath/p:${settings.localRepository}/com/google/errorprone/javac/${javac.version}/javac-${javac.version}.jar</arg>
+                <arg>-Xplugin:ErrorProne -XepDisableWarningsInGeneratedCode -XepExcludedPaths:.*/target/.* -Xep:FallThrough:OFF -Xep:MutablePublicArray:OFF -Xep:ClassNewInstance:ERROR -Xep:MissingDefault:ERROR -Xep:BanJNDI:WARN</arg>
               </compilerArgs>
               <annotationProcessorPaths>
                 <path>
@@ -122,6 +92,25 @@
               </annotationProcessorPaths>
             </configuration>
           </plugin>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-enforcer-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>jdk11-required</id>
+                <goals>
+                  <goal>enforce</goal>
+                </goals>
+                <configuration>
+                  <rules>
+                    <requireJavaVersion>
+                      <version>[11,)</version>
+                    </requireJavaVersion>
+                  </rules>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
         </plugins>
       </build>
     </profile>
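
Note: with `<release>${releaseTarget}</release>` replacing `<source>/<target>`, javac also checks API usage against the requested platform, and the enforcer rule `[11,)` fails the build outright on a pre-11 JDK. As a rough runtime analogue (a sketch, not part of this commit; the class name is made up), the same constraint could be expressed with `Runtime.version()`, available since Java 9:

    // Hypothetical runtime guard mirroring the enforcer's [11,) rule.
    public final class JdkVersionCheck {
      public static void main(String[] args) {
        int feature = Runtime.version().feature(); // e.g. 11 on a JDK 11 runtime
        if (feature < 11) {
          throw new IllegalStateException("Java 11 or later required, found " + feature);
        }
        System.out.println("Running on Java " + feature);
      }
    }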


@@ -195,4 +195,4 @@ public final class BloomFilterFactory {
     writer.addInlineBlockWriter(bloomWriter);
     return bloomWriter;
   }
-};
+}


@@ -36,21 +36,17 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet;
+
 /**
  * Base class for command lines that start up various HBase daemons.
  */
 @InterfaceAudience.Private
 public abstract class ServerCommandLine extends Configured implements Tool {
   private static final Logger LOG = LoggerFactory.getLogger(ServerCommandLine.class);
-  @SuppressWarnings("serial")
-  private static final Set<String> DEFAULT_SKIP_WORDS = new HashSet<String>() {
-    {
-      add("secret");
-      add("passwd");
-      add("password");
-      add("credential");
-    }
-  };
+  private static final Set<String> DEFAULT_SKIP_WORDS =
+    ImmutableSet.of("secret", "passwd", "password", "credential");
 
   /**
    * Implementing subclasses should return a usage string to print out.

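Note: the "double brace" idiom removed above creates an anonymous HashSet subclass per declaration (hence the now-dropped @SuppressWarnings("serial")) and the resulting set stays mutable. A minimal before/after sketch; it uses the JDK's Set.of (Java 9+) to stay dependency-free, which behaves like the hbase-thirdparty ImmutableSet for this purpose:

    import java.util.HashSet;
    import java.util.Set;

    class SkipWords {
      // Before: anonymous subclass plus instance initializer ("double brace").
      static final Set<String> OLD = new HashSet<String>() {
        {
          add("secret");
          add("passwd");
        }
      };

      // After: a plain immutable set, no extra class file generated.
      static final Set<String> NEW = Set.of("secret", "passwd");
    }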

@@ -24,7 +24,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
-import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CallQueueTooBigException;
@@ -266,16 +266,10 @@ public class TestMetaCache {
   }
 
   public static List<Throwable> metaCachePreservingExceptions() {
-    return new ArrayList<Throwable>() {
-      {
-        add(new RegionOpeningException(" "));
-        add(new RegionTooBusyException("Some old message"));
-        add(new RpcThrottlingException(" "));
-        add(new MultiActionResultTooLarge(" "));
-        add(new RetryImmediatelyException(" "));
-        add(new CallQueueTooBigException());
-      }
-    };
+    return Arrays.asList(new RegionOpeningException(" "),
+      new RegionTooBusyException("Some old message"), new RpcThrottlingException(" "),
+      new MultiActionResultTooLarge(" "), new RetryImmediatelyException(" "),
+      new CallQueueTooBigException());
   }
 
   public static class RegionServerWithFakeRpcServices extends HRegionServer {

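Note on the replacement: Arrays.asList returns a fixed-size list view backed by the argument array, so callers may replace elements but not add or remove them, which is sufficient for the test's read-only use. A small illustration of that contract, plain JDK only:

    import java.util.Arrays;
    import java.util.List;

    class AsListDemo {
      public static void main(String[] args) {
        List<String> xs = Arrays.asList("a", "b");
        xs.set(0, "z");          // allowed: writes through to the backing array
        // xs.add("c");          // would throw UnsupportedOperationException
        System.out.println(xs);  // [z, b]
      }
    }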

@@ -364,13 +364,12 @@ public class TestFavoredNodeAssignmentHelper {
     regionMap.put(regionsOnRack1, 1);
     regionMap.put(regionsOnRack2, 2);
     regionMap.put(regionsOnRack3, 3);
-    assertTrue(printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1,
-      regionsOnRack2, regionsOnRack3), rackMap.get(firstRackSize) == regionMap.get(regionsOnRack1));
-    assertTrue(printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1,
-      regionsOnRack2, regionsOnRack3),
-      rackMap.get(secondRackSize) == regionMap.get(regionsOnRack2));
-    assertTrue(printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1,
-      regionsOnRack2, regionsOnRack3), rackMap.get(thirdRackSize) == regionMap.get(regionsOnRack3));
+    assertEquals(printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1,
+      regionsOnRack2, regionsOnRack3), rackMap.get(firstRackSize), regionMap.get(regionsOnRack1));
+    assertEquals(printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1,
+      regionsOnRack2, regionsOnRack3), rackMap.get(secondRackSize), regionMap.get(regionsOnRack2));
+    assertEquals(printProportions(firstRackSize, secondRackSize, thirdRackSize, regionsOnRack1,
+      regionsOnRack2, regionsOnRack3), rackMap.get(thirdRackSize), regionMap.get(regionsOnRack3));
   }
 
   private String printProportions(int firstRackSize, int secondRackSize, int thirdRackSize,

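Note: this hunk fixes a latent bug, not just style. The maps return boxed Integers, and == on Integer compares references; it only happens to pass for values inside the JVM's integer cache (-128 to 127). assertEquals uses equals() and compares values. A sketch of the pitfall:

    class BoxedCompare {
      public static void main(String[] args) {
        Integer a = 1000, b = 1000;
        System.out.println(a == b);      // usually false: two boxes outside the cache
        System.out.println(a.equals(b)); // true: value comparison, what assertEquals does
      }
    }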

@@ -538,14 +538,15 @@ public class TestScanner {
     }
   }
 
-  /*
-   * @param hri Region
+  /**
+   * Count table.
+   * @param hri Region
    * @param flushIndex At what row we start the flush.
    * @param concurrent if the flush should be concurrent or sync.
    * @return Count of rows found.
    */
   private int count(final Table countTable, final int flushIndex, boolean concurrent)
-    throws IOException {
+    throws Exception {
     LOG.info("Taking out counting scan");
     Scan scan = new Scan();
     for (byte[] qualifier : EXPLICIT_COLS) {
@@ -573,10 +574,10 @@ public class TestScanner {
         }
       }
     };
-    if (concurrent) {
-      t.start(); // concurrently flush.
-    } else {
-      t.run(); // sync flush
+    t.start();
+    if (!concurrent) {
+      // sync flush
+      t.join();
     }
     LOG.info("Continuing on after kicking off background flush");
     justFlushed = true;

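Note: several hunks in this commit replace direct Thread.run() calls with start()/join(). run() merely invokes the method on the calling thread, whereas start() schedules a new thread and join() waits for it, so start()+join() keeps the synchronous behaviour while actually exercising the threading path. A minimal sketch:

    class RunVsStart {
      public static void main(String[] args) throws InterruptedException {
        Thread t = new Thread(
          () -> System.out.println("in " + Thread.currentThread().getName()));
        t.run();   // prints "in main": no new thread is created
        t = new Thread(
          () -> System.out.println("in " + Thread.currentThread().getName()));
        t.start(); // prints "in Thread-1" (or similar): runs on the new thread
        t.join();  // wait for completion, mirroring the tests' start()+join()
      }
    }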

@@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.replication;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -45,6 +43,8 @@ import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
+
 @Category({ MediumTests.class, ReplicationTests.class })
 public class TestNonHBaseReplicationEndpoint {
@@ -86,11 +86,8 @@ public class TestNonHBaseReplicationEndpoint {
 
     ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
       .setReplicationEndpointImpl(NonHBaseReplicationEndpoint.class.getName())
-      .setReplicateAllUserTables(false).setTableCFsMap(new HashMap<TableName, List<String>>() {
-        {
-          put(tableName, new ArrayList<>());
-        }
-      }).build();
+      .setReplicateAllUserTables(false)
+      .setTableCFsMap(ImmutableMap.of(tableName, new ArrayList<>())).build();
     ADMIN.addReplicationPeer("1", peerConfig);
 
     loadData(table);


@@ -611,7 +611,8 @@ public abstract class TestBasicWALEntryStream extends WALEntryStreamTestBase {
     localLogQueue.enqueueLog(emptyLog, fakeWalGroupId);
     ReplicationSourceWALReader reader = new ReplicationSourceWALReader(fs, conf, localLogQueue, 0,
       getDummyFilter(), source, fakeWalGroupId);
-    reader.run();
+    reader.start();
+    reader.join();
     // ReplicationSourceWALReaderThread#handleEofException method will
     // remove empty log from logQueue.
     assertEquals(0, localLogQueue.getQueueSize(fakeWalGroupId));
@@ -650,7 +651,8 @@ public abstract class TestBasicWALEntryStream extends WALEntryStreamTestBase {
       getDummyFilter(), source, fakeWalGroupId);
     assertEquals("Initial log queue size is not correct", 2,
       localLogQueue.getQueueSize(fakeWalGroupId));
-    reader.run();
+    reader.start();
+    reader.join();
     // remove empty log from logQueue.
     assertEquals(0, localLogQueue.getQueueSize(fakeWalGroupId));


@@ -381,7 +381,8 @@ public abstract class TestReplicationSourceManager {
       ReplicationFactory.getReplicationPeers(s1.getZooKeeper(), s1.getConfiguration());
     rp1.init();
     NodeFailoverWorker w1 = manager.new NodeFailoverWorker(server.getServerName());
-    w1.run();
+    w1.start();
+    w1.join();
     assertEquals(1, manager.getWalsByIdRecoveredQueues().size());
     String id = "1-" + server.getServerName().getServerName();
     assertEquals(files, manager.getWalsByIdRecoveredQueues().get(id).get(group));
@@ -402,7 +403,8 @@ public abstract class TestReplicationSourceManager {
     rq.addWAL(server.getServerName(), "2", group + ".log2");
 
     NodeFailoverWorker w1 = manager.new NodeFailoverWorker(server.getServerName());
-    w1.run();
+    w1.start();
+    w1.join();
     // The log of the unknown peer should be removed from zk
     for (String peer : manager.getAllQueues()) {


@@ -611,7 +611,7 @@
     -->
     <checkstyle.version>8.29</checkstyle.version>
     <exec.maven.version>1.6.0</exec.maven.version>
-    <error-prone.version>2.10.0</error-prone.version>
+    <error-prone.version>2.16</error-prone.version>
     <jamon.plugin.version>2.4.2</jamon.plugin.version>
    <lifecycle.mapping.version>1.0.0</lifecycle.mapping.version>
    <maven.antrun.version>1.8</maven.antrun.version>