HBASE-27148 Move minimum hadoop 3 support version to 3.2.3 (#4561) (#4599)

Signed-off-by: Xin Sun <ddupgs@gmail.com>
(cherry picked from commit 41972cb460)
Duo Zhang, 2022-07-08 15:59:25 +08:00, committed by GitHub
parent 92525fb869
commit 6dd1b58481
3 changed files with 112 additions and 83 deletions

hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java

@@ -144,6 +144,8 @@ import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
@@ -196,6 +198,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
public static final boolean PRESPLIT_TEST_TABLE = true;
private MiniDFSCluster dfsCluster = null;
private FsDatasetAsyncDiskServiceFixer dfsClusterFixer = null;
private volatile HBaseCluster hbaseCluster = null;
private MiniMRCluster mrCluster = null;
@@ -574,6 +577,56 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
return getTestFileSystem().delete(cpath, true);
}
// Workaround to avoid IllegalThreadStateException: FsDatasetAsyncDiskService runs its worker
// threads in a daemon ThreadGroup, which destroys itself once the pool's 60-second idle
// timeout reaps the last thread; submitting a new task to the destroyed group then fails.
// The fixer below flips the group to non-daemon so it survives idle periods.
// See HBASE-27148 for more details.
private static final class FsDatasetAsyncDiskServiceFixer extends Thread {
private volatile boolean stopped = false;
private final MiniDFSCluster cluster;
FsDatasetAsyncDiskServiceFixer(MiniDFSCluster cluster) {
super("FsDatasetAsyncDiskServiceFixer");
setDaemon(true);
this.cluster = cluster;
}
@Override
public void run() {
while (!stopped) {
try {
Thread.sleep(30000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
continue;
}
// we could add new datanodes during tests, so here we will check every 30 seconds, as the
// timeout of the thread pool executor is 60 seconds by default.
try {
for (DataNode dn : cluster.getDataNodes()) {
FsDatasetSpi<?> dataset = dn.getFSDataset();
Field service = dataset.getClass().getDeclaredField("asyncDiskService");
service.setAccessible(true);
Object asyncDiskService = service.get(dataset);
Field group = asyncDiskService.getClass().getDeclaredField("threadGroup");
group.setAccessible(true);
ThreadGroup threadGroup = (ThreadGroup) group.get(asyncDiskService);
if (threadGroup.isDaemon()) {
threadGroup.setDaemon(false);
}
}
} catch (Exception e) {
LOG.warn("failed to reset thread pool timeout for FsDatasetAsyncDiskService", e);
}
}
}
void shutdown() {
stopped = true;
interrupt();
}
}
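
For context on the failure mode the fixer prevents: a daemon ThreadGroup destroys itself once its last thread terminates, and constructing a thread inside a destroyed group throws IllegalThreadStateException. A minimal standalone sketch (an illustration of the JDK behavior, not code from this patch; it reproduces only on JDKs before 19, where daemon thread groups still auto-destroy):

public class DaemonGroupCrashSketch {
  public static void main(String[] args) throws Exception {
    ThreadGroup group = new ThreadGroup("asyncDiskService");
    group.setDaemon(true); // daemon groups auto-destroy once they hold no live threads
    Thread worker = new Thread(group, () -> { /* a short-lived task */ });
    worker.start();
    worker.join(); // worker has exited, so the empty daemon group is now destroyed
    // FsDatasetAsyncDiskService's pool does the equivalent of this when a task
    // arrives after its 60-second idle timeout has reaped all worker threads:
    new Thread(group, () -> {}); // throws IllegalThreadStateException
  }
}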
/**
* Start a minidfscluster.
* @param servers How many DNs to start.
* @see #shutdownMiniDFSCluster()
@@ -632,7 +685,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
this.dfsCluster =
new MiniDFSCluster(0, this.conf, servers, true, true, true, null, racks, hosts, null);
this.dfsClusterFixer = new FsDatasetAsyncDiskServiceFixer(dfsCluster);
this.dfsClusterFixer.start();
// Set this just-started cluster as our filesystem.
setFs();
@@ -656,6 +710,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
"ERROR");
dfsCluster =
new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
this.dfsClusterFixer = new FsDatasetAsyncDiskServiceFixer(dfsCluster);
this.dfsClusterFixer.start();
return dfsCluster;
}
@@ -778,6 +834,12 @@
// The below throws an exception per dn, AsynchronousCloseException.
this.dfsCluster.shutdown();
dfsCluster = null;
// It is possible that the dfs cluster is set through setDFSCluster method, where we will not
// have a fixer
if (dfsClusterFixer != null) {
this.dfsClusterFixer.shutdown();
dfsClusterFixer = null;
}
dataTestDirOnTestFS = null;
CommonFSUtils.setFsDefault(this.conf, new Path("file:///"));
}
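
Taken together, the fixer's lifecycle brackets the cluster's. A sketch of the wiring (illustrative only: in the patch FsDatasetAsyncDiskServiceFixer is a private inner class of HBaseTestingUtility, so this exact code does not compile outside that class):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class FixerLifecycleSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    FsDatasetAsyncDiskServiceFixer fixer = new FsDatasetAsyncDiskServiceFixer(dfsCluster);
    fixer.start(); // daemon watcher thread; re-checks every datanode every 30 seconds
    try {
      // ... run tests against dfsCluster ...
    } finally {
      dfsCluster.shutdown(); // as in shutdownMiniDFSCluster() above
      fixer.shutdown();      // raises the stopped flag and interrupts the sleep
    }
  }
}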


@@ -128,6 +128,16 @@
<type>test-jar</type>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.github.stephenc.findbugs</groupId>
<artifactId>findbugs-annotations</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>jcl-over-slf4j</artifactId>

pom.xml

@@ -544,7 +544,7 @@
<java.min.version>${compileSource}</java.min.version>
<!-- Dependencies -->
<hadoop-two.version>2.10.0</hadoop-two.version>
<hadoop-three.version>3.1.2</hadoop-three.version>
<hadoop-three.version>3.2.3</hadoop-three.version>
<!-- These must be defined here for downstream build tools that don't look at profiles.
They ought to match the values found in our default hadoop profile, which is
currently "hadoop-2.0". See HBASE-15925 for more info. -->
@@ -557,7 +557,7 @@
<netty.hadoop.version>3.6.2.Final</netty.hadoop.version>
<!-- end HBASE-15925 default hadoop compatibility values -->
<audience-annotations.version>0.5.0</audience-annotations.version>
<avro.version>1.7.7</avro.version>
<avro.version>1.11.0</avro.version>
<caffeine.version>2.8.1</caffeine.version>
<commons-codec.version>1.13</commons-codec.version>
<commons-io.version>2.11.0</commons-io.version>
@@ -2816,8 +2816,6 @@
--add-opens java.base/java.lang.reflect=ALL-UNNAMED
--add-exports java.base/jdk.internal.misc=ALL-UNNAMED
${hbase-surefire.argLine}</argLine>
<!-- We need a minimum HDFS version of 3.2.0 for HADOOP-12760 -->
<hadoop-three.version>3.2.0</hadoop-three.version>
<!--
Value to use for surefire when running jdk11.
TODO: replicate logic for windows
@@ -3643,12 +3641,16 @@
<artifactId>jersey-core</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-jaxrs</artifactId>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.ws.rs</groupId>
<artifactId>jsr311-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-xc</artifactId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
@@ -3666,14 +3668,6 @@
<groupId>javax.inject</groupId>
<artifactId>javax.inject</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@@ -3702,12 +3696,8 @@
<version>${hadoop-three.version}</version>
<exclusions>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
<groupId>org.codehaus.jackson</groupId>
<artifactId>*</artifactId>
</exclusion>
<!--HERE-->
<exclusion>
@@ -3777,19 +3767,7 @@
<exclusions>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-jaxrs</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-xc</artifactId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>javax.xml.bind</groupId>
@@ -3860,11 +3838,7 @@
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
@@ -3929,11 +3903,7 @@
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
@@ -3968,38 +3938,6 @@
-->
</exclusions>
</dependency>
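
The hunks above replace the per-artifact org.codehaus.jackson exclusions with Maven's wildcard syntax (supported since Maven 3.2.1): a single exclusion with <artifactId>*</artifactId> excludes every artifact in that group. A minimal sketch of the pattern (the dependency shown is illustrative, not copied from this patch):

<dependency>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-hdfs</artifactId>
  <version>${hadoop-three.version}</version>
  <exclusions>
    <!-- one wildcard instead of listing jackson-core-asl, jackson-mapper-asl,
         jackson-jaxrs and jackson-xc individually -->
    <exclusion>
      <groupId>org.codehaus.jackson</groupId>
      <artifactId>*</artifactId>
    </exclusion>
  </exclusions>
</dependency>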
<dependency>
<!-- Is this needed? Seems a duplicate of the above dependency but for the
classifier-->
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop-three.version}</version>
<classifier>tests</classifier>
<type>test-jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
<exclusion>
<groupId>ch.qos.reload4j</groupId>
<artifactId>reload4j</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-reload4j</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth</artifactId>
@@ -4081,12 +4019,8 @@
<artifactId>junit</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehause.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehause.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
<groupId>org.codehaus.jackson</groupId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
@@ -4182,6 +4116,29 @@
<groupId>org.slf4j</groupId>
<artifactId>slf4j-reload4j</artifactId>
</exclusion>
<!--
Needed in test context when hadoop-3.3 runs.
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty-all</artifactId>
</exclusion>
-->
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.ws.rs</groupId>
<artifactId>jsr311-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>