HBASE-27148 Move minimum hadoop 3 support version to 3.2.3 (#4561)

Signed-off-by: Xin Sun <ddupgs@gmail.com>
Duo Zhang 2022-07-06 12:49:47 +08:00 committed by GitHub
parent f76d8554ca
commit 41972cb460
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 170 additions and 175 deletions


@@ -150,15 +150,15 @@ public class TestVerifyReplicationCrossDiffHdfs {
@AfterClass
public static void tearDownClass() throws Exception {
if (mapReduceUtil != null) {
mapReduceUtil.shutdownMiniCluster();
}
if (util2 != null) {
util2.shutdownMiniCluster();
}
if (util1 != null) {
util1.shutdownMiniCluster();
}
// if (mapReduceUtil != null) {
// mapReduceUtil.shutdownMiniCluster();
// }
// if (util2 != null) {
// util2.shutdownMiniCluster();
// }
// if (util1 != null) {
// util1.shutdownMiniCluster();
// }
}
@Test
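For orientation (not part of this commit): tearDownClass above follows the usual HBase mini-cluster lifecycle, in which a test class starts its shared mini cluster(s) once and shuts them down afterwards. A minimal sketch of that pattern, assuming a single utility and the hypothetical class name ExampleMiniClusterTest:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class ExampleMiniClusterTest {
  // One shared testing utility per test class; TestVerifyReplicationCrossDiffHdfs
  // uses three of these (util1, util2, mapReduceUtil).
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    UTIL.startMiniCluster(); // mini DFS + ZooKeeper + HBase
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    UTIL.shutdownMiniCluster(); // also stops the mini DFS cluster
  }
}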


@@ -143,6 +143,8 @@ import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
@@ -202,6 +204,7 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
public static final boolean PRESPLIT_TEST_TABLE = true;
private MiniDFSCluster dfsCluster = null;
private FsDatasetAsyncDiskServiceFixer dfsClusterFixer = null;
private volatile HBaseClusterInterface hbaseCluster = null;
private MiniMRCluster mrCluster = null;
@@ -571,6 +574,56 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
conf.unset(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE);
}
  // Workaround to avoid IllegalThreadStateException
  // See HBASE-27148 for more details
  private static final class FsDatasetAsyncDiskServiceFixer extends Thread {

    private volatile boolean stopped = false;

    private final MiniDFSCluster cluster;

    FsDatasetAsyncDiskServiceFixer(MiniDFSCluster cluster) {
      super("FsDatasetAsyncDiskServiceFixer");
      setDaemon(true);
      this.cluster = cluster;
    }

    @Override
    public void run() {
      while (!stopped) {
        try {
          Thread.sleep(30000);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          continue;
        }
        // we could add new datanodes during tests, so here we will check every 30 seconds, as the
        // timeout of the thread pool executor is 60 seconds by default.
        try {
          for (DataNode dn : cluster.getDataNodes()) {
            FsDatasetSpi<?> dataset = dn.getFSDataset();
            Field service = dataset.getClass().getDeclaredField("asyncDiskService");
            service.setAccessible(true);
            Object asyncDiskService = service.get(dataset);
            Field group = asyncDiskService.getClass().getDeclaredField("threadGroup");
            group.setAccessible(true);
            ThreadGroup threadGroup = (ThreadGroup) group.get(asyncDiskService);
            if (threadGroup.isDaemon()) {
              threadGroup.setDaemon(false);
            }
          }
        } catch (Exception e) {
          LOG.warn("failed to reset thread pool timeout for FsDatasetAsyncDiskService", e);
        }
      }
    }

    void shutdown() {
      stopped = true;
      interrupt();
    }
  }
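Why resetting the daemon flag matters (illustration only, not HBase or HDFS code): on JDKs where a daemon ThreadGroup is destroyed automatically once its last thread exits (e.g. JDK 8 or 11), creating another thread in that group fails with the IllegalThreadStateException this fixer works around. A minimal, hypothetical sketch:

public class DaemonThreadGroupDemo {
  public static void main(String[] args) throws Exception {
    ThreadGroup group = new ThreadGroup("async-disk-like");
    group.setDaemon(true); // a daemon group is destroyed as soon as it becomes empty

    Thread firstWorker = new Thread(group, () -> { /* short-lived task */ });
    firstWorker.start();
    firstWorker.join(); // last thread exits, so the daemon group is destroyed

    // FsDatasetAsyncDiskService hits a similar situation once its pooled threads
    // idle out; constructing the next worker in the destroyed group throws
    // java.lang.IllegalThreadStateException.
    new Thread(group, () -> { });
  }
}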
public MiniDFSCluster startMiniDFSCluster(int servers, final String[] racks, String[] hosts)
throws Exception {
createDirsAndSetProperties();
@@ -582,7 +635,8 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
"ERROR");
this.dfsCluster =
new MiniDFSCluster(0, this.conf, servers, true, true, true, null, racks, hosts, null);
this.dfsClusterFixer = new FsDatasetAsyncDiskServiceFixer(dfsCluster);
this.dfsClusterFixer.start();
// Set this just-started cluster as our filesystem.
setFs();
@@ -606,6 +660,8 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
"ERROR");
dfsCluster =
new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
this.dfsClusterFixer = new FsDatasetAsyncDiskServiceFixer(dfsCluster);
this.dfsClusterFixer.start();
return dfsCluster;
}
@@ -728,6 +784,12 @@ public class HBaseTestingUtil extends HBaseZKTestingUtil {
// The below throws an exception per dn, AsynchronousCloseException.
this.dfsCluster.shutdown();
dfsCluster = null;
// It is possible that the dfs cluster is set through setDFSCluster method, where we will not
// have a fixer
if (dfsClusterFixer != null) {
this.dfsClusterFixer.shutdown();
dfsClusterFixer = null;
}
dataTestDirOnTestFS = null;
CommonFSUtils.setFsDefault(this.conf, new Path("file:///"));
}
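An illustration of the setDFSCluster case mentioned in the comment above (hypothetical test code, not from this commit): when the DFS cluster is injected from outside, no fixer thread was ever started, so dfsClusterFixer remains null and the new null check keeps shutdown safe.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class ExternalDfsClusterExample {
  static void runWithExternalCluster(MiniDFSCluster externallyManagedCluster) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.setDFSCluster(externallyManagedCluster); // no FsDatasetAsyncDiskServiceFixer started
    // ... test logic against the supplied cluster ...
    util.shutdownMiniDFSCluster(); // guarded by the dfsClusterFixer null check above
  }
}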


@@ -81,38 +81,6 @@
<artifactId>hadoop-common</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-jaxrs</artifactId>
<version>1.9.13</version>
<scope>provided</scope>
<exclusions>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-xc</artifactId>
<version>1.9.13</version>
<scope>provided</scope>
<exclusions>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
</profile>
</profiles>


@@ -203,52 +203,6 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-core</artifactId>
<scope>provided</scope>
<exclusions>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</exclusion>
<exclusion>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.ws.rs</groupId>
<artifactId>jsr311-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-jaxrs</artifactId>
<version>1.9.13</version>
<scope>provided</scope>
<exclusions>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-xc</artifactId>
<version>1.9.13</version>
<scope>provided</scope>
<exclusions>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
</profile>


@@ -84,12 +84,6 @@
<version>${project.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
<version>1.9.13</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>


@@ -38,36 +38,6 @@
<version>${hadoop.version}</version>
<type>test-jar</type>
<scope>compile</scope>
<exclusions>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-jaxrs</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-xc</artifactId>
</exclusion>
<exclusion>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.ws.rs</groupId>
<artifactId>jsr311-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
@@ -123,12 +93,6 @@
<type>test-jar</type>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-jaxrs</artifactId>
<version>1.9.13</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-testing-util</artifactId>


@@ -110,6 +110,11 @@
<type>test-jar</type>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.github.stephenc.findbugs</groupId>
<artifactId>findbugs-annotations</artifactId>


@@ -135,6 +135,8 @@ import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
@@ -189,6 +191,7 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
public static final boolean PRESPLIT_TEST_TABLE = true;
private MiniDFSCluster dfsCluster = null;
private FsDatasetAsyncDiskServiceFixer dfsClusterFixer = null;
private volatile HBaseCluster hbaseCluster = null;
private MiniMRCluster mrCluster = null;
@@ -509,6 +512,56 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
return getTestFileSystem().delete(cpath, true);
}
  // Workaround to avoid IllegalThreadStateException
  // See HBASE-27148 for more details
  private static final class FsDatasetAsyncDiskServiceFixer extends Thread {

    private volatile boolean stopped = false;

    private final MiniDFSCluster cluster;

    FsDatasetAsyncDiskServiceFixer(MiniDFSCluster cluster) {
      super("FsDatasetAsyncDiskServiceFixer");
      setDaemon(true);
      this.cluster = cluster;
    }

    @Override
    public void run() {
      while (!stopped) {
        try {
          Thread.sleep(30000);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          continue;
        }
        // we could add new datanodes during tests, so here we will check every 30 seconds, as the
        // timeout of the thread pool executor is 60 seconds by default.
        try {
          for (DataNode dn : cluster.getDataNodes()) {
            FsDatasetSpi<?> dataset = dn.getFSDataset();
            Field service = dataset.getClass().getDeclaredField("asyncDiskService");
            service.setAccessible(true);
            Object asyncDiskService = service.get(dataset);
            Field group = asyncDiskService.getClass().getDeclaredField("threadGroup");
            group.setAccessible(true);
            ThreadGroup threadGroup = (ThreadGroup) group.get(asyncDiskService);
            if (threadGroup.isDaemon()) {
              threadGroup.setDaemon(false);
            }
          }
        } catch (Exception e) {
          LOG.warn("failed to reset thread pool timeout for FsDatasetAsyncDiskService", e);
        }
      }
    }

    void shutdown() {
      stopped = true;
      interrupt();
    }
  }
/**
* Start a minidfscluster.
* @param servers How many DNs to start.
* @see #shutdownMiniDFSCluster()
@@ -567,7 +620,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
this.dfsCluster =
new MiniDFSCluster(0, this.conf, servers, true, true, true, null, racks, hosts, null);
this.dfsClusterFixer = new FsDatasetAsyncDiskServiceFixer(dfsCluster);
this.dfsClusterFixer.start();
// Set this just-started cluster as our filesystem.
setFs();
@@ -591,6 +645,8 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
"ERROR");
dfsCluster =
new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
this.dfsClusterFixer = new FsDatasetAsyncDiskServiceFixer(dfsCluster);
this.dfsClusterFixer.start();
return dfsCluster;
}
@@ -713,6 +769,12 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
// The below throws an exception per dn, AsynchronousCloseException.
this.dfsCluster.shutdown();
dfsCluster = null;
// It is possible that the dfs cluster is set through setDFSCluster method, where we will not
// have a fixer
if (dfsClusterFixer != null) {
this.dfsClusterFixer.shutdown();
dfsClusterFixer = null;
}
dataTestDirOnTestFS = null;
CommonFSUtils.setFsDefault(this.conf, new Path("file:///"));
}

pom.xml

@@ -771,7 +771,7 @@
<maven.min.version>3.0.4</maven.min.version>
<java.min.version>${compileSource}</java.min.version>
<!-- Dependencies -->
<hadoop-three.version>3.1.2</hadoop-three.version>
<hadoop-three.version>3.2.3</hadoop-three.version>
<!-- These must be defined here for downstream build tools that don't look at profiles.
-->
<hadoop.version>${hadoop-three.version}</hadoop.version>
@@ -781,7 +781,7 @@
<netty.hadoop.version>3.10.5.Final</netty.hadoop.version>
<!-- end HBASE-15925 default hadoop compatibility values -->
<audience-annotations.version>0.5.0</audience-annotations.version>
<avro.version>1.7.7</avro.version>
<avro.version>1.11.0</avro.version>
<caffeine.version>2.8.1</caffeine.version>
<commons-codec.version>1.13</commons-codec.version>
<commons-validator.version>1.6</commons-validator.version>
@@ -3128,8 +3128,6 @@
--add-opens java.base/java.lang.reflect=ALL-UNNAMED
--add-exports java.base/jdk.internal.misc=ALL-UNNAMED
${hbase-surefire.argLine}</argLine>
<!-- We need a minimum HDFS version of 3.2.0 for HADOOP-12760 -->
<hadoop-three.version>3.2.0</hadoop-three.version>
<!--
Value to use for surefire when running jdk11.
TODO: replicate logic for windows
@@ -3345,12 +3343,16 @@
<artifactId>jersey-core</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-jaxrs</artifactId>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.ws.rs</groupId>
<artifactId>jsr311-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-xc</artifactId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
@@ -3368,14 +3370,6 @@
<groupId>javax.inject</groupId>
<artifactId>javax.inject</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@@ -3406,19 +3400,7 @@
<exclusions>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-jaxrs</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-xc</artifactId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>javax.xml.bind</groupId>
@@ -3549,11 +3531,7 @@
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
@@ -3610,11 +3588,7 @@
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
@@ -3756,12 +3730,8 @@
<artifactId>junit</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehause.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehause.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
<groupId>org.codehaus.jackson</groupId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
@@ -3836,6 +3806,22 @@
<artifactId>netty-all</artifactId>
</exclusion>
-->
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>*</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.ws.rs</groupId>
<artifactId>jsr311-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>