HBASE-26051 Remove reflections used to access HDFS EC APIs (#3446)

Signed-off-by: Michael Stack <stack@apache.org>
Signed-off-by: Duo Zhang <zhangduo@apache.org>
Wei-Chiu Chuang authored 2021-07-01 20:10:52 -07:00, committed by GitHub
parent ef639ff083
commit fab0505257
3 changed files with 12 additions and 39 deletions

FanOutOneBlockAsyncDFSOutputHelper.java

@@ -173,11 +173,6 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
   private static final FileCreator FILE_CREATOR;
 
-  // CreateFlag.SHOULD_REPLICATE is to make OutputStream on a EC directory support hflush/hsync, but
-  // EC is introduced in hadoop 3.x so we do not have this enum on 2.x, that's why we need to
-  // indirectly reference it through reflection.
-  private static final CreateFlag SHOULD_REPLICATE_FLAG;
-
   private static DFSClientAdaptor createDFSClientAdaptor() throws NoSuchMethodException {
     Method isClientRunningMethod = DFSClient.class.getDeclaredMethod("isClientRunning");
     isClientRunningMethod.setAccessible(true);
@@ -273,15 +268,6 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
     return createFileCreator2();
   }
 
-  private static CreateFlag loadShouldReplicateFlag() {
-    try {
-      return CreateFlag.valueOf("SHOULD_REPLICATE");
-    } catch (IllegalArgumentException e) {
-      LOG.debug("can not find SHOULD_REPLICATE flag, should be hadoop 2.x", e);
-      return null;
-    }
-  }
-
   // cancel the processing if DFSClient is already closed.
   static final class CancelOnClose implements CancelableProgressable {
@@ -302,7 +288,6 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
       LEASE_MANAGER = createLeaseManager();
       DFS_CLIENT_ADAPTOR = createDFSClientAdaptor();
       FILE_CREATOR = createFileCreator();
-      SHOULD_REPLICATE_FLAG = loadShouldReplicateFlag();
     } catch (Exception e) {
       String msg = "Couldn't properly initialize access to HDFS internals. Please " +
         "update your WAL Provider to not make use of the 'asyncfs' provider. See " +
@@ -503,9 +488,7 @@ public final class FanOutOneBlockAsyncDFSOutputHelper {
     if (overwrite) {
       flags.add(CreateFlag.OVERWRITE);
     }
-    if (SHOULD_REPLICATE_FLAG != null) {
-      flags.add(SHOULD_REPLICATE_FLAG);
-    }
+    flags.add(CreateFlag.SHOULD_REPLICATE);
     return new EnumSetWritable<>(EnumSet.copyOf(flags));
   }
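Note: the hunks above drop the Hadoop 2.x compatibility shim. With Hadoop 3.x as the minimum version, CreateFlag.SHOULD_REPLICATE can be referenced at compile time instead of being resolved via CreateFlag.valueOf(). A minimal sketch of the resulting flag construction (method name and collection type are condensed from the hunk, not the verbatim HBase code):

    import java.util.EnumSet;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.io.EnumSetWritable;

    // Sketch: SHOULD_REPLICATE asks HDFS to create a replicated file even under
    // an erasure-coded directory, so the output stream keeps hflush/hsync support.
    static EnumSetWritable<CreateFlag> createFlags(boolean overwrite) {
      EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE);
      if (overwrite) {
        flags.add(CreateFlag.OVERWRITE);
      }
      flags.add(CreateFlag.SHOULD_REPLICATE); // compile-time reference, no reflection
      return new EnumSetWritable<>(flags);
    }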

TestSaslFanOutOneBlockAsyncDFSOutput.java

@@ -205,9 +205,7 @@ public class TestSaslFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase {
   private Path entryptionTestDirOnTestFs;
 
   private void createEncryptionZone() throws Exception {
-    Method method =
-      DistributedFileSystem.class.getMethod("createEncryptionZone", Path.class, String.class);
-    method.invoke(FS, entryptionTestDirOnTestFs, TEST_KEY_NAME);
+    FS.createEncryptionZone(entryptionTestDirOnTestFs, TEST_KEY_NAME);
   }
 
   @Before
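Note: with the reflective Method lookup gone, the test calls DistributedFileSystem.createEncryptionZone(Path, String) directly. A hedged sketch of the pattern (directory and key name are illustrative; the key must already be provisioned in the cluster's KMS):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Sketch with illustrative names: create the directory, then turn it into
    // an encryption zone backed by an existing KMS key.
    static void createZone(DistributedFileSystem fs) throws Exception {
      Path dir = new Path("/enc-test");          // illustrative path
      fs.mkdirs(dir);
      fs.createEncryptionZone(dir, "test_key");  // direct call, no Method.invoke
    }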

TestHBaseWalOnEC.java

@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.junit.After;
 import org.junit.Assume;
 import org.junit.Before;
@@ -67,27 +68,18 @@ public class TestHBaseWalOnEC {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    try {
-      MiniDFSCluster cluster = UTIL.startMiniDFSCluster(3); // Need 3 DNs for RS-3-2 policy
-      DistributedFileSystem fs = cluster.getFileSystem();
+    MiniDFSCluster cluster = UTIL.startMiniDFSCluster(3); // Need 3 DNs for RS-3-2 policy
+    DistributedFileSystem fs = cluster.getFileSystem();
 
-      Method enableAllECPolicies =
-        DFSTestUtil.class.getMethod("enableAllECPolicies", DistributedFileSystem.class);
-      enableAllECPolicies.invoke(null, fs);
+    DFSTestUtil.enableAllECPolicies(fs);
 
-      DFSClient client = fs.getClient();
-      Method setErasureCodingPolicy =
-        DFSClient.class.getMethod("setErasureCodingPolicy", String.class, String.class);
-      setErasureCodingPolicy.invoke(client, "/", "RS-3-2-1024k"); // try a built-in policy
+    HdfsAdmin hdfsAdmin = new HdfsAdmin(fs.getUri(), UTIL.getConfiguration());
+    hdfsAdmin.setErasureCodingPolicy(new Path("/"), "RS-3-2-1024k");
 
-      try (FSDataOutputStream out = fs.create(new Path("/canary"))) {
-        // If this comes back as having hflush then some test setup assumption is wrong.
-        // Fail the test so that a developer has to look and triage
-        assertFalse("Did not enable EC!", out.hasCapability(StreamCapabilities.HFLUSH));
-      }
-    } catch (NoSuchMethodException e) {
-      // We're not testing anything interesting if EC is not available, so skip the rest of the test
-      Assume.assumeNoException("Using an older version of hadoop; EC not available.", e);
-    }
+    try (FSDataOutputStream out = fs.create(new Path("/canary"))) {
+      // If this comes back as having hflush then some test setup assumption is wrong.
+      // Fail the test so that a developer has to look and triage
+      assertFalse("Did not enable EC!", out.hasCapability(StreamCapabilities.HFLUSH));
+    }
 
     UTIL.getConfiguration().setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, true);
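Note: the rewritten setup uses only public Hadoop 3.x APIs, DFSTestUtil.enableAllECPolicies(DistributedFileSystem) and HdfsAdmin.setErasureCodingPolicy(Path, String), so the NoSuchMethodException/Assume fallback for older Hadoop is no longer needed. A condensed sketch of the EC setup (cluster and configuration wiring assumed; method name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;

    // Sketch, condensed from the hunk above: enable all registered EC policies,
    // then apply the built-in RS-3-2-1024k policy to the root directory.
    static void enableErasureCoding(DistributedFileSystem fs, Configuration conf)
        throws Exception {
      DFSTestUtil.enableAllECPolicies(fs);
      HdfsAdmin admin = new HdfsAdmin(fs.getUri(), conf);
      admin.setErasureCodingPolicy(new Path("/"), "RS-3-2-1024k");
    }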