HDFS-14013. Skip any credentials stored in HDFS when starting ZKFC. Contributed by Stephen O'Donnell

(cherry picked from commit c3b3b36dee)
This commit is contained in:
S O'Donnell 2021-03-01 11:36:41 +00:00
parent 792329fde9
commit 24a0304159
2 changed files with 49 additions and 10 deletions

View File

@ -31,11 +31,14 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.ha.ActiveStandbyElector.ActiveNotFoundException;
import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
import org.apache.hadoop.security.ProviderUtils;
import org.apache.hadoop.util.ZKUtil;
import org.apache.hadoop.util.ZKUtil.ZKAuthInfo;
import org.apache.hadoop.ha.HealthMonitor.State;
@ -343,8 +346,19 @@ public abstract class ZKFailoverController {
zkAcls = Ids.CREATOR_ALL_ACL;
}
// Parse authentication from configuration.
List<ZKAuthInfo> zkAuths = SecurityUtil.getZKAuthInfos(conf, ZK_AUTH_KEY);
// Parse authentication from configuration. Exclude any Credential providers
// using the hdfs scheme to avoid a circular dependency. As HDFS is likely
// not started when ZKFC is started, we cannot read the credentials from it.
Configuration c = conf;
try {
c = ProviderUtils.excludeIncompatibleCredentialProviders(
conf, FileSystem.getFileSystemClass("hdfs", conf));
} catch (UnsupportedFileSystemException e) {
// Should not happen in a real cluster, as the hdfs FS will always be
// present. Inside tests, the hdfs filesystem will not be present
LOG.debug("No filesystem found for the hdfs scheme", e);
}
List<ZKAuthInfo> zkAuths = SecurityUtil.getZKAuthInfos(c, ZK_AUTH_KEY);
// Sanity check configuration.
Preconditions.checkArgument(zkQuorum != null,

View File

@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.net.ServerSocketUtil;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
@ -93,7 +94,9 @@ public class TestDFSZKFailoverController extends ClientBaseWithFixes {
ServerSocketUtil.getPort(10023, 100));
conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2",
ServerSocketUtil.getPort(10024, 100));
}
private void startCluster() throws Exception {
// prefer non-ephemeral port to avoid port collision on restartNameNode
MiniDFSNNTopology topology = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
@ -147,11 +150,26 @@ public class TestDFSZKFailoverController extends ClientBaseWithFixes {
}
}
/**
 * Ensure the cluster simply starts with a hdfs jceks credential provider
 * configured. HDFS-14013.
 *
 * Note: the Javadoc must precede the annotation, otherwise the javadoc
 * tool does not attach it to the method.
 */
@Test(timeout=60000)
public void testZFFCStartsWithCredentialProviderReferencingHDFS()
    throws Exception {
  // Point the credential provider path at a jceks keystore stored on HDFS.
  // ZKFC must exclude this provider (circular dependency: HDFS is not yet
  // running when ZKFC starts), otherwise startup hangs or fails.
  conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
      "jceks://hdfs/tmp/test.jceks");
  // Starting the cluster is the assertion itself: before HDFS-14013 this
  // configuration prevented ZKFC from starting.
  startCluster();
}
/**
* Test that thread dump is captured after NN state changes.
*/
@Test(timeout=60000)
public void testThreadDumpCaptureAfterNNStateChange() throws Exception {
startCluster();
MockNameNodeResourceChecker mockResourceChecker =
new MockNameNodeResourceChecker(conf);
mockResourceChecker.setResourcesAvailable(false);
@ -169,6 +187,7 @@ public class TestDFSZKFailoverController extends ClientBaseWithFixes {
*/
@Test(timeout=60000)
public void testFailoverAndBackOnNNShutdown() throws Exception {
startCluster();
Path p1 = new Path("/dir1");
Path p2 = new Path("/dir2");
@ -201,6 +220,7 @@ public class TestDFSZKFailoverController extends ClientBaseWithFixes {
@Test(timeout=30000)
public void testManualFailover() throws Exception {
startCluster();
thr2.zkfc.getLocalTarget().getZKFCProxy(conf, 15000).gracefulFailover();
waitForHAState(0, HAServiceState.STANDBY);
waitForHAState(1, HAServiceState.ACTIVE);
@ -212,6 +232,7 @@ public class TestDFSZKFailoverController extends ClientBaseWithFixes {
@Test(timeout=30000)
public void testWithoutBindAddressSet() throws Exception {
startCluster();
DFSZKFailoverController zkfc = DFSZKFailoverController.create(
conf);
@ -222,6 +243,7 @@ public class TestDFSZKFailoverController extends ClientBaseWithFixes {
@Test(timeout=30000)
public void testWithBindAddressSet() throws Exception {
startCluster();
conf.set(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);
DFSZKFailoverController zkfc = DFSZKFailoverController.create(
conf);
@ -239,6 +261,7 @@ public class TestDFSZKFailoverController extends ClientBaseWithFixes {
*/
@Test
public void testObserverRejectZkfcCall() throws Exception {
startCluster();
NamenodeProtocols nn1 = cluster.getNameNode(1).getRpcServer();
nn1.transitionToObserver(
new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER_FORCED));
@ -251,6 +274,7 @@ public class TestDFSZKFailoverController extends ClientBaseWithFixes {
@Test(timeout=30000)
public void testManualFailoverWithDFSHAAdmin() throws Exception {
startCluster();
DFSHAAdmin tool = new DFSHAAdmin();
tool.setConf(conf);
assertEquals(0,
@ -279,6 +303,7 @@ public class TestDFSZKFailoverController extends ClientBaseWithFixes {
@Test(timeout=30000)
public void testElectionOnObserver() throws Exception{
startCluster();
InputStream inOriginial = System.in;
try {
DFSHAAdmin tool = new DFSHAAdmin();