HDFS-10584. Allow long-running Mover tool to login with keytab. Contributed by Rakesh R.
commit e806db7190
parent a445b82baa

@@ -487,6 +487,15 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_MOVER_MOVERTHREADS_DEFAULT = 1000;
   public static final String  DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY = "dfs.mover.retry.max.attempts";
   public static final int     DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT = 10;
+  public static final String  DFS_MOVER_KEYTAB_ENABLED_KEY =
+      "dfs.mover.keytab.enabled";
+  public static final boolean DFS_MOVER_KEYTAB_ENABLED_DEFAULT = false;
+  public static final String  DFS_MOVER_ADDRESS_KEY = "dfs.mover.address";
+  public static final String  DFS_MOVER_ADDRESS_DEFAULT = "0.0.0.0:0";
+  public static final String  DFS_MOVER_KEYTAB_FILE_KEY =
+      "dfs.mover.keytab.file";
+  public static final String  DFS_MOVER_KERBEROS_PRINCIPAL_KEY =
+      "dfs.mover.kerberos.principal";
 
   public static final String  DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
   public static final int     DFS_DATANODE_DEFAULT_PORT = 9866;
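A minimal sketch of how these key/default constant pairs are consumed from a Configuration, mirroring what the Mover change below does. The key names come from the patch; the harness class itself is illustrative and not part of the commit.

// Illustrative only: reading the new Mover security keys the same way
// checkKeytabAndInit() in the Mover hunk below does.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class MoverKeysSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    boolean keytabEnabled = conf.getBoolean(
        DFSConfigKeys.DFS_MOVER_KEYTAB_ENABLED_KEY,
        DFSConfigKeys.DFS_MOVER_KEYTAB_ENABLED_DEFAULT); // false by default
    String addr = conf.get(DFSConfigKeys.DFS_MOVER_ADDRESS_KEY,
        DFSConfigKeys.DFS_MOVER_ADDRESS_DEFAULT);        // "0.0.0.0:0"
    System.out.println(keytabEnabled + " " + addr);
  }
}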
@@ -47,7 +47,10 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;

@@ -57,6 +60,7 @@ import java.io.BufferedReader;
 import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.text.DateFormat;
 import java.util.*;

@@ -579,6 +583,22 @@ public class Mover {
     }
   }
 
+  private static void checkKeytabAndInit(Configuration conf)
+      throws IOException {
+    if (conf.getBoolean(DFSConfigKeys.DFS_MOVER_KEYTAB_ENABLED_KEY,
+        DFSConfigKeys.DFS_MOVER_KEYTAB_ENABLED_DEFAULT)) {
+      LOG.info("Keytab is configured, will login using keytab.");
+      UserGroupInformation.setConfiguration(conf);
+      String addr = conf.get(DFSConfigKeys.DFS_MOVER_ADDRESS_KEY,
+          DFSConfigKeys.DFS_MOVER_ADDRESS_DEFAULT);
+      InetSocketAddress socAddr = NetUtils.createSocketAddr(addr, 0,
+          DFSConfigKeys.DFS_MOVER_ADDRESS_KEY);
+      SecurityUtil.login(conf, DFSConfigKeys.DFS_MOVER_KEYTAB_FILE_KEY,
+          DFSConfigKeys.DFS_MOVER_KERBEROS_PRINCIPAL_KEY,
+          socAddr.getHostName());
+    }
+  }
+
   static int run(Map<URI, List<Path>> namenodes, Configuration conf)
       throws IOException, InterruptedException {
     final long sleeptime =

@@ -588,7 +608,8 @@ public class Mover {
         DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000;
     AtomicInteger retryCount = new AtomicInteger(0);
     LOG.info("namenodes = " + namenodes);
 
+    checkKeytabAndInit(conf);
     List<NameNodeConnector> connectors = Collections.emptyList();
     try {
       connectors = NameNodeConnector.newNameNodeConnectors(namenodes,
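The interesting behavior in checkKeytabAndInit() is principal resolution: SecurityUtil.login() expands a _HOST placeholder in dfs.mover.kerberos.principal using the hostname derived from dfs.mover.address. A standalone sketch of that expansion via SecurityUtil.getServerPrincipal(), the documented helper for _HOST substitution; the principal and hostname below are made-up examples, and the wildcard note is my reading of SecurityUtil rather than something the patch states.

// Standalone sketch: how mover/_HOST@REALM becomes a concrete principal.
import java.io.IOException;
import org.apache.hadoop.security.SecurityUtil;

public class HostSubstitutionSketch {
  public static void main(String[] args) throws IOException {
    String resolved = SecurityUtil.getServerPrincipal(
        "mover/_HOST@EXAMPLE.COM", "mover1.example.com");
    // Prints: mover/mover1.example.com@EXAMPLE.COM
    System.out.println(resolved);
    // With the dfs.mover.address default of 0.0.0.0:0, the hostname passed
    // in by checkKeytabAndInit() is the wildcard, which getServerPrincipal()
    // resolves to the local canonical hostname (assumption based on
    // SecurityUtil's documented _HOST handling).
  }
}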
@@ -3711,6 +3711,46 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.mover.keytab.enabled</name>
+  <value>false</value>
+  <description>
+    Set to true to enable login using a keytab for Kerberized Hadoop.
+  </description>
+</property>
+
+<property>
+  <name>dfs.mover.address</name>
+  <value>0.0.0.0:0</value>
+  <description>
+    The hostname used for a keytab based Kerberos login. Keytab based login
+    can be enabled with dfs.mover.keytab.enabled.
+  </description>
+</property>
+
+<property>
+  <name>dfs.mover.keytab.file</name>
+  <value></value>
+  <description>
+    The keytab file used by the Mover to login as its
+    service principal. The principal name is configured with
+    dfs.mover.kerberos.principal. Keytab based login can be
+    enabled with dfs.mover.keytab.enabled.
+  </description>
+</property>
+
+<property>
+  <name>dfs.mover.kerberos.principal</name>
+  <value></value>
+  <description>
+    The Mover principal. This is typically set to
+    mover/_HOST@REALM.TLD. The Mover will substitute _HOST with its
+    own fully qualified hostname at startup. The _HOST placeholder
+    allows using the same configuration setting on different servers.
+    Keytab based login can be enabled with dfs.mover.keytab.enabled.
+  </description>
+</property>
+
 <property>
   <name>dfs.namenode.audit.log.async</name>
   <value>false</value>
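The four properties above are what an operator sets in hdfs-site.xml; programmatically the same configuration looks like the sketch below. Property names are taken from the patch, while the keytab path, principal, and realm are hypothetical placeholders.

// Hypothetical operator-side configuration for a long-running,
// Kerberized Mover; equivalent to setting the four dfs.mover.*
// properties in hdfs-site.xml. Paths and principal are made up.
import org.apache.hadoop.conf.Configuration;

public class MoverKeytabConfSketch {
  public static Configuration secureMoverConf() {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.mover.keytab.enabled", true);
    conf.set("dfs.mover.keytab.file", "/etc/security/keytabs/mover.keytab");
    conf.set("dfs.mover.kerberos.principal", "mover/_HOST@EXAMPLE.COM");
    // Leave dfs.mover.address at its 0.0.0.0:0 default so _HOST resolves
    // to the local fully qualified hostname.
    return conf;
  }
}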
@@ -17,21 +17,47 @@
  */
 package org.apache.hadoop.hdfs.server.mover;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MOVER_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MOVER_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MOVER_KEYTAB_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MOVER_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+
+import java.io.File;
 import java.io.IOException;
 import java.net.URI;
-import java.util.*;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import com.google.common.collect.Maps;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
@@ -47,14 +73,27 @@ import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
 import org.apache.hadoop.hdfs.server.mover.Mover.MLocation;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosName;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Assert;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Supplier;
+import com.google.common.collect.Maps;
 
 public class TestMover {
-  static final int DEFAULT_BLOCK_SIZE = 100;
+  private static final Logger LOG = LoggerFactory.getLogger(TestMover.class);
+  private static final int DEFAULT_BLOCK_SIZE = 100;
+  private File keytabFile;
+  private String principal;
 
   static {
     TestBalancer.initTestSetup();
@@ -116,14 +155,11 @@ public class TestMover {
     }
   }
 
-  @Test
-  public void testScheduleBlockWithinSameNode() throws Exception {
-    final Configuration conf = new HdfsConfiguration();
-    initConf(conf);
+  private void testWithinSameNode(Configuration conf) throws Exception {
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(3)
         .storageTypes(
-            new StorageType[] { StorageType.DISK, StorageType.ARCHIVE })
+            new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
         .build();
     try {
       cluster.waitActive();

@@ -133,13 +169,11 @@ public class TestMover {
       dfs.mkdirs(dir);
       // write to DISK
       dfs.setStoragePolicy(dir, "HOT");
-      {
-        final FSDataOutputStream out = dfs.create(new Path(file));
-        out.writeChars("testScheduleWithinSameNode");
-        out.close();
-      }
+      final FSDataOutputStream out = dfs.create(new Path(file));
+      out.writeChars("testScheduleWithinSameNode");
+      out.close();
 
-      //verify before movement
+      // verify before movement
       LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
       StorageType[] storageTypes = lb.getStorageTypes();
       for (StorageType storageType : storageTypes) {

@@ -148,21 +182,49 @@ public class TestMover {
       // move to ARCHIVE
      dfs.setStoragePolicy(dir, "COLD");
       int rc = ToolRunner.run(conf, new Mover.Cli(),
-          new String[] { "-p", dir.toString() });
-      Assert.assertEquals("Movement to ARCHIVE should be successfull", 0, rc);
+          new String[] {"-p", dir.toString()});
+      Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
 
-      // Wait till namenode notified
-      Thread.sleep(3000);
-      lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
-      storageTypes = lb.getStorageTypes();
-      for (StorageType storageType : storageTypes) {
-        Assert.assertTrue(StorageType.ARCHIVE == storageType);
-      }
+      // Wait till namenode notified about the block location details
+      waitForLocatedBlockWithArchiveStorageType(dfs, file, 3);
     } finally {
       cluster.shutdown();
     }
   }
 
+  private void waitForLocatedBlockWithArchiveStorageType(
+      final DistributedFileSystem dfs, final String file,
+      int expectedArchiveCount) throws Exception {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        LocatedBlock lb = null;
+        try {
+          lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
+        } catch (IOException e) {
+          LOG.error("Exception while getting located blocks", e);
+          return false;
+        }
+        int archiveCount = 0;
+        for (StorageType storageType : lb.getStorageTypes()) {
+          if (StorageType.ARCHIVE == storageType) {
+            archiveCount++;
+          }
+        }
+        LOG.info("Archive replica count, expected={} and actual={}",
+            expectedArchiveCount, archiveCount);
+        return expectedArchiveCount == archiveCount;
+      }
+    }, 100, 3000);
+  }
+
+  @Test
+  public void testScheduleBlockWithinSameNode() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    initConf(conf);
+    testWithinSameNode(conf);
+  }
+
   private void checkMovePaths(List<Path> actual, Path... expected) {
     Assert.assertEquals(expected.length, actual.size());
     for (Path p : expected) {
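The waitForLocatedBlockWithArchiveStorageType() helper above replaces a fixed Thread.sleep(3000) with condition polling. For readers unfamiliar with the utility, a self-contained sketch of the GenericTestUtils.waitFor() contract the helper builds on; the time-based condition is a stand-in for the real ARCHIVE-replica check.

// Illustrative use of the Supplier-based GenericTestUtils.waitFor():
// poll a condition every 100 ms and fail if it does not hold within 3 s.
import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

public class WaitForSketch {
  public static void main(String[] args) throws Exception {
    final long deadline = System.currentTimeMillis() + 1000;
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        // Replace with the real check, e.g. counting ARCHIVE replicas.
        return System.currentTimeMillis() >= deadline;
      }
    }, 100, 3000); // check interval ms, timeout ms
  }
}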
@@ -334,19 +396,10 @@ public class TestMover {
       dfs.setStoragePolicy(new Path(file), "COLD");
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] { "-p", file.toString() });
-      Assert.assertEquals("Movement to ARCHIVE should be successfull", 0, rc);
+      Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
 
-      // Wait till namenode notified
-      Thread.sleep(3000);
-      lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
-      storageTypes = lb.getStorageTypes();
-      int archiveCount = 0;
-      for (StorageType storageType : storageTypes) {
-        if (StorageType.ARCHIVE == storageType) {
-          archiveCount++;
-        }
-      }
-      Assert.assertEquals(archiveCount, 2);
+      // Wait till namenode notified about the block location details
+      waitForLocatedBlockWithArchiveStorageType(dfs, file, 2);
     } finally {
       cluster.shutdown();
     }

@@ -514,7 +567,7 @@ public class TestMover {
       // run Mover
       int rc = ToolRunner.run(conf, new Mover.Cli(),
           new String[] { "-p", barDir });
-      Assert.assertEquals("Movement to ARCHIVE should be successfull", 0, rc);
+      Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc);
 
       // verify storage types and locations
       locatedBlocks = client.getBlockLocations(fooFile, 0, fileLen);
@@ -562,4 +615,87 @@ public class TestMover {
       cluster.shutdown();
     }
   }
 
+  private void initSecureConf(Configuration conf) throws Exception {
+    String username = "mover";
+    File baseDir = GenericTestUtils.getTestDir(TestMover.class.getSimpleName());
+    FileUtil.fullyDelete(baseDir);
+    Assert.assertTrue(baseDir.mkdirs());
+
+    Properties kdcConf = MiniKdc.createConf();
+    MiniKdc kdc = new MiniKdc(kdcConf, baseDir);
+    kdc.start();
+
+    SecurityUtil.setAuthenticationMethod(
+        UserGroupInformation.AuthenticationMethod.KERBEROS, conf);
+    UserGroupInformation.setConfiguration(conf);
+    KerberosName.resetDefaultRealm();
+    Assert.assertTrue("Expected configuration to enable security",
+        UserGroupInformation.isSecurityEnabled());
+
+    keytabFile = new File(baseDir, username + ".keytab");
+    String keytab = keytabFile.getAbsolutePath();
+    // Windows will not reverse name lookup "127.0.0.1" to "localhost".
+    String krbInstance = Path.WINDOWS ? "127.0.0.1" : "localhost";
+    principal = username + "/" + krbInstance + "@" + kdc.getRealm();
+    String spnegoPrincipal = "HTTP/" + krbInstance + "@" + kdc.getRealm();
+    kdc.createPrincipal(keytabFile, username, username + "/" + krbInstance,
+        "HTTP/" + krbInstance);
+
+    conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, principal);
+    conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
+    conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, principal);
+    conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
+    conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
+    conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
+    conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
+    conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+    conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
+    conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
+    conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
+
+    conf.setBoolean(DFS_MOVER_KEYTAB_ENABLED_KEY, true);
+    conf.set(DFS_MOVER_ADDRESS_KEY, "localhost:0");
+    conf.set(DFS_MOVER_KEYTAB_FILE_KEY, keytab);
+    conf.set(DFS_MOVER_KERBEROS_PRINCIPAL_KEY, principal);
+
+    String keystoresDir = baseDir.getAbsolutePath();
+    String sslConfDir = KeyStoreTestUtil.getClasspathDir(TestMover.class);
+    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+
+    conf.set(DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getClientSSLConfigFileName());
+    conf.set(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
+        KeyStoreTestUtil.getServerSSLConfigFileName());
+    initConf(conf);
+  }
+
+  /**
+   * Test Mover runs fine when logging in with a keytab in kerberized env.
+   * Reusing testWithinSameNode here for basic functionality testing.
+   */
+  @Test(timeout = 300000)
+  public void testMoverWithKeytabs() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    try {
+      initSecureConf(conf);
+      final UserGroupInformation ugi = UserGroupInformation
+          .loginUserFromKeytabAndReturnUGI(principal,
+              keytabFile.getAbsolutePath());
+      ugi.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          // verify that mover runs Ok.
+          testWithinSameNode(conf);
+          // verify that UGI was logged in using keytab.
+          Assert.assertTrue(UserGroupInformation.isLoginKeytabBased());
+          return null;
+        }
+      });
+    } finally {
+      // Reset UGI so that other tests are not affected.
+      UserGroupInformation.reset();
+      UserGroupInformation.setConfiguration(new Configuration());
+    }
+  }
 }
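Putting it together, a hypothetical driver in the style of the new test: log in from a keytab, then invoke the Mover through ToolRunner. The principal, keytab path, and target path are placeholders; the class sits in the Mover's package because Mover.Cli is package-private, the same assumption TestMover relies on.

// Hypothetical end-to-end driver, not part of the commit.
package org.apache.hadoop.hdfs.server.mover;

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ToolRunner;

public class SecureMoverDriver {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new HdfsConfiguration();
    UserGroupInformation ugi = UserGroupInformation
        .loginUserFromKeytabAndReturnUGI(
            "mover/host1.example.com@EXAMPLE.COM",
            "/etc/security/keytabs/mover.keytab");
    int rc = ugi.doAs(new PrivilegedExceptionAction<Integer>() {
      @Override
      public Integer run() throws Exception {
        // Same invocation the tests use: migrate blocks under the given
        // path to the storage types demanded by its storage policy.
        return ToolRunner.run(conf, new Mover.Cli(),
            new String[] {"-p", "/archive-me"});
      }
    });
    System.exit(rc);
  }
}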