HADOOP-13137. TraceAdmin should support Kerberized cluster (Wei-Chiu Chuang via cmccabe)

Colin Patrick McCabe 2016-05-31 17:54:34 -07:00
parent c7921c9bdd
commit 8ceb06e239
3 changed files with 92 additions and 2 deletions

TraceAdmin.java

@@ -29,6 +29,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
@@ -36,6 +37,8 @@
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A command-line tool for viewing and modifying tracing settings.
@@ -44,6 +47,7 @@
 public class TraceAdmin extends Configured implements Tool {
   private TraceAdminProtocolPB proxy;
   private TraceAdminProtocolTranslatorPB remote;
+  private static final Logger LOG = LoggerFactory.getLogger(TraceAdmin.class);
 
   private void usage() {
     PrintStream err = System.err;
@@ -61,7 +65,9 @@ private void usage() {
         "  -list: List the current span receivers.\n" +
         "  -remove [id]\n" +
         "    Remove the span receiver with the specified id. Use -list to\n" +
-        "    find the id of each receiver.\n"
+        "    find the id of each receiver.\n" +
+        "  -principal: If the daemon is Kerberized, specify the service\n" +
+        "    principal name."
     );
   }
@@ -166,6 +172,14 @@ public int run(String argv[]) throws Exception {
       System.err.println("You must specify an operation.");
       return 1;
     }
+    String servicePrincipal = StringUtils.popOptionWithArgument("-principal",
+        args);
+    if (servicePrincipal != null) {
+      LOG.debug("Set service principal: {}", servicePrincipal);
+      getConf().set(
+          CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
+          servicePrincipal);
+    }
     RPC.setProtocolEngine(getConf(), TraceAdminProtocolPB.class,
         ProtobufRpcEngine.class);
     InetSocketAddress address = NetUtils.createSocketAddr(hostPort);
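Note that the `-principal` flag itself only stores the supplied name under `hadoop.security.service.user.name` (`CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY`); the SASL RPC client then resolves the expected Kerberos server principal from that key (via the protocol's `@KerberosInfo` annotation) when it connects. Below is a minimal standalone sketch of the same wiring, condensed from the `run()` method above; the four-argument `RPC.getProxy` overload and the `nn.example.com:8020` endpoint are illustrative assumptions, not part of this patch:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;
    import org.apache.hadoop.ipc.ProtobufRpcEngine;
    import org.apache.hadoop.ipc.RPC;
    import org.apache.hadoop.net.NetUtils;
    import org.apache.hadoop.tracing.TraceAdminProtocolPB;

    public class TraceProxySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // What -principal sets: the server principal the client should expect.
        conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
            "namenode/nn.example.com@EXAMPLE.COM");  // hypothetical principal
        // Same wiring as TraceAdmin.run(): protobuf engine, then an RPC proxy.
        RPC.setProtocolEngine(conf, TraceAdminProtocolPB.class,
            ProtobufRpcEngine.class);
        InetSocketAddress addr = NetUtils.createSocketAddr("nn.example.com:8020");
        TraceAdminProtocolPB proxy = RPC.getProxy(TraceAdminProtocolPB.class,
            RPC.getProtocolVersion(TraceAdminProtocolPB.class), addr, conf);
        try {
          // ... wrap in TraceAdminProtocolTranslatorPB and call
          // listSpanReceivers(), as TraceAdmin does ...
        } finally {
          RPC.stopProxy(proxy);
        }
      }
    }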

Tracing.md

@@ -84,6 +84,15 @@ You can specify the configuration associated with span receiver by `-Ckey=value`
       ID  CLASS
       2   org.apache.htrace.core.LocalFileSpanReceiver
 
+If the cluster is Kerberized, the service principal name must be specified using the `-principal` option.
+For example, to list the span receivers of a NameNode:
+
+    $ hadoop trace -list -host NN1:8020 -principal namenode/NN1@EXAMPLE.COM
+
+Or, for a DataNode:
+
+    $ hadoop trace -list -host DN1:9867 -principal datanode/DN1@EXAMPLE.COM
+
 ### Starting tracing spans by HTrace API
 
 In order to trace, you will need to wrap the traced logic with **tracing span** as shown below.
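Also note that `-principal` names the server's principal only; the caller must separately hold valid Kerberos credentials, e.g. from `kinit` or a keytab login (the test below does the latter via `UserGroupInformation`). A minimal sketch of a programmatic keytab login, with a hypothetical admin principal and keytab path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
    import org.apache.hadoop.security.UserGroupInformation;

    public class KeytabLoginSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Turn on Kerberos authentication for this client process.
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
            "kerberos");
        UserGroupInformation.setConfiguration(conf);
        // Hypothetical principal and keytab path; substitute your own.
        UserGroupInformation.loginUserFromKeytab("admin@EXAMPLE.COM",
            "/etc/security/keytabs/admin.keytab");
        // Subsequent Hadoop RPC clients (e.g. TraceAdmin) now authenticate
        // as the logged-in user.
      }
    }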

TestTraceAdmin.java

@@ -19,8 +19,12 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.htrace.core.Tracer;
 import org.junit.Assert;
 import org.junit.Test;
@@ -28,9 +32,18 @@
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.PrintStream;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
 
-public class TestTraceAdmin {
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Test cases for TraceAdmin.
+ */
+public class TestTraceAdmin extends SaslDataTransferTestCase {
   private static final String NEWLINE = System.getProperty("line.separator");
+  private final static int ONE_DATANODE = 1;
 
   private String runTraceCommand(TraceAdmin trace, String... cmd)
       throws Exception {
@@ -58,6 +71,12 @@ private String getHostPortForNN(MiniDFSCluster cluster) {
     return "127.0.0.1:" + cluster.getNameNodePort();
   }
 
+  private String getHostPortForDN(MiniDFSCluster cluster, int index) {
+    ArrayList<DataNode> dns = cluster.getDataNodes();
+    assertTrue(index >= 0 && index < dns.size());
+    return "127.0.0.1:" + dns.get(index).getIpcPort();
+  }
+
   @Test
   public void testCreateAndDestroySpanReceiver() throws Exception {
     Configuration conf = new Configuration();
@@ -102,4 +121,52 @@ public void testCreateAndDestroySpanReceiver() throws Exception {
       tempDir.close();
     }
   }
+
+  /**
+   * Test running hadoop trace commands with -principal option against
+   * Kerberized NN and DN.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testKerberizedTraceAdmin() throws Exception {
+    MiniDFSCluster cluster = null;
+    final HdfsConfiguration conf = createSecureConfig(
+        "authentication,privacy");
+    try {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(ONE_DATANODE)
+          .build();
+      cluster.waitActive();
+      final String nnHost = getHostPortForNN(cluster);
+      final String dnHost = getHostPortForDN(cluster, 0);
+      // login using keytab and run commands
+      UserGroupInformation
+          .loginUserFromKeytabAndReturnUGI(getHdfsPrincipal(), getHdfsKeytab())
+          .doAs(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+              // send trace command to NN
+              TraceAdmin trace = new TraceAdmin();
+              trace.setConf(conf);
+              final String[] nnTraceCmd = new String[] {
+                  "-list", "-host", nnHost, "-principal",
+                  conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)};
+              int ret = trace.run(nnTraceCmd);
+              assertEquals(0, ret);
+
+              // send trace command to DN
+              final String[] dnTraceCmd = new String[] {
+                  "-list", "-host", dnHost, "-principal",
+                  conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)};
+              ret = trace.run(dnTraceCmd);
+              assertEquals(0, ret);
+              return null;
+            }
+          });
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }