From 574f99bd6b596c39bd1accc7a134de3f5ad96bd2 Mon Sep 17 00:00:00 2001
From: Todd Lipcon
Date: Mon, 16 Apr 2012 21:51:52 +0000
Subject: [PATCH] HDFS-3284. bootstrapStandby fails in secure cluster. Contributed by Todd Lipcon.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1326813 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  2 ++
 .../server/namenode/ha/BootstrapStandby.java  |  7 ++--
 .../apache/hadoop/hdfs/tools/DFSHAAdmin.java  | 33 ++++++++++++-------
 3 files changed, 28 insertions(+), 14 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 67152967d36..1eb6d2c80d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -522,6 +522,8 @@ Release 2.0.0 - UNRELEASED
     HDFS-3268. FileContext API mishandles token service and incompatible with
     HA (Daryn Sharp via todd)
 
+    HDFS-3284. bootstrapStandby fails in secure cluster (todd)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
index 481dde3cd2d..1777ca6f6a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.tools.DFSHAAdmin;
 import org.apache.hadoop.hdfs.tools.NNHAServiceTarget;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
@@ -144,8 +145,8 @@ public class BootstrapStandby implements Tool, Configurable {
   }
 
   private HAServiceProtocol createHAProtocolProxy() throws IOException {
-    return new NNHAServiceTarget(new HdfsConfiguration(conf),
-        nsId, otherNNId).getProxy(conf, 15000);
+    return new NNHAServiceTarget(new HdfsConfiguration(conf), nsId, otherNNId)
+        .getProxy(conf, 15000);
   }
 
   private int doRun() throws IOException {
@@ -334,7 +335,7 @@ public class BootstrapStandby implements Tool, Configurable {
 
   @Override
   public void setConf(Configuration conf) {
-    this.conf = conf;
+    this.conf = DFSHAAdmin.addSecurityConfiguration(conf);
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
index 4db5a86b93d..d4397276ea1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
@@ -46,21 +46,32 @@ public class DFSHAAdmin extends HAAdmin {
   @Override
   public void setConf(Configuration conf) {
     if (conf != null) {
-      // Make a copy so we don't mutate it. Also use an HdfsConfiguration to
-      // force loading of hdfs-site.xml.
-      conf = new HdfsConfiguration(conf);
-      String nameNodePrincipal = conf.get(
-          DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "");
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Using NN principal: " + nameNodePrincipal);
-      }
-
-      conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
-          nameNodePrincipal);
+      conf = addSecurityConfiguration(conf);
     }
     super.setConf(conf);
   }
 
+  /**
+   * Add the requisite security principal settings to the given Configuration,
+   * returning a copy.
+   * @param conf the original config
+   * @return a copy with the security settings added
+   */
+  public static Configuration addSecurityConfiguration(Configuration conf) {
+    // Make a copy so we don't mutate it. Also use an HdfsConfiguration to
+    // force loading of hdfs-site.xml.
+    conf = new HdfsConfiguration(conf);
+    String nameNodePrincipal = conf.get(
+        DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Using NN principal: " + nameNodePrincipal);
+    }
+
+    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
+        nameNodePrincipal);
+    return conf;
+  }
+
   /**
    * Try to map the given namenode ID to its service address.
    */
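
For context, the helper this patch extracts is intended to be reusable by any client-side tool that opens RPC connections to a Kerberized NameNode. A minimal sketch of such a caller follows; the class and its name (MySecureHdfsTool) are hypothetical and not part of the patch, only DFSHAAdmin.addSecurityConfiguration() comes from the change above.

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSHAAdmin;

// Hypothetical tool class illustrating the reuse pattern this patch
// applies to BootstrapStandby.
public class MySecureHdfsTool implements Configurable {
  private Configuration conf;

  @Override
  public void setConf(Configuration conf) {
    // addSecurityConfiguration() copies the conf into an HdfsConfiguration
    // (so hdfs-site.xml is loaded) and maps the NameNode principal setting
    // (DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY) into the generic key the
    // RPC layer checks (CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY),
    // so secure connections to the NN can authenticate its Kerberos principal.
    this.conf = DFSHAAdmin.addSecurityConfiguration(conf);
  }

  @Override
  public Configuration getConf() {
    return conf;
  }
}

Because the helper copies rather than mutates, the caller's original Configuration is left untouched; this mirrors the comment preserved in the moved code.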