diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f5c0b60c86e..d3ec859e59e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -211,6 +211,9 @@ Release 2.0.0 - UNRELEASED
     HDFS-2564. Cleanup unnecessary exceptions thrown and unnecessary casts.
     (Hari Mankude via eli)
 
+    HDFS-3084. FenceMethod.tryFence() and ShellCommandFencer should pass
+    namenodeId as well as host:port (todd)
+
   OPTIMIZATIONS
 
     HDFS-2477. Optimize computing the diff between a block report and the
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 91031ab1347..4a2a5d5eaf4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1033,13 +1033,7 @@ public class DFSUtil {
       String nsId, String nnId) {
 
     if (nsId == null) {
-      Collection<String> nsIds = getNameServiceIds(conf);
-      if (1 == nsIds.size()) {
-        nsId = nsIds.toArray(new String[1])[0];
-      } else {
-        // No nameservice ID was given and more than one is configured
-        return null;
-      }
+      nsId = getOnlyNameServiceIdOrNull(conf);
     }
 
     String serviceAddrKey = concatSuffixes(
@@ -1054,4 +1048,18 @@ public class DFSUtil {
     }
     return serviceRpcAddr;
   }
+
+  /**
+   * If the configuration refers to only a single nameservice, return the
+   * name of that nameservice. If it refers to 0 or more than 1, return null.
+   */
+  public static String getOnlyNameServiceIdOrNull(Configuration conf) {
+    Collection<String> nsIds = getNameServiceIds(conf);
+    if (1 == nsIds.size()) {
+      return nsIds.toArray(new String[1])[0];
+    } else {
+      // No nameservice ID was given and more than one is configured
+      return null;
+    }
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
index 9e8c239e7e8..c0b972bda3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.net.InetSocketAddress;
+import java.util.Map;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ha.BadFencingConfigurationException;
@@ -28,6 +29,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetUtils;
 
+import com.google.common.base.Preconditions;
+
 /**
  * One of the NN NameNodes acting as the target of an administrative command
  * (e.g. failover).
@@ -35,14 +38,36 @@ import org.apache.hadoop.net.NetUtils;
 @InterfaceAudience.Private
 public class NNHAServiceTarget extends HAServiceTarget {
 
+  // Keys added to the fencing script environment
+  private static final String NAMESERVICE_ID_KEY = "nameserviceid";
+  private static final String NAMENODE_ID_KEY = "namenodeid";
+
   private final InetSocketAddress addr;
   private NodeFencer fencer;
   private BadFencingConfigurationException fenceConfigError;
+  private final String nnId;
+  private final String nsId;
 
   public NNHAServiceTarget(HdfsConfiguration conf,
       String nsId, String nnId) {
+    Preconditions.checkNotNull(nnId);
+
+    if (nsId == null) {
+      nsId = DFSUtil.getOnlyNameServiceIdOrNull(conf);
+      if (nsId == null) {
+        throw new IllegalArgumentException(
+            "Unable to determine the nameservice id.");
+      }
+    }
+    assert nsId != null;
+
+    // Make a copy of the conf, and override configs based on the
+    // target node -- not the node we happen to be running on.
+    HdfsConfiguration targetConf = new HdfsConfiguration(conf);
+    NameNode.initializeGenericKeys(targetConf, nsId, nnId);
+
     String serviceAddr = 
-      DFSUtil.getNamenodeServiceAddr(conf, nsId, nnId);
+      DFSUtil.getNamenodeServiceAddr(targetConf, nsId, nnId);
     if (serviceAddr == null) {
       throw new IllegalArgumentException(
           "Unable to determine service address for namenode '" + nnId + "'");
@@ -50,10 +75,12 @@ public class NNHAServiceTarget extends HAServiceTarget {
     this.addr = NetUtils.createSocketAddr(serviceAddr,
         NameNode.DEFAULT_PORT);
     try {
-      this.fencer = NodeFencer.create(conf);
+      this.fencer = NodeFencer.create(targetConf);
     } catch (BadFencingConfigurationException e) {
       this.fenceConfigError = e;
     }
+    this.nnId = nnId;
+    this.nsId = nsId;
   }
 
   /**
@@ -81,4 +108,19 @@ public class NNHAServiceTarget extends HAServiceTarget {
     return "NameNode at " + addr;
   }
 
+  public String getNameServiceId() {
+    return this.nsId;
+  }
+
+  public String getNameNodeId() {
+    return this.nnId;
+  }
+
+  @Override
+  protected void addFencingParameters(Map<String, String> ret) {
+    super.addFencingParameters(ret);
+
+    ret.put(NAMESERVICE_ID_KEY, getNameServiceId());
+    ret.put(NAMENODE_ID_KEY, getNameNodeId());
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index ef8f8503958..17949f68e73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -179,6 +179,17 @@ public class TestDFSUtil {
     assertEquals("nn1", it.next().toString());
     assertEquals("nn2", it.next().toString());
   }
+
+  @Test
+  public void testGetOnlyNameServiceIdOrNull() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
+    assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
+    conf.set(DFS_FEDERATION_NAMESERVICES, "");
+    assertNull(DFSUtil.getOnlyNameServiceIdOrNull(conf));
+    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1");
+    assertEquals("ns1", DFSUtil.getOnlyNameServiceIdOrNull(conf));
+  }
 
   /**
    * Test for {@link DFSUtil#getNNServiceRpcAddresses(Configuration)}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
index 72b1a782d8f..9189e4b0e64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.tools;
 import static org.junit.Assert.*;
 
 import java.io.ByteArrayOutputStream;
+import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 
@@ -41,6 +42,7 @@ import org.junit.Test;
 
 import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
+import com.google.common.io.Files;
 
 /**
  * Tests for HAAdmin command with {@link MiniDFSCluster} set up in HA mode.
@@ -59,6 +61,8 @@ public class TestDFSHAAdminMiniCluster {
 
   private String errOutput;
 
+  private int nn1Port;
+
   @Before
   public void setup() throws IOException {
     conf = new Configuration();
@@ -69,6 +73,8 @@ public class TestDFSHAAdminMiniCluster {
     tool.setConf(conf);
     tool.setErrOut(new PrintStream(errOutBytes));
     cluster.waitActive();
+
+    nn1Port = cluster.getNameNodePort(0);
   }
 
   @After
@@ -124,9 +130,17 @@ public class TestDFSHAAdminMiniCluster {
   public void testFencer() throws Exception {
     // Test failover with no fencer
     assertEquals(-1, runTool("-failover", "nn1", "nn2"));
-    
+
+    // Set up fencer to write info about the fencing target into a
+    // tmp file, so we can verify that the args were substituted right
+    File tmpFile = File.createTempFile("testFencer", ".txt");
+    tmpFile.deleteOnExit();
+    conf.set(NodeFencer.CONF_METHODS_KEY,
+        "shell(echo -n $target_nameserviceid.$target_namenodeid " +
+        "$target_port $dfs_ha_namenode_id > " +
+        tmpFile.getAbsolutePath() + ")");
+
     // Test failover with fencer
-    conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
     tool.setConf(conf);
     assertEquals(0, runTool("-transitionToActive", "nn1"));
     assertEquals(0, runTool("-failover", "nn1", "nn2"));
@@ -134,21 +148,36 @@
     // Test failover with fencer and nameservice
     assertEquals(0, runTool("-ns", "minidfs-ns", "-failover", "nn2", "nn1"));
 
+    // Fencer has not run yet, since none of the above required fencing
+    assertEquals("", Files.toString(tmpFile, Charsets.UTF_8));
+
     // Test failover with fencer and forcefence option
     assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
-    
+
+    // The fence script should run with the configuration from the target
+    // node, rather than the configuration from the fencing node
+    assertEquals("minidfs-ns.nn1 " + nn1Port + " nn1",
+        Files.toString(tmpFile, Charsets.UTF_8));
+    tmpFile.delete();
+
     // Test failover with forceactive option
     assertEquals(0, runTool("-failover", "nn2", "nn1", "--forceactive"));
+
+    // Fencing should not occur, since it was graceful
+    assertFalse(tmpFile.exists());
+
     // Test failover with not fencer and forcefence option
     conf.unset(NodeFencer.CONF_METHODS_KEY);
     tool.setConf(conf);
    assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
-    
+    assertFalse(tmpFile.exists());
+
     // Test failover with bad fencer and forcefence option
     conf.set(NodeFencer.CONF_METHODS_KEY, "foobar!");
     tool.setConf(conf);
     assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
+    assertFalse(tmpFile.exists());
 
     // Test failover with force fence listed before the other arguments
     conf.set(NodeFencer.CONF_METHODS_KEY, "shell(true)");
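A minimal usage sketch (not part of the patch) of how the pieces above fit together. It assumes an HA hdfs-site.xml with a single configured nameservice and a namenode ID "nn1" already in place (hypothetical names), and uses only the APIs and substitution variables that appear in this change: NNHAServiceTarget's new null-nsId resolution via DFSUtil.getOnlyNameServiceIdOrNull(), the new getNameServiceId()/getNameNodeId() accessors, and the $target_nameserviceid / $target_namenodeid / $target_port variables exercised by TestDFSHAAdminMiniCluster above. The fence-nn.sh path is a made-up site-specific script.

    import org.apache.hadoop.ha.NodeFencer;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.tools.NNHAServiceTarget;

    public class FencingTargetExample {
      public static void main(String[] args) {
        // Assumes dfs.federation.nameservices, dfs.ha.namenodes.*, and the
        // per-namenode rpc-address keys are already set in hdfs-site.xml.
        HdfsConfiguration conf = new HdfsConfiguration();

        // A shell fencing method can now refer to the target's nameservice
        // and namenode IDs, not just its address.
        conf.set(NodeFencer.CONF_METHODS_KEY,
            "shell(/usr/local/bin/fence-nn.sh " +
            "$target_nameserviceid $target_namenodeid $target_port)");

        // nsId may be passed as null: with a single configured nameservice,
        // NNHAServiceTarget resolves it via DFSUtil.getOnlyNameServiceIdOrNull.
        NNHAServiceTarget target = new NNHAServiceTarget(conf, null, "nn1");
        System.out.println("Fencing target: " + target.getNameServiceId()
            + "." + target.getNameNodeId() + " at " + target.getAddress());
      }
    }

Note that the keys added in addFencingParameters() are "nameserviceid" and "namenodeid"; ShellCommandFencer exposes the target's fencing parameters to the script with a "target_" prefix, which is why the test above reads them as $target_nameserviceid and $target_namenodeid.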