From c6349c3204679bade32a6365f816eaf8bcb85458 Mon Sep 17 00:00:00 2001 From: Brahma Reddy Battula Date: Wed, 7 Feb 2018 23:10:33 +0530 Subject: [PATCH] HDFS-12935. Get ambiguous result for DFSAdmin command in HA mode when only one namenode is up. Contributed by Jianfei Jiang. (cherry picked from commit 01bd6ab18fa48f4c7cac1497905b52e547962599) --- .../java/org/apache/hadoop/hdfs/HAUtil.java | 9 +- .../hdfs/server/namenode/FSNamesystem.java | 2 +- .../apache/hadoop/hdfs/tools/DFSAdmin.java | 192 +++++--- .../hadoop/hdfs/tools/TestDFSAdminWithHA.java | 464 +++++++++++++++++- 4 files changed, 602 insertions(+), 65 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java index 355608647c9..1d294beb504 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider; +import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.StandbyException; @@ -325,6 +326,7 @@ public class HAUtil { */ public static boolean isAtLeastOneActive(List namenodes) throws IOException { + List exceptions = new ArrayList<>(); for (ClientProtocol namenode : namenodes) { try { namenode.getFileInfo("/"); @@ -334,10 +336,15 @@ public class HAUtil { if (cause instanceof StandbyException) { // This is expected to happen for a standby NN. 
} else { - throw re; + exceptions.add(re); } + } catch (IOException ioe) { + exceptions.add(ioe); } } + if(!exceptions.isEmpty()){ + throw MultipleIOException.createIOException(exceptions); + } return false; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index ae57599041c..6f5bbc5e6b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -4391,7 +4391,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, } void setBalancerBandwidth(long bandwidth) throws IOException { - checkOperation(OperationCategory.UNCHECKED); + checkOperation(OperationCategory.WRITE); checkSuperuserPrivilege(); getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index a3e1bc133f9..a3a140d5518 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -50,7 +50,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.FsStatus; -import org.apache.hadoop.fs.FsTracer; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.shell.Command; @@ -86,6 +85,7 @@ import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo; import org.apache.hadoop.hdfs.protocol.SnapshotException; import org.apache.hadoop.hdfs.server.namenode.NameNode; import 
org.apache.hadoop.hdfs.server.namenode.TransferFsImage; +import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RefreshCallQueueProtocol; @@ -811,16 +811,26 @@ public class DFSAdmin extends FsShell { List> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class); + List exceptions = new ArrayList<>(); for (ProxyAndInfo proxy : proxies) { - boolean saved = proxy.getProxy().saveNamespace(timeWindow, txGap); - if (saved) { - System.out.println("Save namespace successful for " + + try{ + boolean saved = proxy.getProxy().saveNamespace(timeWindow, txGap); + if (saved) { + System.out.println("Save namespace successful for " + + proxy.getAddress()); + } else { + System.out.println("No extra checkpoint has been made for " + + proxy.getAddress()); + } + }catch (IOException ioe){ + System.out.println("Save namespace failed for " + proxy.getAddress()); - } else { - System.out.println("No extra checkpoint has been made for " - + proxy.getAddress()); + exceptions.add(ioe); } } + if(!exceptions.isEmpty()){ + throw MultipleIOException.createIOException(exceptions); + } } else { boolean saved = dfs.saveNamespace(timeWindow, txGap); if (saved) { @@ -863,10 +873,20 @@ public class DFSAdmin extends FsShell { List> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class); + List exceptions = new ArrayList<>(); for (ProxyAndInfo proxy : proxies) { - Boolean res = proxy.getProxy().restoreFailedStorage(arg); - System.out.println("restoreFailedStorage is set to " + res + " for " - + proxy.getAddress()); + try{ + Boolean res = proxy.getProxy().restoreFailedStorage(arg); + System.out.println("restoreFailedStorage is set to " + res + " for " + + proxy.getAddress()); + } catch (IOException ioe){ + System.out.println("restoreFailedStorage failed for " + + proxy.getAddress()); + exceptions.add(ioe); + } + } + 
if(!exceptions.isEmpty()){ + throw MultipleIOException.createIOException(exceptions); } } else { Boolean res = dfs.restoreFailedStorage(arg); @@ -896,10 +916,20 @@ public class DFSAdmin extends FsShell { List> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class); + List exceptions = new ArrayList<>(); for (ProxyAndInfo proxy: proxies) { - proxy.getProxy().refreshNodes(); - System.out.println("Refresh nodes successful for " + - proxy.getAddress()); + try{ + proxy.getProxy().refreshNodes(); + System.out.println("Refresh nodes successful for " + + proxy.getAddress()); + }catch (IOException ioe){ + System.out.println("Refresh nodes failed for " + + proxy.getAddress()); + exceptions.add(ioe); + } + } + if(!exceptions.isEmpty()){ + throw MultipleIOException.createIOException(exceptions); } } else { dfs.refreshNodes(); @@ -944,21 +974,14 @@ public class DFSAdmin extends FsShell { EnumSet openFilesTypes = EnumSet.copyOf(types); DistributedFileSystem dfs = getDFS(); - Configuration dfsConf = dfs.getConf(); - URI dfsUri = dfs.getUri(); - boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri); - RemoteIterator openFilesRemoteIterator; - if (isHaEnabled) { - ProxyAndInfo proxy = NameNodeProxies.createNonHAProxy( - dfsConf, HAUtil.getAddressOfActive(getDFS()), ClientProtocol.class, - UserGroupInformation.getCurrentUser(), false); - openFilesRemoteIterator = new OpenFilesIterator(proxy.getProxy(), - FsTracer.get(dfsConf), openFilesTypes, path); - } else { + try{ openFilesRemoteIterator = dfs.listOpenFiles(openFilesTypes, path); + printOpenFiles(openFilesRemoteIterator); + } catch (IOException ioe){ + System.out.println("List open files failed."); + throw ioe; } - printOpenFiles(openFilesRemoteIterator); return 0; } @@ -976,8 +999,7 @@ public class DFSAdmin extends FsShell { } /** - * Command to ask the namenode to set the balancer bandwidth for all of the - * datanodes. 
+ * Command to ask the active namenode to set the balancer bandwidth. * Usage: hdfs dfsadmin -setBalancerBandwidth bandwidth * @param argv List of of command line parameters. * @param idx The index of the command that is being processed. @@ -1008,23 +1030,12 @@ public class DFSAdmin extends FsShell { } DistributedFileSystem dfs = (DistributedFileSystem) fs; - Configuration dfsConf = dfs.getConf(); - URI dfsUri = dfs.getUri(); - boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri); - - if (isHaEnabled) { - String nsId = dfsUri.getHost(); - List> proxies = - HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, - nsId, ClientProtocol.class); - for (ProxyAndInfo proxy : proxies) { - proxy.getProxy().setBalancerBandwidth(bandwidth); - System.out.println("Balancer bandwidth is set to " + bandwidth + - " for " + proxy.getAddress()); - } - } else { + try{ dfs.setBalancerBandwidth(bandwidth); System.out.println("Balancer bandwidth is set to " + bandwidth); + } catch (IOException ioe){ + System.err.println("Balancer bandwidth is set failed."); + throw ioe; } exitCode = 0; @@ -1382,10 +1393,20 @@ public class DFSAdmin extends FsShell { List> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class); + List exceptions = new ArrayList<>(); for (ProxyAndInfo proxy : proxies) { - proxy.getProxy().finalizeUpgrade(); - System.out.println("Finalize upgrade successful for " + - proxy.getAddress()); + try{ + proxy.getProxy().finalizeUpgrade(); + System.out.println("Finalize upgrade successful for " + + proxy.getAddress()); + }catch (IOException ioe){ + System.out.println("Finalize upgrade failed for " + + proxy.getAddress()); + exceptions.add(ioe); + } + } + if(!exceptions.isEmpty()){ + throw MultipleIOException.createIOException(exceptions); } } else { dfs.finalizeUpgrade(); @@ -1415,10 +1436,21 @@ public class DFSAdmin extends FsShell { List> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class); 
+ List exceptions = new ArrayList<>(); for (ProxyAndInfo proxy : proxies) { - proxy.getProxy().metaSave(pathname); - System.out.println("Created metasave file " + pathname + " in the log " - + "directory of namenode " + proxy.getAddress()); + try{ + proxy.getProxy().metaSave(pathname); + System.out.println("Created metasave file " + pathname + + " in the log directory of namenode " + proxy.getAddress()); + } catch (IOException ioe){ + System.out.println("Created metasave file " + pathname + + " in the log directory of namenode " + proxy.getAddress() + + " failed"); + exceptions.add(ioe); + } + } + if(!exceptions.isEmpty()){ + throw MultipleIOException.createIOException(exceptions); } } else { dfs.metaSave(pathname); @@ -1503,10 +1535,20 @@ public class DFSAdmin extends FsShell { List> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId, RefreshAuthorizationPolicyProtocol.class); + List exceptions = new ArrayList<>(); for (ProxyAndInfo proxy : proxies) { - proxy.getProxy().refreshServiceAcl(); - System.out.println("Refresh service acl successful for " - + proxy.getAddress()); + try{ + proxy.getProxy().refreshServiceAcl(); + System.out.println("Refresh service acl successful for " + + proxy.getAddress()); + }catch (IOException ioe){ + System.out.println("Refresh service acl failed for " + + proxy.getAddress()); + exceptions.add(ioe); + } + } + if(!exceptions.isEmpty()) { + throw MultipleIOException.createIOException(exceptions); } } else { // Create the client @@ -1546,10 +1588,20 @@ public class DFSAdmin extends FsShell { List> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId, RefreshUserMappingsProtocol.class); + List exceptions = new ArrayList<>(); for (ProxyAndInfo proxy : proxies) { - proxy.getProxy().refreshUserToGroupsMappings(); - System.out.println("Refresh user to groups mapping successful for " - + proxy.getAddress()); + try{ + proxy.getProxy().refreshUserToGroupsMappings(); + System.out.println("Refresh user to groups 
mapping successful for " + + proxy.getAddress()); + }catch (IOException ioe){ + System.out.println("Refresh user to groups mapping failed for " + + proxy.getAddress()); + exceptions.add(ioe); + } + } + if(!exceptions.isEmpty()){ + throw MultipleIOException.createIOException(exceptions); } } else { // Create the client @@ -1591,10 +1643,20 @@ public class DFSAdmin extends FsShell { List> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId, RefreshUserMappingsProtocol.class); + List exceptions = new ArrayList<>(); for (ProxyAndInfo proxy : proxies) { - proxy.getProxy().refreshSuperUserGroupsConfiguration(); - System.out.println("Refresh super user groups configuration " + - "successful for " + proxy.getAddress()); + try{ + proxy.getProxy().refreshSuperUserGroupsConfiguration(); + System.out.println("Refresh super user groups configuration " + + "successful for " + proxy.getAddress()); + }catch (IOException ioe){ + System.out.println("Refresh super user groups configuration " + + "failed for " + proxy.getAddress()); + exceptions.add(ioe); + } + } + if(!exceptions.isEmpty()){ + throw MultipleIOException.createIOException(exceptions); } } else { // Create the client @@ -1630,10 +1692,20 @@ public class DFSAdmin extends FsShell { List> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId, RefreshCallQueueProtocol.class); + List exceptions = new ArrayList<>(); for (ProxyAndInfo proxy : proxies) { - proxy.getProxy().refreshCallQueue(); - System.out.println("Refresh call queue successful for " - + proxy.getAddress()); + try{ + proxy.getProxy().refreshCallQueue(); + System.out.println("Refresh call queue successful for " + + proxy.getAddress()); + }catch (IOException ioe){ + System.out.println("Refresh call queue failed for " + + proxy.getAddress()); + exceptions.add(ioe); + } + } + if(!exceptions.isEmpty()){ + throw MultipleIOException.createIOException(exceptions); } } else { // Create the client diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java index 74f5e7a44a3..97daf0926fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java @@ -33,6 +33,7 @@ import org.junit.After; import org.junit.Test; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -50,7 +51,7 @@ public class TestDFSAdminWithHA { private static String newLine = System.getProperty("line.separator"); private void assertOutputMatches(String string) { - String errOutput = new String(out.toByteArray(), Charsets.UTF_8); + String errOutput = new String(err.toByteArray(), Charsets.UTF_8); String output = new String(out.toByteArray(), Charsets.UTF_8); if (!errOutput.matches(string) && !output.matches(string)) { @@ -155,6 +156,60 @@ public class TestDFSAdminWithHA { assertOutputMatches(message + newLine + message + newLine); } + @Test (timeout = 30000) + public void testSaveNamespaceNN1UpNN2Down() throws Exception { + setUpHaCluster(false); + // Safe mode should be turned ON in order to create namespace image. 
+ int exitCode = admin.run(new String[] {"-safemode", "enter"}); + assertEquals(err.toString().trim(), 0, exitCode); + String message = "Safe mode is ON in.*"; + assertOutputMatches(message + newLine + message + newLine); + + cluster.getDfsCluster().shutdownNameNode(1); + + exitCode = admin.run(new String[] {"-saveNamespace"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + message = "Save namespace successful for.*" + newLine + + "Save namespace failed for.*" + newLine; + assertOutputMatches(message); + } + + @Test (timeout = 30000) + public void testSaveNamespaceNN1DownNN2Up() throws Exception { + setUpHaCluster(false); + // Safe mode should be turned ON in order to create namespace image. + int exitCode = admin.run(new String[] {"-safemode", "enter"}); + assertEquals(err.toString().trim(), 0, exitCode); + String message = "Safe mode is ON in.*"; + assertOutputMatches(message + newLine + message + newLine); + + cluster.getDfsCluster().shutdownNameNode(0); + + exitCode = admin.run(new String[] {"-saveNamespace"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + message = "Save namespace failed for.*" + newLine + + "Save namespace successful for.*" + newLine; + assertOutputMatches(message); + } + + @Test (timeout = 30000) + public void testSaveNamespaceNN1DownNN2Down() throws Exception { + setUpHaCluster(false); + // Safe mode should be turned ON in order to create namespace image. 
+ int exitCode = admin.run(new String[] {"-safemode", "enter"}); + assertEquals(err.toString().trim(), 0, exitCode); + String message = "Safe mode is ON in.*"; + assertOutputMatches(message + newLine + message + newLine); + + cluster.getDfsCluster().shutdownNameNode(0); + cluster.getDfsCluster().shutdownNameNode(1); + + exitCode = admin.run(new String[] {"-saveNamespace"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + message = "Save namespace failed for.*"; + assertOutputMatches(message + newLine + message + newLine); + } + @Test (timeout = 30000) public void testRestoreFailedStorage() throws Exception { setUpHaCluster(false); @@ -175,6 +230,76 @@ public class TestDFSAdminWithHA { assertOutputMatches(message + newLine + message + newLine); } + @Test (timeout = 30000) + public void testRestoreFailedStorageNN1UpNN2Down() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(1); + int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "restoreFailedStorage is set to false for.*" + newLine + + "restoreFailedStorage failed for.*" + newLine; + // Default is false + assertOutputMatches(message); + + exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + message = "restoreFailedStorage is set to true for.*" + newLine + + "restoreFailedStorage failed for.*" + newLine; + assertOutputMatches(message); + + exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + message = "restoreFailedStorage is set to false for.*" + newLine + + "restoreFailedStorage failed for.*" + newLine; + assertOutputMatches(message); + } + + @Test (timeout = 30000) + public void testRestoreFailedStorageNN1DownNN2Up() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + int exitCode = 
admin.run(new String[] {"-restoreFailedStorage", "check"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "restoreFailedStorage failed for.*" + newLine + + "restoreFailedStorage is set to false for.*" + newLine; + // Default is false + assertOutputMatches(message); + + exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + message = "restoreFailedStorage failed for.*" + newLine + + "restoreFailedStorage is set to true for.*" + newLine; + assertOutputMatches(message); + + exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + message = "restoreFailedStorage failed for.*" + newLine + + "restoreFailedStorage is set to false for.*" + newLine; + assertOutputMatches(message); + } + + @Test (timeout = 30000) + public void testRestoreFailedStorageNN1DownNN2Down() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + cluster.getDfsCluster().shutdownNameNode(1); + int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "restoreFailedStorage failed for.*"; + // Default is false + assertOutputMatches(message + newLine + message + newLine); + + exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + message = "restoreFailedStorage failed for.*"; + assertOutputMatches(message + newLine + message + newLine); + + exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + message = "restoreFailedStorage failed for.*"; + assertOutputMatches(message + newLine + message + newLine); + } + @Test (timeout = 30000) public void testRefreshNodes() throws Exception { setUpHaCluster(false); @@ -184,13 +309,82 @@ public class TestDFSAdminWithHA { 
assertOutputMatches(message + newLine + message + newLine); } + @Test (timeout = 30000) + public void testRefreshNodesNN1UpNN2Down() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(1); + int exitCode = admin.run(new String[] {"-refreshNodes"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Refresh nodes successful for.*" + newLine + + "Refresh nodes failed for.*" + newLine; + assertOutputMatches(message); + } + + @Test (timeout = 30000) + public void testRefreshNodesNN1DownNN2Up() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + int exitCode = admin.run(new String[] {"-refreshNodes"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Refresh nodes failed for.*" + newLine + + "Refresh nodes successful for.*" + newLine; + assertOutputMatches(message); + } + + @Test (timeout = 30000) + public void testRefreshNodesNN1DownNN2Down() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + cluster.getDfsCluster().shutdownNameNode(1); + int exitCode = admin.run(new String[] {"-refreshNodes"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Refresh nodes failed for.*"; + assertOutputMatches(message + newLine + message + newLine); + } + @Test (timeout = 30000) public void testSetBalancerBandwidth() throws Exception { setUpHaCluster(false); + cluster.getDfsCluster().transitionToActive(0); + int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"}); assertEquals(err.toString().trim(), 0, exitCode); - String message = "Balancer bandwidth is set to 10 for.*"; - assertOutputMatches(message + newLine + message + newLine); + String message = "Balancer bandwidth is set to 10"; + assertOutputMatches(message + newLine); + } + + @Test (timeout = 30000) + public void testSetBalancerBandwidthNN1UpNN2Down() throws Exception { + setUpHaCluster(false); + 
cluster.getDfsCluster().shutdownNameNode(1); + cluster.getDfsCluster().transitionToActive(0); + int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"}); + assertEquals(err.toString().trim(), 0, exitCode); + String message = "Balancer bandwidth is set to 10"; + assertOutputMatches(message + newLine); + } + + @Test (timeout = 30000) + public void testSetBalancerBandwidthNN1DownNN2Up() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + cluster.getDfsCluster().transitionToActive(1); + int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"}); + assertEquals(err.toString().trim(), 0, exitCode); + String message = "Balancer bandwidth is set to 10"; + assertOutputMatches(message + newLine); + } + + @Test + public void testSetBalancerBandwidthNN1DownNN2Down() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + cluster.getDfsCluster().shutdownNameNode(1); + int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Balancer bandwidth is set failed." 
+ newLine + + ".*" + newLine; + assertOutputMatches(message); } @Test (timeout = 30000) @@ -210,6 +404,44 @@ public class TestDFSAdminWithHA { assertOutputMatches(message + newLine + message + newLine); } + @Test (timeout = 30000) + public void testMetaSaveNN1UpNN2Down() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(1); + int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Created metasave file dfs.meta in the log directory" + + " of namenode.*" + newLine + + "Created metasave file dfs.meta in the log directory" + + " of namenode.*failed" + newLine; + assertOutputMatches(message); + } + + @Test (timeout = 30000) + public void testMetaSaveNN1DownNN2Up() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Created metasave file dfs.meta in the log directory" + + " of namenode.*failed" + newLine + + "Created metasave file dfs.meta in the log directory" + + " of namenode.*" + newLine; + assertOutputMatches(message); + } + + @Test (timeout = 30000) + public void testMetaSaveNN1DownNN2Down() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + cluster.getDfsCluster().shutdownNameNode(1); + int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Created metasave file dfs.meta in the log directory" + + " of namenode.*failed"; + assertOutputMatches(message + newLine + message + newLine); + } + @Test (timeout = 30000) public void testRefreshServiceAcl() throws Exception { setUpHaCluster(true); @@ -219,6 +451,40 @@ public class TestDFSAdminWithHA { assertOutputMatches(message + newLine + message + newLine); } + @Test (timeout = 30000) + public void 
testRefreshServiceAclNN1UpNN2Down() throws Exception { + setUpHaCluster(true); + cluster.getDfsCluster().shutdownNameNode(1); + int exitCode = admin.run(new String[] {"-refreshServiceAcl"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Refresh service acl successful for.*" + newLine + + "Refresh service acl failed for.*" + newLine; + assertOutputMatches(message); + } + + @Test (timeout = 30000) + public void testRefreshServiceAclNN1DownNN2Up() throws Exception { + setUpHaCluster(true); + cluster.getDfsCluster().shutdownNameNode(0); + int exitCode = admin.run(new String[] {"-refreshServiceAcl"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Refresh service acl failed for.*" + newLine + + "Refresh service acl successful for.*" + newLine; + assertOutputMatches(message); + } + + @Test (timeout = 30000) + public void testRefreshServiceAclNN1DownNN2Down() throws Exception { + setUpHaCluster(true); + cluster.getDfsCluster().shutdownNameNode(0); + cluster.getDfsCluster().shutdownNameNode(1); + int exitCode = admin.run(new String[] {"-refreshServiceAcl"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Refresh service acl failed for.*"; + assertOutputMatches(message + newLine + message + newLine); + } + + @Test (timeout = 30000) public void testRefreshUserToGroupsMappings() throws Exception { setUpHaCluster(false); @@ -228,6 +494,43 @@ public class TestDFSAdminWithHA { assertOutputMatches(message + newLine + message + newLine); } + @Test (timeout = 30000) + public void testRefreshUserToGroupsMappingsNN1UpNN2Down() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(1); + int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Refresh user to groups mapping successful for.*" + + newLine + + "Refresh user to groups mapping failed for.*" + + newLine; + 
assertOutputMatches(message); + } + + @Test (timeout = 30000) + public void testRefreshUserToGroupsMappingsNN1DownNN2Up() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Refresh user to groups mapping failed for.*" + + newLine + + "Refresh user to groups mapping successful for.*" + + newLine; + assertOutputMatches(message); + } + + @Test (timeout = 30000) + public void testRefreshUserToGroupsMappingsNN1DownNN2Down() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + cluster.getDfsCluster().shutdownNameNode(1); + int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Refresh user to groups mapping failed for.*"; + assertOutputMatches(message + newLine + message + newLine); + } + @Test (timeout = 30000) public void testRefreshSuperUserGroupsConfiguration() throws Exception { setUpHaCluster(false); @@ -238,6 +541,49 @@ public class TestDFSAdminWithHA { assertOutputMatches(message + newLine + message + newLine); } + @Test (timeout = 30000) + public void testRefreshSuperUserGroupsConfigurationNN1UpNN2Down() + throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(1); + int exitCode = admin.run( + new String[] {"-refreshSuperUserGroupsConfiguration"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Refresh super user groups configuration successful for.*" + + newLine + + "Refresh super user groups configuration failed for.*" + + newLine; + assertOutputMatches(message); + } + + @Test (timeout = 30000) + public void testRefreshSuperUserGroupsConfigurationNN1DownNN2Up() + throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + int exitCode = admin.run( + 
new String[] {"-refreshSuperUserGroupsConfiguration"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Refresh super user groups configuration failed for.*" + + newLine + + "Refresh super user groups configuration successful for.*" + + newLine; + assertOutputMatches(message); + } + + @Test (timeout = 30000) + public void testRefreshSuperUserGroupsConfigurationNN1DownNN2Down() + throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + cluster.getDfsCluster().shutdownNameNode(1); + int exitCode = admin.run( + new String[] {"-refreshSuperUserGroupsConfiguration"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Refresh super user groups configuration failed for.*"; + assertOutputMatches(message + newLine + message + newLine); + } + @Test (timeout = 30000) public void testRefreshCallQueue() throws Exception { setUpHaCluster(false); @@ -246,4 +592,116 @@ public class TestDFSAdminWithHA { String message = "Refresh call queue successful for.*"; assertOutputMatches(message + newLine + message + newLine); } + + @Test (timeout = 30000) + public void testRefreshCallQueueNN1UpNN2Down() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(1); + int exitCode = admin.run(new String[] {"-refreshCallQueue"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Refresh call queue successful for.*" + newLine + + "Refresh call queue failed for.*" + newLine; + assertOutputMatches(message); + } + + @Test (timeout = 30000) + public void testRefreshCallQueueNN1DownNN2Up() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + int exitCode = admin.run(new String[] {"-refreshCallQueue"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Refresh call queue failed for.*" + newLine + + "Refresh call queue successful for.*" + newLine; + assertOutputMatches(message); + } + + @Test 
(timeout = 30000) + public void testRefreshCallQueueNN1DownNN2Down() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + cluster.getDfsCluster().shutdownNameNode(1); + int exitCode = admin.run(new String[] {"-refreshCallQueue"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Refresh call queue failed for.*"; + assertOutputMatches(message + newLine + message + newLine); + } + + @Test (timeout = 30000) + public void testFinalizeUpgrade() throws Exception { + setUpHaCluster(false); + int exitCode = admin.run(new String[] {"-finalizeUpgrade"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = ".*Cannot finalize with no NameNode active"; + assertOutputMatches(message + newLine); + + cluster.getDfsCluster().transitionToActive(0); + exitCode = admin.run(new String[] {"-finalizeUpgrade"}); + assertEquals(err.toString().trim(), 0, exitCode); + message = "Finalize upgrade successful for.*"; + assertOutputMatches(message + newLine + message + newLine); + } + + @Test (timeout = 30000) + public void testFinalizeUpgradeNN1UpNN2Down() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(1); + cluster.getDfsCluster().transitionToActive(0); + int exitCode = admin.run(new String[] {"-finalizeUpgrade"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Finalize upgrade successful for .*" + newLine + + "Finalize upgrade failed for .*" + newLine; + assertOutputMatches(message); + } + + @Test (timeout = 30000) + public void testFinalizeUpgradeNN1DownNN2Up() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + cluster.getDfsCluster().transitionToActive(1); + int exitCode = admin.run(new String[] {"-finalizeUpgrade"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = "Finalize upgrade failed for .*" + newLine + + "Finalize upgrade successful for .*" + newLine; + 
assertOutputMatches(message); + } + + @Test (timeout = 30000) + public void testFinalizeUpgradeNN1DownNN2Down() throws Exception { + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + cluster.getDfsCluster().shutdownNameNode(1); + int exitCode = admin.run(new String[] {"-finalizeUpgrade"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = ".*2 exceptions.*"; + assertOutputMatches(message + newLine); + } + + @Test (timeout = 30000) + public void testListOpenFilesNN1UpNN2Down() throws Exception{ + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(1); + cluster.getDfsCluster().transitionToActive(0); + int exitCode = admin.run(new String[] {"-listOpenFiles"}); + assertEquals(err.toString().trim(), 0, exitCode); + } + + @Test (timeout = 30000) + public void testListOpenFilesNN1DownNN2Up() throws Exception{ + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + cluster.getDfsCluster().transitionToActive(1); + int exitCode = admin.run(new String[] {"-listOpenFiles"}); + assertEquals(err.toString().trim(), 0, exitCode); + } + + @Test + public void testListOpenFilesNN1DownNN2Down() throws Exception{ + setUpHaCluster(false); + cluster.getDfsCluster().shutdownNameNode(0); + cluster.getDfsCluster().shutdownNameNode(1); + int exitCode = admin.run(new String[] {"-listOpenFiles"}); + assertNotEquals(err.toString().trim(), 0, exitCode); + String message = ".*" + newLine + "List open files failed." + newLine; + assertOutputMatches(message); + } }