HDFS-12935. Get ambiguous result for DFSAdmin command in HA mode when only one namenode is up. Contributed by Jianfei Jiang.

Brahma Reddy Battula 2018-02-08 18:33:11 +05:30
parent b7f7fb003e
commit 9873eb63a7
4 changed files with 601 additions and 66 deletions
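
The DFSAdmin and HAUtil changes below all follow one pattern: in HA mode the command is still issued to every NameNode in the nameservice, but each per-NameNode RPC is wrapped in try/catch, the outcome is printed per address, and the collected IOExceptions are rethrown at the end as a single MultipleIOException, so the command reports every NameNode it could reach and still exits non-zero if any of them failed. A minimal sketch of that pattern, assuming the proxy list returned by HAUtil.getProxiesForAllNameNodesInNameservice; the RemoteCall interface and runOnAllNameNodes helper are illustrative names and are not part of this commit:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.io.MultipleIOException;

// Illustrative sketch only: run one RPC against every NameNode in the
// nameservice, report the outcome per NameNode, and fail at the end if any
// call threw, instead of aborting on the first unreachable NameNode.
class AllNameNodesRunner {

  /** Hypothetical functional interface for the per-NameNode RPC. */
  interface RemoteCall {
    void run(ClientProtocol namenode) throws IOException;
  }

  static void runOnAllNameNodes(List<ProxyAndInfo<ClientProtocol>> proxies,
      String action, RemoteCall call) throws IOException {
    List<IOException> exceptions = new ArrayList<>();
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      try {
        call.run(proxy.getProxy());
        System.out.println(action + " successful for " + proxy.getAddress());
      } catch (IOException ioe) {
        System.out.println(action + " failed for " + proxy.getAddress());
        exceptions.add(ioe);
      }
    }
    if (!exceptions.isEmpty()) {
      // Aggregated so the caller still sees every failure and exits non-zero.
      throw MultipleIOException.createIOException(exceptions);
    }
  }
}

With a helper like this, the -refreshNodes branch, for example, would reduce to runOnAllNameNodes(proxies, "Refresh nodes", p -> p.refreshNodes()).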

HAUtil.java

@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
@@ -320,6 +321,7 @@ public class HAUtil {
    */
   public static boolean isAtLeastOneActive(List<ClientProtocol> namenodes)
       throws IOException {
+    List<IOException> exceptions = new ArrayList<>();
     for (ClientProtocol namenode : namenodes) {
       try {
         namenode.getFileInfo("/");
@@ -329,9 +331,14 @@ public class HAUtil {
         if (cause instanceof StandbyException) {
           // This is expected to happen for a standby NN.
         } else {
-          throw re;
+          exceptions.add(re);
+        }
+      } catch (IOException ioe) {
+        exceptions.add(ioe);
       }
     }
+    if(!exceptions.isEmpty()){
+      throw MultipleIOException.createIOException(exceptions);
+    }
     return false;
   }

FSNamesystem.java

@@ -4265,7 +4265,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
   void setBalancerBandwidth(long bandwidth) throws IOException {
-    checkOperation(OperationCategory.UNCHECKED);
+    checkOperation(OperationCategory.WRITE);
     checkSuperuserPrivilege();
     getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
   }
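
For setBalancerBandwidth the patch goes the other way: tightening the operation category from UNCHECKED to WRITE means a standby NameNode now rejects the call with a StandbyException, and the client-side failover proxy retries it against the active NameNode. That is why the DFSAdmin change below drops the per-NameNode loop for this command and simply calls the filesystem once. A hedged client-side illustration, assuming a typical HA client configuration and a hypothetical logical URI hdfs://mycluster:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SetBandwidthOnActive {
  public static void main(String[] args) throws Exception {
    // One call through the logical HA URI is enough; the failover proxy
    // behind it routes the request to whichever NameNode is currently active.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://mycluster"), conf);
    ((DistributedFileSystem) fs).setBalancerBandwidth(10L * 1024 * 1024);
  }
}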

DFSAdmin.java

@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.tools;
 import java.io.File;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
@@ -49,7 +48,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.FsStatus;
-import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.shell.Command;
@@ -76,12 +74,12 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
-import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
@@ -768,15 +766,25 @@ public class DFSAdmin extends FsShell {
     Configuration dfsConf = dfs.getConf();
     URI dfsUri = dfs.getUri();
     boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
+    List<IOException> exceptions = new ArrayList<>();
     if (isHaEnabled) {
       String nsId = dfsUri.getHost();
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
       for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+        try{
           proxy.getProxy().saveNamespace();
           System.out.println("Save namespace successful for " +
               proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Save namespace failed for " +
+              proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       dfs.saveNamespace();
@@ -818,10 +826,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+        try{
           Boolean res = proxy.getProxy().restoreFailedStorage(arg);
           System.out.println("restoreFailedStorage is set to " + res + " for "
               + proxy.getAddress());
+        } catch (IOException ioe){
+          System.out.println("restoreFailedStorage failed for "
+              + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       Boolean res = dfs.restoreFailedStorage(arg);
@@ -851,10 +869,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy: proxies) {
+        try{
           proxy.getProxy().refreshNodes();
           System.out.println("Refresh nodes successful for " +
               proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh nodes failed for " +
+              proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       dfs.refreshNodes();
@@ -873,21 +901,15 @@ public class DFSAdmin extends FsShell {
    */
   public int listOpenFiles() throws IOException {
     DistributedFileSystem dfs = getDFS();
-    Configuration dfsConf = dfs.getConf();
-    URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
     RemoteIterator<OpenFileEntry> openFilesRemoteIterator;
-    if (isHaEnabled) {
-      ProxyAndInfo<ClientProtocol> proxy = NameNodeProxies.createNonHAProxy(
-          dfsConf, HAUtil.getAddressOfActive(getDFS()), ClientProtocol.class,
-          UserGroupInformation.getCurrentUser(), false);
-      openFilesRemoteIterator = new OpenFilesIterator(proxy.getProxy(),
-          FsTracer.get(dfsConf));
-    } else {
+    try{
       openFilesRemoteIterator = dfs.listOpenFiles();
-    }
       printOpenFiles(openFilesRemoteIterator);
+    } catch (IOException ioe){
+      System.out.println("List open files failed.");
+      throw ioe;
+    }
     return 0;
   }
@@ -905,8 +927,7 @@ public class DFSAdmin extends FsShell {
   }
   /**
-   * Command to ask the namenode to set the balancer bandwidth for all of the
-   * datanodes.
+   * Command to ask the active namenode to set the balancer bandwidth.
    * Usage: hdfs dfsadmin -setBalancerBandwidth bandwidth
    * @param argv List of of command line parameters.
    * @param idx The index of the command that is being processed.
@@ -937,27 +958,15 @@ public class DFSAdmin extends FsShell {
     }
     DistributedFileSystem dfs = (DistributedFileSystem) fs;
-    Configuration dfsConf = dfs.getConf();
-    URI dfsUri = dfs.getUri();
-    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
-    if (isHaEnabled) {
-      String nsId = dfsUri.getHost();
-      List<ProxyAndInfo<ClientProtocol>> proxies =
-          HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
-          nsId, ClientProtocol.class);
-      for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-        proxy.getProxy().setBalancerBandwidth(bandwidth);
-        System.out.println("Balancer bandwidth is set to " + bandwidth +
-            " for " + proxy.getAddress());
-      }
-    } else {
+    try{
       dfs.setBalancerBandwidth(bandwidth);
       System.out.println("Balancer bandwidth is set to " + bandwidth);
+    } catch (IOException ioe){
+      System.err.println("Balancer bandwidth is set failed.");
+      throw ioe;
     }
-    exitCode = 0;
-    return exitCode;
+    return 0;
   }
   /**
@@ -1304,10 +1313,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+        try{
           proxy.getProxy().finalizeUpgrade();
           System.out.println("Finalize upgrade successful for " +
               proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Finalize upgrade failed for " +
+              proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       dfs.finalizeUpgrade();
@@ -1337,10 +1356,21 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
+        try{
           proxy.getProxy().metaSave(pathname);
-          System.out.println("Created metasave file " + pathname + " in the log "
-              + "directory of namenode " + proxy.getAddress());
+          System.out.println("Created metasave file " + pathname
+              + " in the log directory of namenode " + proxy.getAddress());
+        } catch (IOException ioe){
+          System.out.println("Created metasave file " + pathname
+              + " in the log directory of namenode " + proxy.getAddress()
+              + " failed");
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       dfs.metaSave(pathname);
@@ -1425,10 +1455,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<RefreshAuthorizationPolicyProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
               RefreshAuthorizationPolicyProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<RefreshAuthorizationPolicyProtocol> proxy : proxies) {
+        try{
           proxy.getProxy().refreshServiceAcl();
           System.out.println("Refresh service acl successful for "
               + proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh service acl failed for "
+              + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()) {
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       // Create the client
@@ -1468,10 +1508,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
               RefreshUserMappingsProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
+        try{
           proxy.getProxy().refreshUserToGroupsMappings();
           System.out.println("Refresh user to groups mapping successful for "
               + proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh user to groups mapping failed for "
+              + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       // Create the client
@@ -1513,10 +1563,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<RefreshUserMappingsProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
               RefreshUserMappingsProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<RefreshUserMappingsProtocol> proxy : proxies) {
+        try{
           proxy.getProxy().refreshSuperUserGroupsConfiguration();
           System.out.println("Refresh super user groups configuration " +
               "successful for " + proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh super user groups configuration " +
+              "failed for " + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       // Create the client
@@ -1552,10 +1612,20 @@ public class DFSAdmin extends FsShell {
       List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
               RefreshCallQueueProtocol.class);
+      List<IOException> exceptions = new ArrayList<>();
       for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
+        try{
           proxy.getProxy().refreshCallQueue();
           System.out.println("Refresh call queue successful for "
               + proxy.getAddress());
+        }catch (IOException ioe){
+          System.out.println("Refresh call queue failed for "
+              + proxy.getAddress());
+          exceptions.add(ioe);
+        }
+      }
+      if(!exceptions.isEmpty()){
+        throw MultipleIOException.createIOException(exceptions);
       }
     } else {
       // Create the client

TestDFSAdminWithHA.java

@@ -33,6 +33,7 @@ import org.junit.After;
 import org.junit.Test;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -50,7 +51,7 @@ public class TestDFSAdminWithHA {
   private static String newLine = System.getProperty("line.separator");
   private void assertOutputMatches(String string) {
-    String errOutput = new String(out.toByteArray(), Charsets.UTF_8);
+    String errOutput = new String(err.toByteArray(), Charsets.UTF_8);
     String output = new String(out.toByteArray(), Charsets.UTF_8);
     if (!errOutput.matches(string) && !output.matches(string)) {
@@ -155,6 +156,60 @@ public class TestDFSAdminWithHA {
     assertOutputMatches(message + newLine + message + newLine);
   }
 
+  @Test (timeout = 30000)
+  public void testSaveNamespaceNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    // Safe mode should be turned ON in order to create namespace image.
+    int exitCode = admin.run(new String[] {"-safemode", "enter"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Safe mode is ON in.*";
+    assertOutputMatches(message + newLine + message + newLine);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    //
+    exitCode = admin.run(new String[] {"-saveNamespace"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "Save namespace successful for.*" + newLine
+        + "Save namespace failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testSaveNamespaceNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    // Safe mode should be turned ON in order to create namespace image.
+    int exitCode = admin.run(new String[] {"-safemode", "enter"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Safe mode is ON in.*";
+    assertOutputMatches(message + newLine + message + newLine);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    exitCode = admin.run(new String[] {"-saveNamespace"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "Save namespace failed for.*" + newLine
+        + "Save namespace successful for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testSaveNamespaceNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    // Safe mode should be turned ON in order to create namespace image.
+    int exitCode = admin.run(new String[] {"-safemode", "enter"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Safe mode is ON in.*";
+    assertOutputMatches(message + newLine + message + newLine);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    exitCode = admin.run(new String[] {"-saveNamespace"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "Save namespace failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
   @Test (timeout = 30000)
   public void testRestoreFailedStorage() throws Exception {
     setUpHaCluster(false);
@@ -175,6 +230,76 @@ public class TestDFSAdminWithHA {
     assertOutputMatches(message + newLine + message + newLine);
   }
 
+  @Test (timeout = 30000)
+  public void testRestoreFailedStorageNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "restoreFailedStorage is set to false for.*" + newLine
+        + "restoreFailedStorage failed for.*" + newLine;
+    // Default is false
+    assertOutputMatches(message);
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage is set to true for.*" + newLine
+        + "restoreFailedStorage failed for.*" + newLine;
+    assertOutputMatches(message);
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage is set to false for.*" + newLine
+        + "restoreFailedStorage failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRestoreFailedStorageNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "restoreFailedStorage failed for.*" + newLine
+        + "restoreFailedStorage is set to false for.*" + newLine;
+    // Default is false
+    assertOutputMatches(message);
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage failed for.*" + newLine
+        + "restoreFailedStorage is set to true for.*" + newLine;
+    assertOutputMatches(message);
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage failed for.*" + newLine
+        + "restoreFailedStorage is set to false for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRestoreFailedStorageNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-restoreFailedStorage", "check"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "restoreFailedStorage failed for.*";
+    // Default is false
+    assertOutputMatches(message + newLine + message + newLine);
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "true"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+    exitCode = admin.run(new String[] {"-restoreFailedStorage", "false"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    message = "restoreFailedStorage failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
   @Test (timeout = 30000)
   public void testRefreshNodes() throws Exception {
     setUpHaCluster(false);
@@ -184,13 +309,82 @@ public class TestDFSAdminWithHA {
     assertOutputMatches(message + newLine + message + newLine);
   }
 
+  @Test (timeout = 30000)
+  public void testRefreshNodesNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshNodes"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh nodes successful for.*" + newLine
+        + "Refresh nodes failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshNodesNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-refreshNodes"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh nodes failed for.*" + newLine
+        + "Refresh nodes successful for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshNodesNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshNodes"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh nodes failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
   @Test (timeout = 30000)
   public void testSetBalancerBandwidth() throws Exception {
     setUpHaCluster(false);
+    cluster.getDfsCluster().transitionToActive(0);
     int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
     assertEquals(err.toString().trim(), 0, exitCode);
-    String message = "Balancer bandwidth is set to 10 for.*";
-    assertOutputMatches(message + newLine + message + newLine);
+    String message = "Balancer bandwidth is set to 10";
+    assertOutputMatches(message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testSetBalancerBandwidthNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    cluster.getDfsCluster().transitionToActive(0);
+    int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Balancer bandwidth is set to 10";
+    assertOutputMatches(message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testSetBalancerBandwidthNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().transitionToActive(1);
+    int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    String message = "Balancer bandwidth is set to 10";
+    assertOutputMatches(message + newLine);
+  }
+
+  @Test
+  public void testSetBalancerBandwidthNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-setBalancerBandwidth", "10"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Balancer bandwidth is set failed." + newLine
+        + ".*" + newLine;
+    assertOutputMatches(message);
   }
 
   @Test (timeout = 30000)
@@ -210,6 +404,44 @@ public class TestDFSAdminWithHA {
     assertOutputMatches(message + newLine + message + newLine);
   }
 
+  @Test (timeout = 30000)
+  public void testMetaSaveNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*" + newLine
+        + "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*failed" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testMetaSaveNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*failed" + newLine
+        + "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testMetaSaveNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-metasave", "dfs.meta"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Created metasave file dfs.meta in the log directory"
+        + " of namenode.*failed";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
   @Test (timeout = 30000)
   public void testRefreshServiceAcl() throws Exception {
     setUpHaCluster(true);
@@ -219,6 +451,40 @@ public class TestDFSAdminWithHA {
     assertOutputMatches(message + newLine + message + newLine);
   }
 
+  @Test (timeout = 30000)
+  public void testRefreshServiceAclNN1UpNN2Down() throws Exception {
+    setUpHaCluster(true);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh service acl successful for.*" + newLine
+        + "Refresh service acl failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshServiceAclNN1DownNN2Up() throws Exception {
+    setUpHaCluster(true);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh service acl failed for.*" + newLine
+        + "Refresh service acl successful for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshServiceAclNN1DownNN2Down() throws Exception {
+    setUpHaCluster(true);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshServiceAcl"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh service acl failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
   @Test (timeout = 30000)
   public void testRefreshUserToGroupsMappings() throws Exception {
     setUpHaCluster(false);
@@ -228,6 +494,43 @@ public class TestDFSAdminWithHA {
     assertOutputMatches(message + newLine + message + newLine);
   }
 
+  @Test (timeout = 30000)
+  public void testRefreshUserToGroupsMappingsNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh user to groups mapping successful for.*"
+        + newLine
+        + "Refresh user to groups mapping failed for.*"
+        + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshUserToGroupsMappingsNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh user to groups mapping failed for.*"
+        + newLine
+        + "Refresh user to groups mapping successful for.*"
+        + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshUserToGroupsMappingsNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshUserToGroupsMappings"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh user to groups mapping failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
   @Test (timeout = 30000)
   public void testRefreshSuperUserGroupsConfiguration() throws Exception {
     setUpHaCluster(false);
@@ -238,6 +541,49 @@ public class TestDFSAdminWithHA {
     assertOutputMatches(message + newLine + message + newLine);
   }
 
+  @Test (timeout = 30000)
+  public void testRefreshSuperUserGroupsConfigurationNN1UpNN2Down()
+      throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(
+        new String[] {"-refreshSuperUserGroupsConfiguration"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh super user groups configuration successful for.*"
+        + newLine
+        + "Refresh super user groups configuration failed for.*"
+        + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshSuperUserGroupsConfigurationNN1DownNN2Up()
+      throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(
+        new String[] {"-refreshSuperUserGroupsConfiguration"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh super user groups configuration failed for.*"
+        + newLine
+        + "Refresh super user groups configuration successful for.*"
+        + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshSuperUserGroupsConfigurationNN1DownNN2Down()
+      throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(
+        new String[] {"-refreshSuperUserGroupsConfiguration"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh super user groups configuration failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
   @Test (timeout = 30000)
   public void testRefreshCallQueue() throws Exception {
     setUpHaCluster(false);
@@ -246,4 +592,116 @@ public class TestDFSAdminWithHA {
     String message = "Refresh call queue successful for.*";
     assertOutputMatches(message + newLine + message + newLine);
   }
 
+  @Test (timeout = 30000)
+  public void testRefreshCallQueueNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshCallQueue"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh call queue successful for.*" + newLine
+        + "Refresh call queue failed for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshCallQueueNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    int exitCode = admin.run(new String[] {"-refreshCallQueue"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh call queue failed for.*" + newLine
+        + "Refresh call queue successful for.*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testRefreshCallQueueNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-refreshCallQueue"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Refresh call queue failed for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testFinalizeUpgrade() throws Exception {
+    setUpHaCluster(false);
+    int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = ".*Cannot finalize with no NameNode active";
+    assertOutputMatches(message + newLine);
+    cluster.getDfsCluster().transitionToActive(0);
+    exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+    message = "Finalize upgrade successful for.*";
+    assertOutputMatches(message + newLine + message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testFinalizeUpgradeNN1UpNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    cluster.getDfsCluster().transitionToActive(0);
+    int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Finalize upgrade successful for .*" + newLine
+        + "Finalize upgrade failed for .*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testFinalizeUpgradeNN1DownNN2Up() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().transitionToActive(1);
+    int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = "Finalize upgrade failed for .*" + newLine
+        + "Finalize upgrade successful for .*" + newLine;
+    assertOutputMatches(message);
+  }
+
+  @Test (timeout = 30000)
+  public void testFinalizeUpgradeNN1DownNN2Down() throws Exception {
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-finalizeUpgrade"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = ".*2 exceptions.*";
+    assertOutputMatches(message + newLine);
+  }
+
+  @Test (timeout = 30000)
+  public void testListOpenFilesNN1UpNN2Down() throws Exception{
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    cluster.getDfsCluster().transitionToActive(0);
+    int exitCode = admin.run(new String[] {"-listOpenFiles"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+  }
+
+  @Test (timeout = 30000)
+  public void testListOpenFilesNN1DownNN2Up() throws Exception{
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().transitionToActive(1);
+    int exitCode = admin.run(new String[] {"-listOpenFiles"});
+    assertEquals(err.toString().trim(), 0, exitCode);
+  }
+
+  @Test
+  public void testListOpenFilesNN1DownNN2Down() throws Exception{
+    setUpHaCluster(false);
+    cluster.getDfsCluster().shutdownNameNode(0);
+    cluster.getDfsCluster().shutdownNameNode(1);
+    int exitCode = admin.run(new String[] {"-listOpenFiles"});
+    assertNotEquals(err.toString().trim(), 0, exitCode);
+    String message = ".*" + newLine + "List open files failed." + newLine;
+    assertOutputMatches(message);
+  }
 }