HDFS-13589: Add dfsAdmin command to query if upgrade is finalized. Contributed by Hanisha Koneru

Bharat Viswanadham 2018-05-23 10:15:40 -07:00
parent 699a6918ac
commit bc6d9d4c79
13 changed files with 260 additions and 0 deletions

View File

@ -2341,6 +2341,16 @@ public void finalizeUpgrade() throws IOException {
}
}
/**
* @see ClientProtocol#upgradeStatus()
*/
public boolean upgradeStatus() throws IOException {
checkOpen();
try (TraceScope ignored = tracer.newScope("isUpgradeFinalized")) {
return namenode.upgradeStatus();
}
}
RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
throws IOException {
checkOpen();

View File

@ -1533,6 +1533,16 @@ public void finalizeUpgrade() throws IOException {
dfs.finalizeUpgrade();
}
/**
* Get status of upgrade - finalized or not.
* @return true if upgrade is finalized or if no upgrade is in progress and
* false otherwise.
* @throws IOException
*/
public boolean upgradeStatus() throws IOException {
return dfs.upgradeStatus();
}
/**
* Rolling upgrade: prepare/finalize/query.
*/
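
Aside (illustrative only, not part of this commit): a minimal client-side sketch of calling the new DistributedFileSystem#upgradeStatus() API added above. The NameNode URI is an assumption; in practice it comes from fs.defaultFS.

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class UpgradeStatusCheck {
  public static void main(String[] args) throws IOException {
    // Hypothetical NameNode URI; replace with the cluster's fs.defaultFS.
    URI uri = URI.create("hdfs://nn.example.com:8020");
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(uri, conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // true if the upgrade is finalized or if no upgrade is in progress.
      boolean finalized = dfs.upgradeStatus();
      System.out.println(finalized ? "Upgrade finalized" : "Upgrade not finalized");
    }
  }
}

On the server side, this value is ultimately answered by NameNodeRpcServer through FSNamesystem#isUpgradeFinalized(), as shown further down in this diff.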

View File

@ -940,6 +940,15 @@ boolean setSafeMode(HdfsConstants.SafeModeAction action, boolean isChecked)
@Idempotent
void finalizeUpgrade() throws IOException;
/**
* Get status of upgrade - finalized or not.
* @return true if upgrade is finalized or if no upgrade is in progress and
* false otherwise.
* @throws IOException
*/
@Idempotent
boolean upgradeStatus() throws IOException;
/**
* Rolling upgrade operations.
* @param action either query, prepare or finalize.

View File

@ -146,6 +146,13 @@ public static RollingUpgradeAction fromString(String s) {
}
}
/**
* Upgrade actions.
*/
public enum UpgradeAction {
QUERY, FINALIZE;
}
// type of the datanode report
public enum DatanodeReportType {
ALL, LIVE, DEAD, DECOMMISSIONING, ENTERING_MAINTENANCE, IN_MAINTENANCE

View File

@ -186,6 +186,8 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.*;
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto;
@ -273,6 +275,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
VOID_FINALIZE_UPGRADE_REQUEST =
FinalizeUpgradeRequestProto.newBuilder().build();
private final static UpgradeStatusRequestProto
VOID_UPGRADE_STATUS_REQUEST =
UpgradeStatusRequestProto.newBuilder().build();
private final static GetDataEncryptionKeyRequestProto
VOID_GET_DATA_ENCRYPTIONKEY_REQUEST =
GetDataEncryptionKeyRequestProto.newBuilder().build();
@ -830,6 +836,17 @@ public void finalizeUpgrade() throws IOException {
}
}
@Override
public boolean upgradeStatus() throws IOException {
try {
final UpgradeStatusResponseProto proto = rpcProxy.upgradeStatus(
null, VOID_UPGRADE_STATUS_REQUEST);
return proto.getUpgradeFinalized();
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
throws IOException {

View File

@ -450,6 +450,13 @@ message FinalizeUpgradeRequestProto { // no parameters
message FinalizeUpgradeResponseProto { // void response
}
message UpgradeStatusRequestProto { // no parameters
}
message UpgradeStatusResponseProto {
required bool upgradeFinalized = 1;
}
enum RollingUpgradeActionProto {
QUERY = 1;
START = 2;
@ -879,6 +886,8 @@ service ClientNamenodeProtocol {
rpc refreshNodes(RefreshNodesRequestProto) returns(RefreshNodesResponseProto);
rpc finalizeUpgrade(FinalizeUpgradeRequestProto)
returns(FinalizeUpgradeResponseProto);
rpc upgradeStatus(UpgradeStatusRequestProto)
returns(UpgradeStatusResponseProto);
rpc rollingUpgrade(RollingUpgradeRequestProto)
returns(RollingUpgradeResponseProto);
rpc listCorruptFileBlocks(ListCorruptFileBlocksRequestProto)

View File

@ -1481,6 +1481,13 @@ public void finalizeUpgrade() throws IOException {
rpcClient.invokeConcurrent(nss, method, true, false);
}
@Override // ClientProtocol
public boolean upgradeStatus() throws IOException {
String methodName = getMethodName();
throw new UnsupportedOperationException(
"Operation \"" + methodName + "\" is not supported");
}
@Override // ClientProtocol
public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
throws IOException {

View File

@ -157,6 +157,8 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetStoragePolicyResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
@ -899,6 +901,21 @@ public FinalizeUpgradeResponseProto finalizeUpgrade(RpcController controller,
}
}
@Override
public UpgradeStatusResponseProto upgradeStatus(
RpcController controller, UpgradeStatusRequestProto req)
throws ServiceException {
try {
final boolean isUpgradeFinalized = server.upgradeStatus();
UpgradeStatusResponseProto.Builder b =
UpgradeStatusResponseProto.newBuilder();
b.setUpgradeFinalized(isUpgradeFinalized);
return b.build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public RollingUpgradeResponseProto rollingUpgrade(RpcController controller,
RollingUpgradeRequestProto req) throws ServiceException {

View File

@ -1321,6 +1321,12 @@ public void finalizeUpgrade() throws IOException {
namesystem.finalizeUpgrade();
}
@Override // ClientProtocol
public boolean upgradeStatus() throws IOException {
checkNNStartup();
return namesystem.isUpgradeFinalized();
}
@Override // ClientProtocol
public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action) throws IOException {
checkNNStartup();

View File

@ -78,6 +78,7 @@
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
import org.apache.hadoop.hdfs.protocol.ReconfigurationProtocol;
@ -443,6 +444,7 @@ static int run(DistributedFileSystem dfs, String[] argv, int idx) throws IOExcep
"\t[" + ClearSpaceQuotaCommand.USAGE +"]\n" +
"\t[-finalizeUpgrade]\n" +
"\t[" + RollingUpgradeCommand.USAGE +"]\n" +
"\t[-upgrade <query | finalize>]\n" +
"\t[-refreshServiceAcl]\n" +
"\t[-refreshUserToGroupsMappings]\n" +
"\t[-refreshSuperUserGroupsConfiguration]\n" +
@ -1147,6 +1149,11 @@ private void printHelp(String cmd) {
"\t\tfollowed by Namenode doing the same.\n" +
"\t\tThis completes the upgrade process.\n";
String upgrade = "-upgrade <query | finalize>:\n"
+ " query: query the current upgrade status.\n"
+ " finalize: finalize the upgrade of HDFS (equivalent to " +
"-finalizeUpgrade.";
String metaSave = "-metasave <filename>: \tSave Namenode's primary data structures\n" +
"\t\tto <filename> in the directory specified by hadoop.log.dir property.\n" +
"\t\t<filename> is overwritten if it exists.\n" +
@ -1278,6 +1285,8 @@ private void printHelp(String cmd) {
System.out.println(finalizeUpgrade);
} else if (RollingUpgradeCommand.matches("-"+cmd)) {
System.out.println(RollingUpgradeCommand.DESCRIPTION);
} else if ("upgrade".equals(cmd)) {
System.out.println(upgrade);
} else if ("metasave".equals(cmd)) {
System.out.println(metaSave);
} else if (SetQuotaCommand.matches("-"+cmd)) {
@ -1338,6 +1347,7 @@ private void printHelp(String cmd) {
System.out.println(refreshNodes);
System.out.println(finalizeUpgrade);
System.out.println(RollingUpgradeCommand.DESCRIPTION);
System.out.println(upgrade);
System.out.println(metaSave);
System.out.println(SetQuotaCommand.DESCRIPTION);
System.out.println(ClearQuotaCommand.DESCRIPTION);
@ -1416,6 +1426,83 @@ public int finalizeUpgrade() throws IOException {
return 0;
}
/**
* Command to get the upgrade status of each namenode in the nameservice.
* Usage: hdfs dfsadmin -upgrade query
* @exception IOException
*/
public int getUpgradeStatus() throws IOException {
DistributedFileSystem dfs = getDFS();
Configuration dfsConf = dfs.getConf();
URI dfsUri = dfs.getUri();
boolean isHaAndLogicalUri = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
if (isHaAndLogicalUri) {
// In the case of HA and logical URI, run upgrade query for all
// NNs in this nameservice.
String nsId = dfsUri.getHost();
List<ProxyAndInfo<ClientProtocol>> proxies =
HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
nsId, ClientProtocol.class);
List<IOException> exceptions = new ArrayList<>();
for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
try {
boolean upgradeFinalized = proxy.getProxy().upgradeStatus();
if (upgradeFinalized) {
System.out.println("Upgrade finalized for " + proxy.getAddress());
} else {
System.out.println("Upgrade not finalized for " +
proxy.getAddress());
}
} catch (IOException ioe){
System.err.println("Getting upgrade status failed for " +
proxy.getAddress());
exceptions.add(ioe);
}
}
if (!exceptions.isEmpty()){
throw MultipleIOException.createIOException(exceptions);
}
} else {
if (dfs.upgradeStatus()) {
System.out.println("Upgrade finalized");
} else {
System.out.println("Upgrade not finalized");
}
}
return 0;
}
/**
* Upgrade command to get the status of upgrade or ask NameNode to finalize
* the previously performed upgrade.
* Usage: hdfs dfsadmin -upgrade [query | finalize]
* @exception IOException
*/
public int upgrade(String arg) throws IOException {
UpgradeAction action;
if ("query".equalsIgnoreCase(arg)) {
action = UpgradeAction.QUERY;
} else if ("finalize".equalsIgnoreCase(arg)) {
action = UpgradeAction.FINALIZE;
} else {
printUsage("-upgrade");
return -1;
}
switch (action) {
case QUERY:
return getUpgradeStatus();
case FINALIZE:
return finalizeUpgrade();
default:
printUsage("-upgrade");
return -1;
}
}
/**
* Dumps DFS data structures into specified file.
* Usage: hdfs dfsadmin -metasave filename
@ -1997,6 +2084,9 @@ private static void printUsage(String cmd) {
} else if (RollingUpgradeCommand.matches(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [" + RollingUpgradeCommand.USAGE+"]");
} else if ("-upgrade".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-upgrade query | finalize]");
} else if ("-metasave".equals(cmd)) {
System.err.println("Usage: hdfs dfsadmin"
+ " [-metasave filename]");
@ -2146,6 +2236,11 @@ public int run(String[] argv) throws Exception {
printUsage(cmd);
return exitCode;
}
} else if ("-upgrade".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
return exitCode;
}
} else if ("-metasave".equals(cmd)) {
if (argv.length != 2) {
printUsage(cmd);
@ -2263,6 +2358,8 @@ public int run(String[] argv) throws Exception {
exitCode = finalizeUpgrade();
} else if (RollingUpgradeCommand.matches(cmd)) {
exitCode = RollingUpgradeCommand.run(getDFS(), argv, i);
} else if ("-upgrade".equals(cmd)) {
exitCode = upgrade(argv[i]);
} else if ("-metasave".equals(cmd)) {
exitCode = metaSave(argv, i);
} else if (ClearQuotaCommand.matches(cmd)) {
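
Aside (illustrative only, not part of the patch): the new subcommand can also be driven programmatically through the Tool interface, which is how the test further below invokes it via admin.run(new String[]{"-upgrade", "query"}). A minimal standalone sketch, assuming the default client configuration:

import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class UpgradeQuery {
  public static void main(String[] args) throws Exception {
    // Exit code 0 when the status query succeeds; -1 on bad usage or failure.
    int exit = ToolRunner.run(new DFSAdmin(), new String[] {"-upgrade", "query"});
    System.exit(exit);
  }
}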

View File

@ -352,6 +352,7 @@ Usage:
hdfs dfsadmin [-clrSpaceQuota [-storageType <storagetype>] <dirname>...<dirname>]
hdfs dfsadmin [-finalizeUpgrade]
hdfs dfsadmin [-rollingUpgrade [<query> |<prepare> |<finalize>]]
hdfs dfsadmin [-upgrade [query | finalize]]
hdfs dfsadmin [-refreshServiceAcl]
hdfs dfsadmin [-refreshUserToGroupsMappings]
hdfs dfsadmin [-refreshSuperUserGroupsConfiguration]
@ -389,6 +390,7 @@ Usage:
| `-clrSpaceQuota` `[-storageType <storagetype>]` \<dirname\>...\<dirname\> | See [HDFS Quotas Guide](../hadoop-hdfs/HdfsQuotaAdminGuide.html#Administrative_Commands) for the detail. |
| `-finalizeUpgrade` | Finalize upgrade of HDFS. Datanodes delete their previous version working directories, followed by Namenode doing the same. This completes the upgrade process. |
| `-rollingUpgrade` [\<query\>\|\<prepare\>\|\<finalize\>] | See [Rolling Upgrade document](../hadoop-hdfs/HdfsRollingUpgrade.html#dfsadmin_-rollingUpgrade) for the detail. |
| `-upgrade` query\|finalize | Query the current upgrade status.<br/>Finalize upgrade of HDFS (equivalent to -finalizeUpgrade). |
| `-refreshServiceAcl` | Reload the service-level authorization policy file. |
| `-refreshUserToGroupsMappings` | Refresh user-to-groups mappings. |
| `-refreshSuperUserGroupsConfiguration` | Refresh superuser proxy groups mappings |

View File

@ -632,6 +632,8 @@ When moving between versions of HDFS, sometimes the newer software can simply be
Note that if at any time you want to restart the NameNodes before finalizing or rolling back the upgrade, you should start the NNs as normal, i.e. without any special startup flag.
**To query the status of an upgrade**, the operator will use the `` `hdfs dfsadmin -upgrade query' `` command while at least one of the NNs is running. The command reports, for each NN, whether the upgrade process is finalized or not.
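For illustration only (not part of this patch; the hostnames and port are hypothetical, and the exact address format follows proxy.getAddress() in DFSAdmin above), the query output in an HA setup would be along these lines:

    $ hdfs dfsadmin -upgrade query
    Upgrade not finalized for nn1.example.com:8020
    Upgrade not finalized for nn2.example.com:8020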
**To finalize an HA upgrade**, the operator will use the `` `hdfs dfsadmin -finalizeUpgrade' `` command while the NNs are running and one of them is active. The active NN at the time this happens will perform the finalization of the shared log, and the NN whose local storage directories contain the previous FS state will delete its local state.
**To perform a rollback** of an upgrade, both NNs should first be shut down. The operator should run the roll back command on the NN where they initiated the upgrade procedure, which will perform the rollback on the local dirs there, as well as on the shared log, either NFS or on the JNs. Afterward, this NN should be started and the operator should run `` `-bootstrapStandby' `` on the other NN to bring the two NNs in sync with this rolled-back file system state.

View File

@ -28,7 +28,10 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
import org.junit.After;
import org.junit.Test;
@ -676,6 +679,70 @@ public void testFinalizeUpgradeNN1DownNN2Down() throws Exception {
assertOutputMatches(message + newLine);
}
@Test (timeout = 300000)
public void testUpgradeCommand() throws Exception {
final String finalizedMsg = "Upgrade finalized for.*";
final String notFinalizedMsg = "Upgrade not finalized for.*";
final String failMsg = "Getting upgrade status failed for.*" + newLine +
"upgrade: .*";
final String finalizeSuccessMsg = "Finalize upgrade successful for.*";
setUpHaCluster(false);
MiniDFSCluster dfsCluster = cluster.getDfsCluster();
// Before upgrade is initialized, the query should return upgrade
// finalized (as no upgrade is in progress)
String message = finalizedMsg + newLine + finalizedMsg + newLine;
verifyUpgradeQueryOutput(message, 0);
// Shutdown the NNs
dfsCluster.shutdownNameNode(0);
dfsCluster.shutdownNameNode(1);
// Start NN1 with -upgrade option
dfsCluster.getNameNodeInfos()[0].setStartOpt(
HdfsServerConstants.StartupOption.UPGRADE);
dfsCluster.restartNameNode(0, true);
// Running -upgrade query should return "not finalized" for NN1 and
// connection exception for NN2 (as NN2 is down)
message = notFinalizedMsg + newLine;
verifyUpgradeQueryOutput(message, -1);
String errorMsg = failMsg + newLine;
verifyUpgradeQueryOutput(errorMsg, -1);
// Bootstrap the standby (NN2) with the upgraded info.
int rc = BootstrapStandby.run(
new String[]{"-force"},
dfsCluster.getConfiguration(1));
assertEquals(0, rc);
out.reset();
// Restart NN2.
dfsCluster.restartNameNode(1);
// Both NNs should return "not finalized" msg for -upgrade query
message = notFinalizedMsg + newLine + notFinalizedMsg + newLine;
verifyUpgradeQueryOutput(message, 0);
// Finalize the upgrade
int exitCode = admin.run(new String[] {"-upgrade", "finalize"});
assertEquals(err.toString().trim(), 0, exitCode);
message = finalizeSuccessMsg + newLine + finalizeSuccessMsg + newLine;
assertOutputMatches(message);
// NNs should return "upgrade finalized" msg
message = finalizedMsg + newLine + finalizedMsg + newLine;
verifyUpgradeQueryOutput(message, 0);
}
private void verifyUpgradeQueryOutput(String message, int expected) throws
Exception {
int exitCode = admin.run(new String[] {"-upgrade", "query"});
assertEquals(err.toString().trim(), expected, exitCode);
assertOutputMatches(message);
}
@Test (timeout = 30000)
public void testListOpenFilesNN1UpNN2Down() throws Exception{
setUpHaCluster(false);