diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 576b6c2b588..bf9ac29163c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -222,6 +222,8 @@ Release 2.0.1-alpha - UNRELEASED
     HDFS-2727. libhdfs should get the default block size from the server.
     (Colin Patrick McCabe via eli)
 
+    HDFS-3832. Remove protocol methods related to DistributedUpgrade. (suresh)
+
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 3c0acaee04b..2c8f3e7d696 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -111,7 +111,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -131,7 +130,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.DataOutputBuffer;
@@ -1238,7 +1236,7 @@ public class DFSClient implements java.io.Closeable {
    * @param blockSize maximum block size
    * @param progress interface for reporting client progress
    * @param buffersize underlying buffer size
-   * @param checksumOpts checksum options
+   * @param checksumOpt checksum options
    * 
    * @return output stream
    * 
@@ -1935,14 +1933,6 @@ public class DFSClient implements java.io.Closeable {
     namenode.finalizeUpgrade();
   }
 
-  /**
-   * @see ClientProtocol#distributedUpgradeProgress(HdfsConstants.UpgradeAction)
-   */
-  public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
-      throws IOException {
-    return namenode.distributedUpgradeProgress(action);
-  }
-
   /**
   */
  @Deprecated
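Note: the surviving upgrade-admin path keeps the same three-hop delegation that the removed distributedUpgradeProgress() call used — the public FileSystem-level API forwards to DFSClient, which issues the ClientProtocol RPC to the NameNode. A minimal sketch of that chain using the finalizeUpgrade() method that remains (the cluster URI below is a placeholder, not part of this patch):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FinalizeUpgradeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // hdfs://namenode:8020/ is an illustrative URI only.
    FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf);
    // DistributedFileSystem#finalizeUpgrade() forwards to
    // DFSClient#finalizeUpgrade(), which invokes the
    // ClientProtocol#finalizeUpgrade() RPC on the NameNode.
    ((DistributedFileSystem) fs).finalizeUpgrade();
  }
}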
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index dae56d81be8..2341c23eaaa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -55,13 +55,11 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
@@ -698,11 +696,6 @@ public class DistributedFileSystem extends FileSystem {
     dfs.finalizeUpgrade();
   }
 
-  public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action
-  ) throws IOException {
-    return dfs.distributedUpgradeProgress(action);
-  }
-
   /*
    * Requests the namenode to dump data strcutures into specified 
    * file.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 6b401be0cbb..86bbe10b981 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -33,8 +33,6 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -694,17 +692,6 @@
    */
   public void finalizeUpgrade() throws IOException;
 
-  /**
-   * Method no longer used - retained only for backward compatibility
-   *
-   * Report distributed upgrade progress or force current upgrade to proceed.
-   * @param action {@link HdfsConstants.UpgradeAction} to perform
-   * @return upgrade status information or null if no upgrades are in progress
-   * @throws IOException
-   */
-  public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
-      throws IOException;
-
   /**
    * @return CorruptFileBlocks, containing a list of corrupt files (with
    *         duplicates if there is more than one corrupt block in a file)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index da64b9e7648..4193686fbbf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -60,7 +60,7 @@ public class HdfsConstants {
   public static int MAX_PATH_LENGTH = 8000;
   public static int MAX_PATH_DEPTH = 1000;
 
-  // TODO mb@media-style.com: should be conf injected?
+  // TODO should be conf injected?
   public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
   public static final int IO_FILE_BUFFER_SIZE = new HdfsConfiguration().getInt(
       DFSConfigKeys.IO_FILE_BUFFER_SIZE_KEY,
@@ -84,16 +84,6 @@ public class HdfsConstants {
   // An invalid transaction ID that will never be seen in a real namesystem.
   public static final long INVALID_TXID = -12345;
 
-  /**
-   * Distributed upgrade actions:
-   *
-   * 1. Get upgrade status. 2. Get detailed upgrade status. 3. Proceed with the
-   * upgrade if it is stuck, no matter what the status is.
-   */
-  public static enum UpgradeAction {
-    GET_STATUS, DETAILED_STATUS, FORCE_PROCEED;
-  }
-
   /**
    * URI Scheme for hdfs://namenode/ URIs.
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 1a9d4e6de0c..37ab28505c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -50,8 +50,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Create
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
@@ -130,7 +128,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.io.Text;
 
 import com.google.protobuf.RpcController;
@@ -570,24 +567,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
   }
 
-  @Override
-  public DistributedUpgradeProgressResponseProto distributedUpgradeProgress(
-      RpcController controller, DistributedUpgradeProgressRequestProto req)
-      throws ServiceException {
-    try {
-      UpgradeStatusReport result = server.distributedUpgradeProgress(PBHelper
-          .convert(req.getAction()));
-      DistributedUpgradeProgressResponseProto.Builder builder = 
-          DistributedUpgradeProgressResponseProto.newBuilder();
-      if (result != null) {
-        builder.setReport(PBHelper.convert(result));
-      }
-      return builder.build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
   @Override
   public ListCorruptFileBlocksResponseProto listCorruptFileBlocks(
       RpcController controller, ListCorruptFileBlocksRequestProto req)
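For context, the deleted method above followed the server-side translator idiom used throughout this class: unwrap the proto request, call through to the server object, wrap checked IOExceptions in ServiceException, and set optional response fields only for non-null results. A hedged sketch of that shape — FooRequestProto, FooResponseProto, FooResult, server.foo(), and the matching PBHelper.convert overloads are hypothetical stand-ins, not real HDFS types:

@Override
public FooResponseProto foo(RpcController controller, FooRequestProto req)
    throws ServiceException {
  try {
    // Unwrap the proto request and call the NameNode-side implementation.
    FooResult result = server.foo(PBHelper.convert(req.getAction()));
    FooResponseProto.Builder builder = FooResponseProto.newBuilder();
    if (result != null) {
      builder.setReport(PBHelper.convert(result)); // optional field stays unset for null
    }
    return builder.build();
  } catch (IOException e) {
    // Blocking PB services may only throw ServiceException; the IOException
    // travels inside it and is re-surfaced on the client.
    throw new ServiceException(e);
  }
}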
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 4f0792e9b51..5626f038acd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -58,8 +57,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Concat
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto;
@@ -102,7 +99,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -128,8 +124,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     ProtocolMetaInterface, ClientProtocol, Closeable, ProtocolTranslator {
   final private ClientNamenodeProtocolPB rpcProxy;
 
-  public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy)
-      throws IOException {
+  public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
     rpcProxy = proxy;
   }
 
@@ -564,21 +559,6 @@ public class ClientNamenodeProtocolTranslatorPB implements
     }
   }
 
-  @Override
-  public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
-      throws IOException {
-    DistributedUpgradeProgressRequestProto req = 
-        DistributedUpgradeProgressRequestProto.newBuilder().
-        setAction(PBHelper.convert(action)).build();
-    try {
-      DistributedUpgradeProgressResponseProto res = rpcProxy
-          .distributedUpgradeProgress(null, req);
-      return res.hasReport() ? PBHelper.convert(res.getReport()) : null;
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
   @Override
   public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
       throws IOException {
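The client-side translator removed here mirrored that idiom in the other direction: build the request proto, invoke the rpcProxy, unwrap ServiceException back into the original remote IOException, and map an unset optional field to null. A sketch with the same hypothetical Foo names (ProtobufHelper.getRemoteException is the real Hadoop helper used by the surrounding code):

@Override
public FooResult foo(FooAction action) throws IOException {
  FooRequestProto req = FooRequestProto.newBuilder()
      .setAction(PBHelper.convert(action)).build();
  try {
    FooResponseProto res = rpcProxy.foo(null, req);
    // hasReport() == false on the wire becomes a plain Java null here.
    return res.hasReport() ? PBHelper.convert(res.getReport()) : null;
  } catch (ServiceException e) {
    // Recover the IOException the server wrapped into the ServiceException.
    throw ProtobufHelper.getRemoteException(e);
  }
}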
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index 92563d265db..3150414d468 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -41,8 +41,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeComm
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto;
@@ -59,7 +57,6 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
@@ -252,20 +249,6 @@ public class DatanodeProtocolClientSideTranslatorPB implements
     }
   }
 
-  @Override
-  public UpgradeCommand processUpgradeCommand(UpgradeCommand comm)
-      throws IOException {
-    ProcessUpgradeRequestProto req = ProcessUpgradeRequestProto.newBuilder()
-        .setCmd(PBHelper.convert(comm)).build();
-    ProcessUpgradeResponseProto resp;
-    try {
-      resp = rpcProxy.processUpgrade(NULL_CONTROLLER, req);
-    } catch (ServiceException se) {
-      throw ProtobufHelper.getRemoteException(se);
-    }
-    return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
-  }
-
   @Override
   public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
     ReportBadBlocksRequestProto.Builder builder = ReportBadBlocksRequestProto
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
index 707054654db..861852f9b31 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
@@ -33,8 +33,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportR
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto;
@@ -56,7 +54,6 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
@@ -211,25 +208,6 @@ public class DatanodeProtocolServerSideTranslatorPB implements
         .setInfo(PBHelper.convert(info)).build();
   }
 
-  @Override
-  public ProcessUpgradeResponseProto processUpgrade(RpcController controller,
-      ProcessUpgradeRequestProto request) throws ServiceException {
-    UpgradeCommand ret;
-    try {
-      UpgradeCommand cmd = request.hasCmd() ? PBHelper
-          .convert(request.getCmd()) : null;
-      ret = impl.processUpgradeCommand(cmd);
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-    ProcessUpgradeResponseProto.Builder builder = 
-        ProcessUpgradeResponseProto.newBuilder();
-    if (ret != null) {
-      builder.setCmd(PBHelper.convert(ret));
-    }
-    return builder.build();
-  }
-
   @Override
   public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
       ReportBadBlocksRequestProto request) throws ServiceException {
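Both datanode translators deleted above relied on one wire convention: an optional proto submessage models "no command", so hasCmd() == false on one side becomes a Java null on the other. A hedged round-trip sketch of that convention — FooProto is a hypothetical message with an optional cmd field, and the PBHelper.convert overloads shown are the ones such a type would need:

// Build side: leave the optional field unset when there is nothing to send.
FooProto.Builder builder = FooProto.newBuilder();
if (cmd != null) {
  builder.setCmd(PBHelper.convert(cmd));
}
FooProto msg = builder.build();

// Read side: probe the field before converting, mapping "unset" back to null.
DatanodeCommand back = msg.hasCmd() ? PBHelper.convert(msg.getCmd()) : null;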
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index b620d922a20..09032baf398 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -47,7 +46,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Create
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeActionProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
@@ -61,7 +59,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHe
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@@ -96,7 +93,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.BlockKey;
@@ -106,7 +102,6 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
 import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
@@ -132,7 +127,6 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStat
 import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.util.DataChecksum;
@@ -640,8 +634,6 @@ public class PBHelper {
       return PBHelper.convert(proto.getKeyUpdateCmd());
     case RegisterCommand:
       return REG_CMD;
-    case UpgradeCommand:
-      return PBHelper.convert(proto.getUpgradeCmd());
     }
     return null;
   }
@@ -738,11 +730,6 @@ public class PBHelper {
       builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).setBlkCmd(
           PBHelper.convert((BlockCommand) datanodeCommand));
       break;
-    case DatanodeProtocol.DNA_UC_ACTION_REPORT_STATUS:
-    case DatanodeProtocol.DNA_UC_ACTION_START_UPGRADE:
-      builder.setCmdType(DatanodeCommandProto.Type.UpgradeCommand)
-          .setUpgradeCmd(PBHelper.convert((UpgradeCommand) datanodeCommand));
-      break;
     case DatanodeProtocol.DNA_UNKNOWN: //Not expected
     default:
       builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
@@ -750,19 +737,6 @@
     return builder.build();
   }
 
-  public static UpgradeCommand convert(UpgradeCommandProto upgradeCmd) {
-    int action = UpgradeCommand.UC_ACTION_UNKNOWN;
-    switch (upgradeCmd.getAction()) {
-    case REPORT_STATUS:
-      action = UpgradeCommand.UC_ACTION_REPORT_STATUS;
-      break;
-    case START_UPGRADE:
-      action = UpgradeCommand.UC_ACTION_START_UPGRADE;
-    }
-    return new UpgradeCommand(action, upgradeCmd.getVersion(),
-        (short) upgradeCmd.getUpgradeStatus());
-  }
-
   public static KeyUpdateCommand convert(KeyUpdateCommandProto keyUpdateCmd) {
     return new KeyUpdateCommand(PBHelper.convert(keyUpdateCmd.getKeys()));
   }
@@ -852,28 +826,6 @@
         .build();
   }
 
-  public static UpgradeCommandProto convert(UpgradeCommand comm) {
-    UpgradeCommandProto.Builder builder = UpgradeCommandProto.newBuilder();
-    if (comm == null) {
-      return builder.setAction(UpgradeCommandProto.Action.UNKNOWN)
-          .setVersion(0).setUpgradeStatus(0).build();
-    }
-    builder.setVersion(comm.getVersion()).setUpgradeStatus(
-        comm.getCurrentStatus());
-    switch (comm.getAction()) {
-    case UpgradeCommand.UC_ACTION_REPORT_STATUS:
-      builder.setAction(UpgradeCommandProto.Action.REPORT_STATUS);
-      break;
-    case UpgradeCommand.UC_ACTION_START_UPGRADE:
-      builder.setAction(UpgradeCommandProto.Action.START_UPGRADE);
-      break;
-    default:
-      builder.setAction(UpgradeCommandProto.Action.UNKNOWN);
-      break;
-    }
-    return builder.build();
-  }
-
   public static ReceivedDeletedBlockInfo convert(
       ReceivedDeletedBlockInfoProto proto) {
     ReceivedDeletedBlockInfo.BlockStatus status = null;
@@ -1238,51 +1190,6 @@
     }
   }
 
-  public static UpgradeActionProto convert(
-      UpgradeAction a) {
-    switch (a) {
-    case GET_STATUS:
-      return UpgradeActionProto.GET_STATUS;
-    case DETAILED_STATUS:
-      return UpgradeActionProto.DETAILED_STATUS;
-    case FORCE_PROCEED:
-      return UpgradeActionProto.FORCE_PROCEED;
-    default:
-      throw new IllegalArgumentException("Unexpected UpgradeAction :" + a);
-    }
-  }
-
-
-  public static UpgradeAction convert(
-      UpgradeActionProto a) {
-    switch (a) {
-    case GET_STATUS:
-      return UpgradeAction.GET_STATUS;
-    case DETAILED_STATUS:
-      return UpgradeAction.DETAILED_STATUS;
-    case FORCE_PROCEED:
-      return UpgradeAction.FORCE_PROCEED;
-    default:
-      throw new IllegalArgumentException("Unexpected UpgradeAction :" + a);
-    }
-  }
-
-  public static UpgradeStatusReportProto convert(UpgradeStatusReport r) {
-    if (r == null)
-      return null;
-    return UpgradeStatusReportProto.newBuilder()
-        .setVersion(r.getVersion())
-        .setUpgradeStatus(r.getUpgradeStatus())
-        .setFinalized(r.isFinalized())
-        .build();
-  }
-
-  public static UpgradeStatusReport convert(UpgradeStatusReportProto r) {
-    if (r == null) return null;
-    return new UpgradeStatusReport(r.getVersion(),
-        (short) r.getUpgradeStatus(), r.getFinalized());
-  }
-
   public static CorruptFileBlocks convert(CorruptFileBlocksProto c) {
     if (c == null)
       return null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 290d171dca0..74d64dab766 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -561,10 +560,6 @@ class BPOfferService {
 
       dn.finalizeUpgradeForPool(bp);
       break;
-    case UpgradeCommand.UC_ACTION_START_UPGRADE:
-      // start distributed upgrade here
-      LOG.warn("Distibuted upgrade is no longer supported");
-      break;
     case DatanodeProtocol.DNA_RECOVERBLOCK:
       String who = "NameNode at " + actor.getNNSocketAddress();
       dn.recoverBlocks(who, ((BlockRecoveryCommand)cmd).getRecoveringBlocks());
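After this hunk, BPOfferService dispatches NameNode commands with a plain switch over the DatanodeProtocol.DNA_* action codes, with the upgrade arm gone. A trimmed sketch of the dispatch shape; only the cases visible in the surrounding context lines are shown, and the names (dn, bp, actor, cmd, LOG) follow those lines:

switch (cmd.getAction()) {
case DatanodeProtocol.DNA_FINALIZE:
  // Finalize the previous upgrade for this block pool.
  dn.finalizeUpgradeForPool(bp);
  break;
case DatanodeProtocol.DNA_RECOVERBLOCK:
  // Hand recovery work to the DataNode, tagged with its origin.
  String who = "NameNode at " + actor.getNNSocketAddress();
  dn.recoverBlocks(who, ((BlockRecoveryCommand) cmd).getRecoveringBlocks());
  break;
default:
  LOG.warn("Unknown DatanodeCommand action: " + cmd.getAction());
}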
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 4435a5ebd34..2ff4e569fd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -60,7 +60,6 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -89,7 +88,6 @@ import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -108,7 +106,6 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -740,13 +737,6 @@ class NameNodeRpcServer implements NamenodeProtocols {
     namesystem.finalizeUpgrade();
   }
 
-  @Override // ClientProtocol
-  public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
-      throws IOException {
-    throw new UnsupportedActionException(
-        "Deprecated method. No longer supported");
-  }
-
   @Override // ClientProtocol
   public void metaSave(String filename) throws IOException {
     namesystem.checkOperation(OperationCategory.UNCHECKED);
@@ -917,13 +907,6 @@ class NameNodeRpcServer implements NamenodeProtocols {
     return namesystem.getNamespaceInfo();
   }
 
-  @Override // DatanodeProtocol
-  public UpgradeCommand processUpgradeCommand(UpgradeCommand comm)
-      throws IOException {
-    throw new UnsupportedActionException(
-        "Deprecated method, no longer supported");
-  }
-
   /**
    * Verifies the given registration.
    * 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
index 2c1981cb624..fa1a285158c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -49,7 +48,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.Text;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
index 9439c631d33..275b7c9a5c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
@@ -72,8 +72,6 @@ public interface DatanodeProtocol {
   final static int DNA_RECOVERBLOCK = 6;  // request a block recovery
   final static int DNA_ACCESSKEYUPDATE = 7;  // update access key
   final static int DNA_BALANCERBANDWIDTHUPDATE = 8; // update balancer bandwidth
-  final static int DNA_UC_ACTION_REPORT_STATUS = 100; // Report upgrade status
-  final static int DNA_UC_ACTION_START_UPGRADE = 101; // start upgrade
 
   /**
    * Register Datanode.
@@ -150,18 +148,6 @@
 
   public NamespaceInfo versionRequest() throws IOException;
 
-  /**
-   * This is a very general way to send a command to the name-node during
-   * distributed upgrade process.
-   *
-   * The generosity is because the variety of upgrade commands is unpredictable.
-   * The reply from the name-node is also received in the form of an upgrade
-   * command.
-   *
-   * @return a reply in the form of an upgrade command
-   */
-  UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException;
-
   /**
   * same as {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#reportBadBlocks(LocatedBlock[])}
   * }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java
deleted file mode 100644
index fc9656a8f39..00000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/UpgradeCommand.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * This as a generic distributed upgrade command.
- *
- * During the upgrade cluster components send upgrade commands to each other
- * in order to obtain or share information with them.
- * It is supposed that each upgrade defines specific upgrade command by
- * deriving them from this class.
- * The upgrade command contains version of the upgrade, which is verified
- * on the receiving side and current status of the upgrade.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class UpgradeCommand extends DatanodeCommand {
-  public final static int UC_ACTION_UNKNOWN = DatanodeProtocol.DNA_UNKNOWN;
-  public final static int UC_ACTION_REPORT_STATUS =
-      DatanodeProtocol.DNA_UC_ACTION_REPORT_STATUS;
-  public final static int UC_ACTION_START_UPGRADE =
-      DatanodeProtocol.DNA_UC_ACTION_START_UPGRADE;
-
-  private int version;
-  private short upgradeStatus;
-
-  public UpgradeCommand() {
-    super(UC_ACTION_UNKNOWN);
-    this.version = 0;
-    this.upgradeStatus = 0;
-  }
-
-  public UpgradeCommand(int action, int version, short status) {
-    super(action);
-    this.version = version;
-    this.upgradeStatus = status;
-  }
-
-  public int getVersion() {
-    return this.version;
-  }
-
-  public short getCurrentStatus() {
-    return this.upgradeStatus;
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index 4fd4e2674ab..439b0cdb58b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -296,19 +296,6 @@ message FinalizeUpgradeRequestProto { // no parameters
 message FinalizeUpgradeResponseProto { // void response
 }
 
-enum UpgradeActionProto {
-  GET_STATUS = 1;
-  DETAILED_STATUS = 2;
-  FORCE_PROCEED = 3;
-}
-
-message DistributedUpgradeProgressRequestProto {
-  required UpgradeActionProto action = 1;
-}
-message DistributedUpgradeProgressResponseProto {
-  optional UpgradeStatusReportProto report = 1;
-}
-
 message ListCorruptFileBlocksRequestProto {
   required string path = 1;
   optional string cookie = 2;
@@ -490,8 +477,6 @@ service ClientNamenodeProtocol {
   rpc refreshNodes(RefreshNodesRequestProto) returns(RefreshNodesResponseProto);
   rpc finalizeUpgrade(FinalizeUpgradeRequestProto)
       returns(FinalizeUpgradeResponseProto);
-  rpc distributedUpgradeProgress(DistributedUpgradeProgressRequestProto)
-      returns(DistributedUpgradeProgressResponseProto);
   rpc listCorruptFileBlocks(ListCorruptFileBlocksRequestProto)
       returns(ListCorruptFileBlocksResponseProto);
   rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
index f5f36e85bf6..65caa5beefb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
@@ -60,7 +60,7 @@
     FinalizeCommand = 3;
     KeyUpdateCommand = 4;
     RegisterCommand = 5;
-    UpgradeCommand = 6;
+    UnusedUpgradeCommand = 6;
     NullDatanodeCommand = 7;
   }
 
@@ -74,7 +74,6 @@
   optional FinalizeCommandProto finalizeCmd = 5;
   optional KeyUpdateCommandProto keyUpdateCmd = 6;
   optional RegisterCommandProto registerCmd = 7;
-  optional UpgradeCommandProto upgradeCmd = 8;
 }
 
 /**
@@ -131,20 +130,6 @@
 message RegisterCommandProto { // void
 }
 
-/**
- * Generic distributed upgrade Command
- */
-message UpgradeCommandProto {
-  enum Action {
-    UNKNOWN = 0;          // Unknown action
-    REPORT_STATUS = 100;  // Report upgrade status
-    START_UPGRADE = 101;  // Start upgrade
-  }
-  required Action action = 1;        // Upgrade action
-  required uint32 version = 2;       // Version of the upgrade
-  required uint32 upgradeStatus = 3; // % completed in range 0 & 100
-}
-
 /**
  * registration - Information of the datanode registering with the namenode
 */
@@ -302,20 +287,6 @@ message ErrorReportRequestProto {
 message ErrorReportResponseProto {
 }
 
-/**
- * cmd - Upgrade command sent from datanode to namenode
- */
-message ProcessUpgradeRequestProto {
-  optional UpgradeCommandProto cmd = 1;
-}
-
-/**
- * cmd - Upgrade command sent from namenode to datanode
- */
-message ProcessUpgradeResponseProto {
-  optional UpgradeCommandProto cmd = 1;
-}
-
 /**
  * blocks - list of blocks that are reported as corrupt
  */
@@ -388,12 +359,6 @@ service DatanodeProtocolService {
    */
   rpc versionRequest(VersionRequestProto) returns(VersionResponseProto);
 
-  /**
-   * Generic way to send commands from datanode to namenode during
-   * distributed upgrade process.
-   */
-  rpc processUpgrade(ProcessUpgradeRequestProto) returns(ProcessUpgradeResponseProto);
-
   /**
    * Report corrupt blocks at the specified location
    */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index 10767549b8c..70a04752ffc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -210,15 +210,6 @@ message DirectoryListingProto {
   required uint32 remainingEntries = 2;
 }
 
-/**
- * Status of current cluster upgrade from one version to another
- */
-message UpgradeStatusReportProto {
-  required uint32 version = 1;;
-  required uint32 upgradeStatus = 2; // % completed in range 0 & 100
-  required bool finalized = 3;
-}
-
 /**
  * Common node information shared by all the nodes in the cluster
 */
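One wire-compatibility detail worth noting: the DatanodeCommandProto.Type value is renamed to UnusedUpgradeCommand rather than deleted, so tag number 6 stays occupied and cannot be silently reassigned to a future command type. In the generated Java this is directly observable — a small sketch, with the class located under the generated-protos package used elsewhere in this patch:

import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;

public class EnumTagSketch {
  public static void main(String[] args) {
    // The rename keeps the wire tag: old senders that emitted 6 still map to
    // a known value instead of colliding with a future command type.
    DatanodeCommandProto.Type t = DatanodeCommandProto.Type.UnusedUpgradeCommand;
    System.out.println(t.getNumber()); // prints 6
  }
}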