HDFS-3832. Merging change 1376139 from trunk to branch-2. Contributed by Suresh Srinivas.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1376145 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2012-08-22 17:05:38 +00:00
parent 8340bb151e
commit 8c28ebabcd
18 changed files with 6 additions and 378 deletions

View File

@@ -222,6 +222,8 @@ Release 2.0.1-alpha - UNRELEASED
     HDFS-2727. libhdfs should get the default block size from the server.
     (Colin Patrick McCabe via eli)
 
+    HDFS-3832. Remove protocol methods related to DistributedUpgrade. (suresh)
+
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log

View File

@@ -111,7 +111,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -131,7 +130,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.DataOutputBuffer;
@@ -1238,7 +1236,7 @@ public class DFSClient implements java.io.Closeable {
    * @param blockSize maximum block size
    * @param progress interface for reporting client progress
    * @param buffersize underlying buffer size
-   * @param checksumOpts checksum options
+   * @param checksumOpt checksum options
    * 
    * @return output stream
    * 
@@ -1935,14 +1933,6 @@ public class DFSClient implements java.io.Closeable {
     namenode.finalizeUpgrade();
   }
 
-  /**
-   * @see ClientProtocol#distributedUpgradeProgress(HdfsConstants.UpgradeAction)
-   */
-  public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
-      throws IOException {
-    return namenode.distributedUpgradeProgress(action);
-  }
-
   /**
    */
   @Deprecated

View File

@@ -55,13 +55,11 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
@@ -698,11 +696,6 @@ public class DistributedFileSystem extends FileSystem {
     dfs.finalizeUpgrade();
   }
 
-  public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action
-      ) throws IOException {
-    return dfs.distributedUpgradeProgress(action);
-  }
-
   /*
    * Requests the namenode to dump data structures into specified
    * file.

View File

@@ -33,8 +33,6 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -694,17 +692,6 @@ public interface ClientProtocol {
    */
   public void finalizeUpgrade() throws IOException;
 
-  /**
-   * <em>Method no longer used - retained only for backward compatibility</em>
-   *
-   * Report distributed upgrade progress or force current upgrade to proceed.
-   * @param action {@link HdfsConstants.UpgradeAction} to perform
-   * @return upgrade status information or null if no upgrades are in progress
-   * @throws IOException
-   */
-  public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
-      throws IOException;
-
   /**
    * @return CorruptFileBlocks, containing a list of corrupt files (with
    *     duplicates if there is more than one corrupt block in a file)
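For context, a minimal sketch (not part of this commit) of how a client could have driven the removed RPC through DistributedFileSystem. The poller class and method names below are hypothetical; UpgradeAction, UpgradeStatusReport, and distributedUpgradeProgress are the APIs deleted in this commit.

import java.io.IOException;

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;

// Hypothetical pre-HDFS-3832 caller; compiles only against the old sources.
public class UpgradeStatusPoller {
  static void printUpgradeStatus(DistributedFileSystem dfs) throws IOException {
    // GET_STATUS is one of the three UpgradeAction values removed below
    // (GET_STATUS, DETAILED_STATUS, FORCE_PROCEED).
    UpgradeStatusReport report =
        dfs.distributedUpgradeProgress(UpgradeAction.GET_STATUS);
    if (report == null) {
      System.out.println("No distributed upgrade is in progress");
    } else {
      System.out.println("Upgrade to version " + report.getVersion() + " is "
          + report.getUpgradeStatus() + "% complete, finalized = "
          + report.isFinalized());
    }
  }
}

On branch-2 the server-side stub in NameNodeRpcServer (deleted further down) already threw UnsupportedActionException, so this call could never succeed; removing the protocol method loses no functionality.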

View File

@@ -60,7 +60,7 @@ public class HdfsConstants {
   public static int MAX_PATH_LENGTH = 8000;
   public static int MAX_PATH_DEPTH = 1000;
 
-  // TODO mb@media-style.com: should be conf injected?
+  // TODO should be conf injected?
   public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
   public static final int IO_FILE_BUFFER_SIZE = new HdfsConfiguration().getInt(
       DFSConfigKeys.IO_FILE_BUFFER_SIZE_KEY,
@@ -84,16 +84,6 @@ public class HdfsConstants {
   // An invalid transaction ID that will never be seen in a real namesystem.
   public static final long INVALID_TXID = -12345;
 
-  /**
-   * Distributed upgrade actions:
-   *
-   * 1. Get upgrade status. 2. Get detailed upgrade status. 3. Proceed with the
-   * upgrade if it is stuck, no matter what the status is.
-   */
-  public static enum UpgradeAction {
-    GET_STATUS, DETAILED_STATUS, FORCE_PROCEED;
-  }
-
   /**
    * URI Scheme for hdfs://namenode/ URIs.
    */

View File

@@ -50,8 +50,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Create
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
@@ -130,7 +128,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.io.Text;
 
 import com.google.protobuf.RpcController;
@@ -570,24 +567,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
   }
 
-  @Override
-  public DistributedUpgradeProgressResponseProto distributedUpgradeProgress(
-      RpcController controller, DistributedUpgradeProgressRequestProto req)
-      throws ServiceException {
-    try {
-      UpgradeStatusReport result = server.distributedUpgradeProgress(PBHelper
-          .convert(req.getAction()));
-      DistributedUpgradeProgressResponseProto.Builder builder = 
-          DistributedUpgradeProgressResponseProto.newBuilder();
-      if (result != null) {
-        builder.setReport(PBHelper.convert(result));
-      }
-      return builder.build();
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
   @Override
   public ListCorruptFileBlocksResponseProto listCorruptFileBlocks(
       RpcController controller, ListCorruptFileBlocksRequestProto req)

View File

@@ -42,7 +42,6 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -58,8 +57,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Concat
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto;
@@ -102,7 +99,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -128,8 +124,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     ProtocolMetaInterface, ClientProtocol, Closeable, ProtocolTranslator {
   final private ClientNamenodeProtocolPB rpcProxy;
 
-  public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy)
-      throws IOException {
+  public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
     rpcProxy = proxy;
   }
 
@@ -564,21 +559,6 @@ public class ClientNamenodeProtocolTranslatorPB implements
     }
   }
 
-  @Override
-  public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
-      throws IOException {
-    DistributedUpgradeProgressRequestProto req = 
-        DistributedUpgradeProgressRequestProto.newBuilder().
-        setAction(PBHelper.convert(action)).build();
-    try {
-      DistributedUpgradeProgressResponseProto res = rpcProxy
-          .distributedUpgradeProgress(null, req);
-      return res.hasReport() ? PBHelper.convert(res.getReport()) : null;
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
   @Override
   public CorruptFileBlocks listCorruptFileBlocks(String path, String cookie)
       throws IOException {

View File

@@ -41,8 +41,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeComm
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlocksRequestProto;
@@ -59,7 +57,6 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryProxy;
@@ -252,20 +249,6 @@ public class DatanodeProtocolClientSideTranslatorPB implements
     }
   }
 
-  @Override
-  public UpgradeCommand processUpgradeCommand(UpgradeCommand comm)
-      throws IOException {
-    ProcessUpgradeRequestProto req = ProcessUpgradeRequestProto.newBuilder()
-        .setCmd(PBHelper.convert(comm)).build();
-    ProcessUpgradeResponseProto resp;
-    try {
-      resp = rpcProxy.processUpgrade(NULL_CONTROLLER, req);
-    } catch (ServiceException se) {
-      throw ProtobufHelper.getRemoteException(se);
-    }
-    return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
-  }
-
   @Override
   public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
     ReportBadBlocksRequestProto.Builder builder = ReportBadBlocksRequestProto

View File

@@ -33,8 +33,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportR
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ProcessUpgradeResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterDatanodeResponseProto;
@@ -56,7 +54,6 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
@@ -211,25 +208,6 @@ public class DatanodeProtocolServerSideTranslatorPB implements
         .setInfo(PBHelper.convert(info)).build();
   }
 
-  @Override
-  public ProcessUpgradeResponseProto processUpgrade(RpcController controller,
-      ProcessUpgradeRequestProto request) throws ServiceException {
-    UpgradeCommand ret;
-    try {
-      UpgradeCommand cmd = request.hasCmd() ? PBHelper
-          .convert(request.getCmd()) : null;
-      ret = impl.processUpgradeCommand(cmd);
-    } catch (IOException e) {
-      throw new ServiceException(e);
-    }
-    ProcessUpgradeResponseProto.Builder builder = 
-        ProcessUpgradeResponseProto.newBuilder();
-    if (ret != null) {
-      builder.setCmd(PBHelper.convert(ret));
-    }
-    return builder.build();
-  }
-
   @Override
   public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
       ReportBadBlocksRequestProto request) throws ServiceException {

View File

@@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -47,7 +46,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Create
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpgradeActionProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
@@ -61,7 +59,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHe
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.UpgradeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@@ -96,7 +93,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto.NamenodeRoleProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReplicaStateProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.BlockKey;
@@ -106,7 +102,6 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
 import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
@@ -132,7 +127,6 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStat
 import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.util.DataChecksum;
@@ -640,8 +634,6 @@ public class PBHelper {
       return PBHelper.convert(proto.getKeyUpdateCmd());
     case RegisterCommand:
       return REG_CMD;
-    case UpgradeCommand:
-      return PBHelper.convert(proto.getUpgradeCmd());
     }
     return null;
   }
@@ -738,11 +730,6 @@
       builder.setCmdType(DatanodeCommandProto.Type.BlockCommand).setBlkCmd(
           PBHelper.convert((BlockCommand) datanodeCommand));
       break;
-    case DatanodeProtocol.DNA_UC_ACTION_REPORT_STATUS:
-    case DatanodeProtocol.DNA_UC_ACTION_START_UPGRADE:
-      builder.setCmdType(DatanodeCommandProto.Type.UpgradeCommand)
-          .setUpgradeCmd(PBHelper.convert((UpgradeCommand) datanodeCommand));
-      break;
     case DatanodeProtocol.DNA_UNKNOWN: //Not expected
     default:
       builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);
@@ -750,19 +737,6 @@
     return builder.build();
   }
 
-  public static UpgradeCommand convert(UpgradeCommandProto upgradeCmd) {
-    int action = UpgradeCommand.UC_ACTION_UNKNOWN;
-    switch (upgradeCmd.getAction()) {
-    case REPORT_STATUS:
-      action = UpgradeCommand.UC_ACTION_REPORT_STATUS;
-      break;
-    case START_UPGRADE:
-      action = UpgradeCommand.UC_ACTION_START_UPGRADE;
-    }
-    return new UpgradeCommand(action, upgradeCmd.getVersion(),
-        (short) upgradeCmd.getUpgradeStatus());
-  }
-
   public static KeyUpdateCommand convert(KeyUpdateCommandProto keyUpdateCmd) {
     return new KeyUpdateCommand(PBHelper.convert(keyUpdateCmd.getKeys()));
   }
@@ -852,28 +826,6 @@
         .build();
   }
 
-  public static UpgradeCommandProto convert(UpgradeCommand comm) {
-    UpgradeCommandProto.Builder builder = UpgradeCommandProto.newBuilder();
-    if (comm == null) {
-      return builder.setAction(UpgradeCommandProto.Action.UNKNOWN)
-          .setVersion(0).setUpgradeStatus(0).build();
-    }
-    builder.setVersion(comm.getVersion()).setUpgradeStatus(
-        comm.getCurrentStatus());
-    switch (comm.getAction()) {
-    case UpgradeCommand.UC_ACTION_REPORT_STATUS:
-      builder.setAction(UpgradeCommandProto.Action.REPORT_STATUS);
-      break;
-    case UpgradeCommand.UC_ACTION_START_UPGRADE:
-      builder.setAction(UpgradeCommandProto.Action.START_UPGRADE);
-      break;
-    default:
-      builder.setAction(UpgradeCommandProto.Action.UNKNOWN);
-      break;
-    }
-    return builder.build();
-  }
-
   public static ReceivedDeletedBlockInfo convert(
       ReceivedDeletedBlockInfoProto proto) {
     ReceivedDeletedBlockInfo.BlockStatus status = null;
@@ -1238,51 +1190,6 @@
     }
   }
 
-  public static UpgradeActionProto convert(
-      UpgradeAction a) {
-    switch (a) {
-    case GET_STATUS:
-      return UpgradeActionProto.GET_STATUS;
-    case DETAILED_STATUS:
-      return UpgradeActionProto.DETAILED_STATUS;
-    case FORCE_PROCEED:
-      return UpgradeActionProto.FORCE_PROCEED;
-    default:
-      throw new IllegalArgumentException("Unexpected UpgradeAction :" + a);
-    }
-  }
-
-  public static UpgradeAction convert(
-      UpgradeActionProto a) {
-    switch (a) {
-    case GET_STATUS:
-      return UpgradeAction.GET_STATUS;
-    case DETAILED_STATUS:
-      return UpgradeAction.DETAILED_STATUS;
-    case FORCE_PROCEED:
-      return UpgradeAction.FORCE_PROCEED;
-    default:
-      throw new IllegalArgumentException("Unexpected UpgradeAction :" + a);
-    }
-  }
-
-  public static UpgradeStatusReportProto convert(UpgradeStatusReport r) {
-    if (r == null)
-      return null;
-    return UpgradeStatusReportProto.newBuilder()
-        .setVersion(r.getVersion())
-        .setUpgradeStatus(r.getUpgradeStatus())
-        .setFinalized(r.isFinalized())
-        .build();
-  }
-
-  public static UpgradeStatusReport convert(UpgradeStatusReportProto r) {
-    if (r == null) return null;
-    return new UpgradeStatusReport(r.getVersion(),
-        (short) r.getUpgradeStatus(), r.getFinalized());
-  }
-
   public static CorruptFileBlocks convert(CorruptFileBlocksProto c) {
     if (c == null)
       return null;

View File

@@ -42,7 +42,6 @@ import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -561,10 +560,6 @@
       dn.finalizeUpgradeForPool(bp);
       break;
-    case UpgradeCommand.UC_ACTION_START_UPGRADE:
-      // start distributed upgrade here
-      LOG.warn("Distibuted upgrade is no longer supported");
-      break;
     case DatanodeProtocol.DNA_RECOVERBLOCK:
       String who = "NameNode at " + actor.getNNSocketAddress();
       dn.recoverBlocks(who, ((BlockRecoveryCommand)cmd).getRecoveringBlocks());

View File

@@ -60,7 +60,6 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -89,7 +88,6 @@ import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -108,7 +106,6 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -740,13 +737,6 @@ class NameNodeRpcServer implements NamenodeProtocols {
     namesystem.finalizeUpgrade();
   }
 
-  @Override // ClientProtocol
-  public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
-      throws IOException {
-    throw new UnsupportedActionException(
-        "Deprecated method. No longer supported");
-  }
-
   @Override // ClientProtocol
   public void metaSave(String filename) throws IOException {
     namesystem.checkOperation(OperationCategory.UNCHECKED);
@@ -917,13 +907,6 @@ class NameNodeRpcServer implements NamenodeProtocols {
     return namesystem.getNamespaceInfo();
   }
 
-  @Override // DatanodeProtocol
-  public UpgradeCommand processUpgradeCommand(UpgradeCommand comm)
-      throws IOException {
-    throw new UnsupportedActionException(
-        "Deprecated method, no longer supported");
-  }
-
   /**
    * Verifies the given registration.
    * 

View File

@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -49,7 +48,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.Text;

View File

@@ -72,8 +72,6 @@ public interface DatanodeProtocol {
   final static int DNA_RECOVERBLOCK = 6;  // request a block recovery
   final static int DNA_ACCESSKEYUPDATE = 7;  // update access key
   final static int DNA_BALANCERBANDWIDTHUPDATE = 8; // update balancer bandwidth
-  final static int DNA_UC_ACTION_REPORT_STATUS = 100; // Report upgrade status
-  final static int DNA_UC_ACTION_START_UPGRADE = 101; // start upgrade
 
   /**
    * Register Datanode.
@@ -150,18 +148,6 @@
   public NamespaceInfo versionRequest() throws IOException;
 
-  /**
-   * This is a very general way to send a command to the name-node during
-   * distributed upgrade process.
-   *
-   * The generosity is because the variety of upgrade commands is unpredictable.
-   * The reply from the name-node is also received in the form of an upgrade
-   * command.
-   *
-   * @return a reply in the form of an upgrade command
-   */
-  UpgradeCommand processUpgradeCommand(UpgradeCommand comm) throws IOException;
-
   /**
    * same as {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#reportBadBlocks(LocatedBlock[])}
   * }
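For context, a minimal sketch (not part of this commit) of the round-trip the removed processUpgradeCommand supported. The reporter class below is hypothetical; DatanodeProtocol.processUpgradeCommand, UpgradeCommand, and its action constants are the APIs deleted in this commit (the full UpgradeCommand class is removed in the next file).

import java.io.IOException;

import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;

// Hypothetical pre-HDFS-3832 datanode-side code reporting upgrade progress.
class UpgradeStatusReporter {
  static void reportStatus(DatanodeProtocol namenode, int upgradeVersion,
      short percentDone) throws IOException {
    // Wrap local progress in the generic command and send it to the namenode.
    UpgradeCommand report = new UpgradeCommand(
        UpgradeCommand.UC_ACTION_REPORT_STATUS, upgradeVersion, percentDone);
    // The reply, if any, arrives as another UpgradeCommand.
    UpgradeCommand reply = namenode.processUpgradeCommand(report);
    if (reply != null) {
      System.out.println("NameNode reply status: " + reply.getCurrentStatus());
    }
  }
}

As with the client-side RPC, the NameNodeRpcServer implementation above already rejected this call with UnsupportedActionException, so removing the protocol method loses no functionality.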

View File

@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * This as a generic distributed upgrade command.
- *
- * During the upgrade cluster components send upgrade commands to each other
- * in order to obtain or share information with them.
- * It is supposed that each upgrade defines specific upgrade command by
- * deriving them from this class.
- * The upgrade command contains version of the upgrade, which is verified
- * on the receiving side and current status of the upgrade.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class UpgradeCommand extends DatanodeCommand {
-  public final static int UC_ACTION_UNKNOWN = DatanodeProtocol.DNA_UNKNOWN;
-  public final static int UC_ACTION_REPORT_STATUS =
-      DatanodeProtocol.DNA_UC_ACTION_REPORT_STATUS;
-  public final static int UC_ACTION_START_UPGRADE =
-      DatanodeProtocol.DNA_UC_ACTION_START_UPGRADE;
-
-  private int version;
-  private short upgradeStatus;
-
-  public UpgradeCommand() {
-    super(UC_ACTION_UNKNOWN);
-    this.version = 0;
-    this.upgradeStatus = 0;
-  }
-
-  public UpgradeCommand(int action, int version, short status) {
-    super(action);
-    this.version = version;
-    this.upgradeStatus = status;
-  }
-
-  public int getVersion() {
-    return this.version;
-  }
-
-  public short getCurrentStatus() {
-    return this.upgradeStatus;
-  }
-}

View File

@@ -296,19 +296,6 @@ message FinalizeUpgradeRequestProto { // no parameters
 message FinalizeUpgradeResponseProto { // void response
 }
 
-enum UpgradeActionProto {
-  GET_STATUS = 1;
-  DETAILED_STATUS = 2;
-  FORCE_PROCEED = 3;
-}
-
-message DistributedUpgradeProgressRequestProto {
-  required UpgradeActionProto action = 1;
-}
-message DistributedUpgradeProgressResponseProto {
-  optional UpgradeStatusReportProto report = 1;
-}
-
 message ListCorruptFileBlocksRequestProto {
   required string path = 1;
   optional string cookie = 2;
@@ -490,8 +477,6 @@ service ClientNamenodeProtocol {
   rpc refreshNodes(RefreshNodesRequestProto) returns(RefreshNodesResponseProto);
   rpc finalizeUpgrade(FinalizeUpgradeRequestProto)
       returns(FinalizeUpgradeResponseProto);
-  rpc distributedUpgradeProgress(DistributedUpgradeProgressRequestProto)
-      returns(DistributedUpgradeProgressResponseProto);
   rpc listCorruptFileBlocks(ListCorruptFileBlocksRequestProto)
       returns(ListCorruptFileBlocksResponseProto);
   rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto);

View File

@@ -60,7 +60,7 @@ message DatanodeCommandProto {
     FinalizeCommand = 3;
     KeyUpdateCommand = 4;
     RegisterCommand = 5;
-    UpgradeCommand = 6;
+    UnusedUpgradeCommand = 6;
     NullDatanodeCommand = 7;
   }
 
@@ -74,7 +74,6 @@ message DatanodeCommandProto {
   optional FinalizeCommandProto finalizeCmd = 5;
   optional KeyUpdateCommandProto keyUpdateCmd = 6;
   optional RegisterCommandProto registerCmd = 7;
-  optional UpgradeCommandProto upgradeCmd = 8;
 }
 
 /**
@@ -131,20 +130,6 @@ message RegisterCommandProto {
   // void
 }
 
-/**
- * Generic distributed upgrade Command
- */
-message UpgradeCommandProto {
-  enum Action {
-    UNKNOWN = 0;          // Unknown action
-    REPORT_STATUS = 100;  // Report upgrade status
-    START_UPGRADE = 101;  // Start upgrade
-  }
-  required Action action = 1;        // Upgrade action
-  required uint32 version = 2;       // Version of the upgrade
-  required uint32 upgradeStatus = 3; // % completed in range 0 & 100
-}
-
 /**
  * registration - Information of the datanode registering with the namenode
  */
@@ -302,20 +287,6 @@ message ErrorReportRequestProto {
 message ErrorReportResponseProto {
 }
 
-/**
- * cmd - Upgrade command sent from datanode to namenode
- */
-message ProcessUpgradeRequestProto {
-  optional UpgradeCommandProto cmd = 1;
-}
-
-/**
- * cmd - Upgrade command sent from namenode to datanode
- */
-message ProcessUpgradeResponseProto {
-  optional UpgradeCommandProto cmd = 1;
-}
-
 /**
  * blocks - list of blocks that are reported as corrupt
  */
@@ -388,12 +359,6 @@ service DatanodeProtocolService {
    */
   rpc versionRequest(VersionRequestProto) returns(VersionResponseProto);
 
-  /**
-   * Generic way to send commands from datanode to namenode during
-   * distributed upgrade process.
-   */
-  rpc processUpgrade(ProcessUpgradeRequestProto) returns(ProcessUpgradeResponseProto);
-
   /**
    * Report corrupt blocks at the specified location
    */

View File

@@ -210,15 +210,6 @@ message DirectoryListingProto {
   required uint32 remainingEntries = 2;
 }
 
-/**
- * Status of current cluster upgrade from one version to another
- */
-message UpgradeStatusReportProto {
-  required uint32 version = 1;
-  required uint32 upgradeStatus = 2; // % completed in range 0 & 100
-  required bool finalized = 3;
-}
-
 /**
  * Common node information shared by all the nodes in the cluster
  */