diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index ba995a4ea31..c89a8dee748 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -32,6 +32,8 @@ Trunk (unreleased changes) HDFS-2647. Used protobuf based RPC for InterDatanodeProtocol, ClientDatanodeProtocol, JournalProtocol, NamenodeProtocol. (suresh) + HDFS-2663. Handle protobuf optional parameters correctly. (suresh) + IMPROVEMENTS HADOOP-7524 Change RPC to allow multiple protocols including multuple diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 10527f5ac10..1204a76c4e9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -24,6 +24,9 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto; @@ -52,6 +55,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdd import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.Builder; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto; @@ -124,6 +128,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto; import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable; +import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.ProtocolSignature; import org.apache.hadoop.ipc.RPC; @@ -208,11 +213,16 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements RpcController controller, GetBlockLocationsRequestProto req) throws ServiceException { try { - return GetBlockLocationsResponseProto - .newBuilder() - .setLocations( - PBHelper.convert(server.getBlockLocations(req.getSrc(), - req.getOffset(), 
req.getLength()))).build(); + LocatedBlocks b = server.getBlockLocations(req.getSrc(), req.getOffset(), + req.getLength()); + Builder builder = GetBlockLocationsResponseProto + .newBuilder(); + if (b != null) { + builder.setLocations(PBHelper.convert(b)); + } + return builder.build(); } catch (IOException e) { throw new ServiceException(e); } @@ -325,7 +335,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements return AddBlockResponseProto.newBuilder().setBlock( PBHelper.convert( server.addBlock(req.getSrc(), req.getClientName(), - PBHelper.convert(req.getPrevious()), + req.hasPrevious() ? PBHelper.convert(req.getPrevious()) : null, PBHelper.convert( (DatanodeInfoProto[]) req.getExcludeNodesList().toArray())))) .build(); @@ -594,10 +604,14 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements RpcController controller, DistributedUpgradeProgressRequestProto req) throws ServiceException { try { - UpgradeStatusReportProto result = PBHelper.convert(server - .distributedUpgradeProgress(PBHelper.convert(req.getAction()))); - return DistributedUpgradeProgressResponseProto.newBuilder() - .setReport(result).build(); + UpgradeStatusReport result = server.distributedUpgradeProgress(PBHelper + .convert(req.getAction())); + DistributedUpgradeProgressResponseProto.Builder builder = + DistributedUpgradeProgressResponseProto.newBuilder(); + if (result != null) { + builder.setReport(PBHelper.convert(result)); + } + return builder.build(); } catch (IOException e) { throw new ServiceException(e); } @@ -636,9 +650,13 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements public GetFileInfoResponseProto getFileInfo(RpcController controller, GetFileInfoRequestProto req) throws ServiceException { try { - HdfsFileStatusProto result = - PBHelper.convert(server.getFileInfo(req.getSrc())); - return GetFileInfoResponseProto.newBuilder().setFs(result).build(); + HdfsFileStatus res = server.getFileInfo(req.getSrc()); + GetFileInfoResponseProto.Builder builder = + GetFileInfoResponseProto.newBuilder(); + if (res != null) { + builder.setFs(PBHelper.convert(res)); + } + return builder.build(); } catch (IOException e) { throw new ServiceException(e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 83aca3987b8..75fbc7bc8e4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -83,14 +83,17 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Create import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto; import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto; @@ -205,7 +208,10 @@ public class ClientNamenodeProtocolTranslatorPB implements .setLength(length) .build(); try { - return PBHelper.convert(rpcProxy.getBlockLocations(null, req).getLocations()); + GetBlockLocationsResponseProto resp = rpcProxy.getBlockLocations(null, + req); + return resp.hasLocations() ? + PBHelper.convert(resp.getLocations()) : null; } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } @@ -329,12 +335,15 @@ public class ClientNamenodeProtocolTranslatorPB implements throws AccessControlException, FileNotFoundException, NotReplicatedYetException, SafeModeException, UnresolvedLinkException, IOException { - AddBlockRequestProto req = AddBlockRequestProto.newBuilder().setSrc(src) - .setClientName(clientName).setPrevious(PBHelper.convert(previous)) - .addAllExcludeNodes(Arrays.asList(PBHelper.convert(excludeNodes))) - .build(); + AddBlockRequestProto.Builder builder = AddBlockRequestProto.newBuilder(); + builder.setSrc(src) + .setClientName(clientName) + .addAllExcludeNodes(Arrays.asList(PBHelper.convert(excludeNodes))); + if (previous != null) { + builder.setPrevious(PBHelper.convert(previous)); + } try { - return PBHelper.convert(rpcProxy.addBlock(null, req).getBlock()); + return PBHelper.convert(rpcProxy.addBlock(null, builder.build()).getBlock()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } @@ -615,8 +624,9 @@ public class ClientNamenodeProtocolTranslatorPB implements DistributedUpgradeProgressRequestProto.newBuilder(). setAction(PBHelper.convert(action)).build(); try { - return PBHelper.convert( - rpcProxy.distributedUpgradeProgress(null, req).getReport()); + DistributedUpgradeProgressResponseProto res = rpcProxy + .distributedUpgradeProgress(null, req); + return res.hasReport() ? PBHelper.convert(res.getReport()) : null; } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } @@ -653,7 +663,8 @@ public class ClientNamenodeProtocolTranslatorPB implements GetFileInfoRequestProto req = GetFileInfoRequestProto.newBuilder() .setSrc(src).build(); try { - return PBHelper.convert(rpcProxy.getFileInfo(null, req).getFs()); + GetFileInfoResponseProto res = rpcProxy.getFileInfo(null, req); + return res.hasFs() ? 
PBHelper.convert(res.getFs()) : null; } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java index e05a884cd6f..66db4c39cc2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java @@ -204,7 +204,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements DatanodeProtocol, } catch (ServiceException se) { throw ProtobufHelper.getRemoteException(se); } - return PBHelper.convert(resp.getCmd()); + return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null; } @Override @@ -262,7 +262,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements DatanodeProtocol, } catch (ServiceException se) { throw ProtobufHelper.getRemoteException(se); } - return PBHelper.convert(resp.getCmd()); + return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java index 25f08aee5d5..2dbf9150e92 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportR import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto; @@ -108,7 +109,9 @@ public class DatanodeProtocolServerSideTranslatorPB implements .newBuilder(); if (cmds != null) { for (int i = 0; i < cmds.length; i++) { - builder.addCmds(i, PBHelper.convert(cmds[i])); + if (cmds[i] != null) { + builder.addCmds(PBHelper.convert(cmds[i])); + } } } return builder.build(); @@ -129,8 +132,12 @@ public class DatanodeProtocolServerSideTranslatorPB implements } catch (IOException e) { throw new ServiceException(e); } - return BlockReportResponseProto.newBuilder().setCmd(PBHelper.convert(cmd)) - .build(); + BlockReportResponseProto.Builder builder = + BlockReportResponseProto.newBuilder(); + if (cmd != null) { + builder.setCmd(PBHelper.convert(cmd)); + } + return builder.build(); } @Override @@ -180,14 +187,20 @@ public class DatanodeProtocolServerSideTranslatorPB implements @Override public ProcessUpgradeResponseProto processUpgrade(RpcController controller, ProcessUpgradeRequestProto request) 
throws ServiceException { - UpgradeCommand cmd; + UpgradeCommand ret; try { - cmd = impl.processUpgradeCommand(PBHelper.convert(request.getCmd())); + UpgradeCommand cmd = request.hasCmd() ? PBHelper + .convert(request.getCmd()) : null; + ret = impl.processUpgradeCommand(cmd); } catch (IOException e) { throw new ServiceException(e); } - return ProcessUpgradeResponseProto.newBuilder() - .setCmd(PBHelper.convert(cmd)).build(); + ProcessUpgradeResponseProto.Builder builder = + ProcessUpgradeResponseProto.newBuilder(); + if (ret != null) { + builder.setCmd(PBHelper.convert(ret)); + } + return builder.build(); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index ea0df18d735..34e03328411 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -127,6 +127,10 @@ import com.google.protobuf.ByteString; /** * Utilities for converting protobuf classes to and from implementation classes. + * + * Note that when converting from an internal type to a protobuf type, the + * converter never returns null for the protobuf type. The check for the + * internal type being null must be done before calling the convert() method. */ public class PBHelper { private static final RegisterCommandProto REG_CMD_PROTO = @@ -367,6 +371,7 @@ public class PBHelper { } public static NamenodeCommand convert(NamenodeCommandProto cmd) { + if (cmd == null) return null; switch (cmd.getType()) { case CheckPointCommand: CheckpointCommandProto chkPt = cmd.getCheckpointCmd(); @@ -423,7 +428,8 @@ public class PBHelper { if (di == null) return null; return new DatanodeInfo( PBHelper.convert(di.getId()), - di.getLocation(), di.getHostName(), + di.hasLocation() ? di.getLocation() : null, + di.hasHostName() ? di.getHostName() : null, di.getCapacity(), di.getDfsUsed(), di.getRemaining(), di.getBlockPoolUsed() , di.getLastUpdate() , di.getXceiverCount() , PBHelper.convert(di.getAdminState())); @@ -431,10 +437,16 @@ public class PBHelper { static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) { if (di == null) return null; - return DatanodeInfoProto.newBuilder(). + DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder(); + if (di.getHostName() != null) { + builder.setHostName(di.getHostName()); + } + if (di.getNetworkLocation() != null) { + builder.setLocation(di.getNetworkLocation()); + } + + return builder. setId(PBHelper.convert((DatanodeID) di)). - setLocation(di.getNetworkLocation()). - setHostName(di.getHostName()). setCapacity(di.getCapacity()). setDfsUsed(di.getDfsUsed()). setRemaining(di.getRemaining()). 
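
The PBHelper note above is the convention the rest of this patch follows: a null internal value maps to an unset optional field, and an unset optional field maps back to null. A minimal sketch of the pattern, assuming a hypothetical message FooProto with an optional string field "bar" and a matching internal class Foo (neither is part of this patch):

    // Internal type -> protobuf: skip the setter when the value is null;
    // generated protobuf setters throw NullPointerException on null.
    public static FooProto convert(Foo foo) {
      FooProto.Builder builder = FooProto.newBuilder();
      if (foo.getBar() != null) {
        builder.setBar(foo.getBar());
      }
      return builder.build();
    }

    // Protobuf -> internal type: check hasBar() before calling getBar();
    // for an unset optional field, getBar() returns the field default
    // ("" for strings), never null.
    public static Foo convert(FooProto proto) {
      return new Foo(proto.hasBar() ? proto.getBar() : null);
    }

Under this convention the translators test has*() on a response before converting, rather than relying on convert() to tolerate missing fields.
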
@@ -774,9 +786,14 @@ public class PBHelper { public static ReceivedDeletedBlockInfoProto convert( ReceivedDeletedBlockInfo receivedDeletedBlockInfo) { - return ReceivedDeletedBlockInfoProto.newBuilder() - .setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock())) - .setDeleteHint(receivedDeletedBlockInfo.getDelHints()).build(); + ReceivedDeletedBlockInfoProto.Builder builder = + ReceivedDeletedBlockInfoProto.newBuilder(); + + if (receivedDeletedBlockInfo.getDelHints() != null) { + builder.setDeleteHint(receivedDeletedBlockInfo.getDelHints()); + } + return builder.setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock())) + .build(); } public static UpgradeCommandProto convert(UpgradeCommand comm) { @@ -800,7 +817,7 @@ public class PBHelper { public static ReceivedDeletedBlockInfo convert( ReceivedDeletedBlockInfoProto proto) { return new ReceivedDeletedBlockInfo(PBHelper.convert(proto.getBlock()), - proto.getDeleteHint()); + proto.hasDeleteHint() ? proto.getDeleteHint() : null); } public static NamespaceInfoProto convert(NamespaceInfo info) { @@ -860,13 +877,10 @@ public class PBHelper { // LocatedBlocks public static LocatedBlocks convert(LocatedBlocksProto lb) { - if (lb == null) { - return null; - } return new LocatedBlocks( lb.getFileLength(), lb.getUnderConstruction(), PBHelper.convertLocatedBlock(lb.getBlocksList()), - PBHelper.convert(lb.getLastBlock()), + lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null, lb.getIsLastBlockComplete()); } @@ -874,11 +888,15 @@ public class PBHelper { if (lb == null) { return null; } - return LocatedBlocksProto.newBuilder(). - setFileLength(lb.getFileLength()). - setUnderConstruction(lb.isUnderConstruction()). - addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks())). - setLastBlock(PBHelper.convert(lb.getLastLocatedBlock())).setIsLastBlockComplete(lb.isLastBlockComplete()).build(); + LocatedBlocksProto.Builder builder = + LocatedBlocksProto.newBuilder(); + if (lb.getLastLocatedBlock() != null) { + builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock())); + } + return builder.setFileLength(lb.getFileLength()) + .setUnderConstruction(lb.isUnderConstruction()) + .addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks())) + .setIsLastBlockComplete(lb.isLastBlockComplete()).build(); } public static FsServerDefaults convert(FsServerDefaultsProto fs) { @@ -979,11 +997,16 @@ public class PBHelper { setPermission(PBHelper.convert(fs.getPermission())). setOwner(fs.getOwner()). setGroup(fs.getGroup()). - setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes())). 
setPath(ByteString.copyFrom(fs.getLocalNameInBytes())); - LocatedBlocks locations = null; + + if (fs.getSymlink() != null) { + builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes())); + } if (fs instanceof HdfsLocatedFileStatus) { - builder.setLocations(PBHelper.convert(locations)); + LocatedBlocks locations = ((HdfsLocatedFileStatus)fs).getBlockLocations(); + if (locations != null) { + builder.setLocations(PBHelper.convert(locations)); + } } return builder.build(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index 17b4d1d90ea..7a52460ef08 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -39,7 +39,7 @@ message GetBlockLocationsRequestProto { } message GetBlockLocationsResponseProto { - required LocatedBlocksProto locations = 1; + optional LocatedBlocksProto locations = 1; } message GetServerDefaultsRequestProto { // No parameters @@ -115,7 +115,7 @@ message AbandonBlockResponseProto { // void response message AddBlockRequestProto { required string src = 1; required string clientName = 2; - required ExtendedBlockProto previous = 3; + optional ExtendedBlockProto previous = 3; repeated DatanodeInfoProto excludeNodes = 4; } @@ -306,7 +306,7 @@ message DistributedUpgradeProgressRequestProto { required UpgradeActionProto action = 1; } message DistributedUpgradeProgressResponseProto { - required UpgradeStatusReportProto report = 1; + optional UpgradeStatusReportProto report = 1; } message ListCorruptFileBlocksRequestProto { @@ -330,7 +330,7 @@ message GetFileInfoRequestProto { } message GetFileInfoResponseProto { - required HdfsFileStatusProto fs = 1; + optional HdfsFileStatusProto fs = 1; } message GetFileLinkInfoRequestProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto index b98a2c2e978..71f609e40af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto @@ -170,7 +170,7 @@ message HeartbeatRequestProto { * cmds - Commands from namenode to datanode. 
*/ message HeartbeatResponseProto { - repeated DatanodeCommandProto cmds = 1; + repeated DatanodeCommandProto cmds = 1; // Returned commands can be null } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto index 9fbf2b969ae..cc45593b29f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto @@ -30,7 +30,8 @@ message ExtendedBlockProto { required string poolId = 1; // Block pool id - gloablly unique across clusters required uint64 blockId = 2; // the local id within a pool required uint64 generationStamp = 3; - optional uint64 numBytes = 4; // block len does not belong in ebid - here for historical reasons + optional uint64 numBytes = 4 [default = 0]; // len does not belong in ebid + // here for historical reasons } /** @@ -65,12 +66,12 @@ message DatanodeInfosProto { */ message DatanodeInfoProto { required DatanodeIDProto id = 1; - optional uint64 capacity = 2; - optional uint64 dfsUsed = 3; - optional uint64 remaining = 4; - optional uint64 blockPoolUsed = 5; - optional uint64 lastUpdate = 6; - optional uint32 xceiverCount = 7; + optional uint64 capacity = 2 [default = 0]; + optional uint64 dfsUsed = 3 [default = 0]; + optional uint64 remaining = 4 [default = 0]; + optional uint64 blockPoolUsed = 5 [default = 0]; + optional uint64 lastUpdate = 6 [default = 0]; + optional uint32 xceiverCount = 7 [default = 0]; optional string location = 8; optional string hostName = 9; enum AdminState { @@ -79,7 +80,7 @@ message DatanodeInfoProto { DECOMMISSIONED = 2; } - optional AdminState adminState = 10; + optional AdminState adminState = 10 [default = NORMAL]; } /** @@ -162,8 +163,8 @@ message HdfsFileStatusProto { optional bytes symlink = 9; // if symlink, target encoded java UTF8 // Optional fields for file - optional uint32 block_replication = 10; // Actually a short - only 16bits used - optional uint64 blocksize = 11; + optional uint32 block_replication = 10 [default = 0]; // only 16bits used + optional uint64 blocksize = 11 [default = 0]; optional LocatedBlocksProto locations = 12; // suppled only if asked by client } @@ -218,7 +219,7 @@ message NamenodeRegistrationProto { CHECKPOINT = 3; } required StorageInfoProto storageInfo = 3; // Node information - optional NamenodeRoleProto role = 4; // Namenode role + optional NamenodeRoleProto role = 4 [default = NAMENODE]; // Namenode role } /** @@ -264,7 +265,7 @@ message CheckpointCommandProto { message BlockProto { required uint64 blockId = 1; required uint64 genStamp = 2; - optional uint64 numBytes = 3; + optional uint64 numBytes = 3 [default = 0]; } /** @@ -313,7 +314,7 @@ message NamespaceInfoProto { message BlockKeyProto { required uint32 keyId = 1; // Key identifier required uint64 expiryDate = 2; // Expiry time in milliseconds - required bytes keyBytes = 3; // Key secret + optional bytes keyBytes = 3; // Key secret } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index ee3e46998ba..e50da8d748a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -507,6 +507,11 @@ public class MiniDFSCluster { this.waitSafeMode = waitSafeMode; // use alternate RPC engine if spec'd + /* + Turned off - see 
HDFS-2647 and HDFS-2660 for related comments. + This can be turned on when Avro RPC is enabled using a mechanism + similar to protobuf's. + String rpcEngineName = System.getProperty("hdfs.rpc.engine"); if (rpcEngineName != null && !"".equals(rpcEngineName)) { @@ -530,6 +535,7 @@ public class MiniDFSCluster { conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION, false); } + */ int replication = conf.getInt(DFS_REPLICATION_KEY, 3); conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java index 350d6ac52a1..7f4ad2f023e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java @@ -28,9 +28,16 @@ public class TestDfsOverAvroRpc extends TestLocalDFS { @Test(timeout=20000) public void testWorkingDirectory() throws IOException { + /* + Test turned off - see HDFS-2647 and HDFS-2660 for related comments. + This test can be turned on when Avro RPC is enabled using a mechanism + similar to protobuf's. + */ + /* System.setProperty("hdfs.rpc.engine", "org.apache.hadoop.ipc.AvroRpcEngine"); super.testWorkingDirectory(); + */ } }
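
A closing note on the [default = ...] annotations added to hdfs.proto: a proto2 default changes only what the generated getter returns when the field is unset; it affects neither the wire encoding nor the has*() methods. A small sketch, assuming the generated HdfsProtos.ExtendedBlockProto class built from the message above (the demo class name is hypothetical):

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;

    public class OptionalFieldDemo {
      public static void main(String[] args) {
        // Set only the required fields; numBytes is deliberately left unset.
        ExtendedBlockProto b = ExtendedBlockProto.newBuilder()
            .setPoolId("pool-1")
            .setBlockId(1L)
            .setGenerationStamp(1001L)
            .build();
        System.out.println(b.hasNumBytes()); // false - the field was never set
        System.out.println(b.getNumBytes()); // 0 - the [default = 0] value
      }
    }

This split is what the patch relies on: message-typed fields (fs, report, cmd, locations) are checked with has*() and mapped to null, while scalar fields (capacity, numBytes, blocksize) can be read unconditionally and fall back to their declared defaults.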