HDFS-2663. Optional protobuf parameters are not handled correctly. Contributed by Suresh Srinivas.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1213985 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2011-12-13 23:31:44 +00:00
parent 6a609cb471
commit 3954a2fb1c
9 changed files with 138 additions and 69 deletions

CHANGES.txt

@@ -37,6 +37,9 @@ Trunk (unreleased changes)
     HDFS-234. Integration with BookKeeper logging system. (Ivan Kelly
     via jitendra)
 
+    HDFS-2663. Optional protobuf parameters are not handled correctly.
+    (suresh)
+
   IMPROVEMENTS
 
   HADOOP-7524 Change RPC to allow multiple protocols including multuple

ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -24,6 +24,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@@ -52,6 +55,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdd
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.Builder;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
@@ -124,6 +128,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto;
 import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
+import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
@@ -208,11 +213,16 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RpcController controller, GetBlockLocationsRequestProto req)
       throws ServiceException {
     try {
-      return GetBlockLocationsResponseProto
-          .newBuilder()
-          .setLocations(
-              PBHelper.convert(server.getBlockLocations(req.getSrc(),
-                  req.getOffset(), req.getLength()))).build();
+      LocatedBlocks b = server.getBlockLocations(req.getSrc(), req.getOffset(),
+          req.getLength());
+      Builder builder = GetBlockLocationsResponseProto
+          .newBuilder();
+      if (b != null) {
+        builder.setLocations(
+            PBHelper.convert(server.getBlockLocations(req.getSrc(),
+                req.getOffset(), req.getLength()))).build();
+      }
+      return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -325,7 +335,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       return AddBlockResponseProto.newBuilder().setBlock(
           PBHelper.convert(
               server.addBlock(req.getSrc(), req.getClientName(),
-                  PBHelper.convert(req.getPrevious()),
+                  req.hasPrevious() ? PBHelper.convert(req.getPrevious()) : null,
                   PBHelper.convert(
                       (DatanodeInfoProto[]) req.getExcludeNodesList().toArray())))
           .build();
@@ -594,10 +604,14 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RpcController controller, DistributedUpgradeProgressRequestProto req)
       throws ServiceException {
     try {
-      UpgradeStatusReportProto result = PBHelper.convert(server
-          .distributedUpgradeProgress(PBHelper.convert(req.getAction())));
-      return DistributedUpgradeProgressResponseProto.newBuilder()
-          .setReport(result).build();
+      UpgradeStatusReport result = server.distributedUpgradeProgress(PBHelper
+          .convert(req.getAction()));
+      DistributedUpgradeProgressResponseProto.Builder builder = 
+          DistributedUpgradeProgressResponseProto.newBuilder();
+      if (result != null) {
+        builder.setReport(PBHelper.convert(result));
+      }
+      return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -636,9 +650,13 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public GetFileInfoResponseProto getFileInfo(RpcController controller,
       GetFileInfoRequestProto req) throws ServiceException {
     try {
-      HdfsFileStatusProto result =
-          PBHelper.convert(server.getFileInfo(req.getSrc()));
-      return GetFileInfoResponseProto.newBuilder().setFs(result).build();
+      HdfsFileStatus res = server.getFileInfo(req.getSrc());
+      GetFileInfoResponseProto.Builder builder = 
+          GetFileInfoResponseProto.newBuilder();
+      if (res != null) {
+        builder.setFs(PBHelper.convert(res));
+      }
+      return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
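All four server-side hunks above apply the same fix: fetch the possibly-null result from the namesystem first, and call the generated set*() method only when that result is non-null, leaving the now-optional response field unset otherwise. The guard is necessary because protobuf-generated builders throw NullPointerException when handed null. A minimal, dependency-free sketch of the pattern (FileStatusResponse and its Builder are illustrative stand-ins, not Hadoop's generated classes):

public class ServerSideGuardDemo {
  /** Hand-rolled mimic of a generated message with one optional field. */
  static final class FileStatusResponse {
    final String fs; // null here models "field left unset"
    FileStatusResponse(String fs) { this.fs = fs; }

    static final class Builder {
      private String fs;
      Builder setFs(String value) {
        if (value == null) {
          throw new NullPointerException(); // generated setters do this
        }
        this.fs = value;
        return this;
      }
      FileStatusResponse build() { return new FileStatusResponse(fs); }
    }
  }

  /** The fixed pattern: guard the nullable value, leave the field unset. */
  static FileStatusResponse respond(String statusOrNull) {
    FileStatusResponse.Builder builder = new FileStatusResponse.Builder();
    if (statusOrNull != null) {
      builder.setFs(statusOrNull);
    }
    return builder.build();
  }

  public static void main(String[] args) {
    System.out.println(respond(null).fs);    // null: unset, and no NPE thrown
    System.out.println(respond("found").fs); // found
  }
}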

ClientNamenodeProtocolTranslatorPB.java

@@ -83,14 +83,17 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Create
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;
@@ -205,7 +208,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
         .setLength(length)
         .build();
     try {
-      return PBHelper.convert(rpcProxy.getBlockLocations(null, req).getLocations());
+      GetBlockLocationsResponseProto resp = rpcProxy.getBlockLocations(null,
+          req);
+      return resp.hasLocations() ? 
+          PBHelper.convert(resp.getLocations()) : null;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -329,12 +335,15 @@ public class ClientNamenodeProtocolTranslatorPB implements
       throws AccessControlException, FileNotFoundException,
       NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
       IOException {
-    AddBlockRequestProto req = AddBlockRequestProto.newBuilder().setSrc(src)
-        .setClientName(clientName).setPrevious(PBHelper.convert(previous))
-        .addAllExcludeNodes(Arrays.asList(PBHelper.convert(excludeNodes)))
-        .build();
+    AddBlockRequestProto.Builder builder = AddBlockRequestProto.newBuilder();
+    builder.setSrc(src)
+        .setClientName(clientName)
+        .addAllExcludeNodes(Arrays.asList(PBHelper.convert(excludeNodes)));
+    if (previous != null) {
+      builder.setPrevious(PBHelper.convert(previous));
+    }
     try {
-      return PBHelper.convert(rpcProxy.addBlock(null, req).getBlock());
+      return PBHelper.convert(rpcProxy.addBlock(null, builder.build()).getBlock());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -615,8 +624,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
         DistributedUpgradeProgressRequestProto.newBuilder().
         setAction(PBHelper.convert(action)).build();
     try {
-      return PBHelper.convert(
-          rpcProxy.distributedUpgradeProgress(null, req).getReport());
+      DistributedUpgradeProgressResponseProto res = rpcProxy
+          .distributedUpgradeProgress(null, req);
+      return res.hasReport() ? PBHelper.convert(res.getReport()) : null;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -653,7 +663,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
     GetFileInfoRequestProto req = GetFileInfoRequestProto.newBuilder()
         .setSrc(src).build();
     try {
-      return PBHelper.convert(rpcProxy.getFileInfo(null, req).getFs());
+      GetFileInfoResponseProto res = rpcProxy.getFileInfo(null, req);
+      return res.hasFs() ? PBHelper.convert(res.getFs()) : null;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
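The client-side hunks are the mirror image of the server-side ones: the translator must consult the generated has*() accessor, because calling get*() on an unset optional field returns the field's default value, never null, which is exactly how the old code manufactured bogus results for absent files and reports. A self-contained sketch of that contract, again with a made-up stand-in type:

public class ClientSideHasCheckDemo {
  /** Hand-rolled mimic of a generated message with one optional field. */
  static final class FileInfoResponse {
    private final String fs; // null backing value models "unset"
    FileInfoResponse(String fs) { this.fs = fs; }
    boolean hasFs() { return fs != null; }
    String getFs() { return fs != null ? fs : ""; } // default value, never null
  }

  /** The fixed translator pattern: map "unset" back to a Java null. */
  static String getFileInfo(FileInfoResponse res) {
    return res.hasFs() ? res.getFs() : null;
  }

  public static void main(String[] args) {
    FileInfoResponse missing = new FileInfoResponse(null);
    // The pre-patch equivalent called getFs() unconditionally and turned
    // "file does not exist" into an empty default status:
    System.out.println("broken: [" + missing.getFs() + "]"); // broken: []
    System.out.println("fixed: " + getFileInfo(missing));    // fixed: null
  }
}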

DatanodeProtocolClientSideTranslatorPB.java

@@ -204,7 +204,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements DatanodeProtocol,
     } catch (ServiceException se) {
       throw ProtobufHelper.getRemoteException(se);
     }
-    return PBHelper.convert(resp.getCmd());
+    return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
   }
 
   @Override
@@ -262,7 +262,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements DatanodeProtocol,
     } catch (ServiceException se) {
       throw ProtobufHelper.getRemoteException(se);
     }
-    return PBHelper.convert(resp.getCmd());
+    return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
   }
 
   @Override

DatanodeProtocolServerSideTranslatorPB.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportR
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
@@ -108,7 +109,9 @@ public class DatanodeProtocolServerSideTranslatorPB implements
         .newBuilder();
     if (cmds != null) {
       for (int i = 0; i < cmds.length; i++) {
-        builder.addCmds(i, PBHelper.convert(cmds[i]));
+        if (cmds[i] != null) {
+          builder.addCmds(PBHelper.convert(cmds[i]));
+        }
       }
     }
     return builder.build();
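Besides skipping null commands, the hunk above switches from positional addCmds(i, ...) to plain appending: once elements are skipped, the loop index no longer matches the builder's size, and a positional add would throw IndexOutOfBoundsException (repeated protobuf fields also reject null elements outright). A dependency-free sketch of the corrected loop, with a hand-rolled builder standing in for the generated one:

import java.util.ArrayList;
import java.util.List;

public class RepeatedFieldDemo {
  /** Hand-rolled mimic of a builder for a repeated field. */
  static final class HeartbeatResponseBuilder {
    private final List<String> cmds = new ArrayList<>();
    HeartbeatResponseBuilder addCmds(String cmd) {
      if (cmd == null) {
        throw new NullPointerException(); // repeated fields reject null
      }
      cmds.add(cmd); // append-only, so skipped entries leave no holes
      return this;
    }
    List<String> build() { return cmds; }
  }

  public static void main(String[] args) {
    // The namenode may legitimately produce null commands in the array.
    String[] cmds = { "REGISTER", null, "FINALIZE" };
    HeartbeatResponseBuilder builder = new HeartbeatResponseBuilder();
    for (int i = 0; i < cmds.length; i++) {
      if (cmds[i] != null) { // the guard this patch adds
        builder.addCmds(cmds[i]);
      }
    }
    System.out.println(builder.build()); // [REGISTER, FINALIZE]
  }
}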
@@ -129,8 +132,12 @@ public class DatanodeProtocolServerSideTranslatorPB implements
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    return BlockReportResponseProto.newBuilder().setCmd(PBHelper.convert(cmd))
-        .build();
+    BlockReportResponseProto.Builder builder = 
+        BlockReportResponseProto.newBuilder();
+    if (cmd != null) {
+      builder.setCmd(PBHelper.convert(cmd));
+    }
+    return builder.build();
   }
 
   @Override
@@ -180,14 +187,20 @@ public class DatanodeProtocolServerSideTranslatorPB implements
   @Override
   public ProcessUpgradeResponseProto processUpgrade(RpcController controller,
       ProcessUpgradeRequestProto request) throws ServiceException {
-    UpgradeCommand cmd;
+    UpgradeCommand ret;
     try {
-      cmd = impl.processUpgradeCommand(PBHelper.convert(request.getCmd()));
+      UpgradeCommand cmd = request.hasCmd() ? PBHelper
+          .convert(request.getCmd()) : null;
+      ret = impl.processUpgradeCommand(cmd);
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    return ProcessUpgradeResponseProto.newBuilder()
-        .setCmd(PBHelper.convert(cmd)).build();
+    ProcessUpgradeResponseProto.Builder builder = 
+        ProcessUpgradeResponseProto.newBuilder();
+    if (ret != null) {
+      builder.setCmd(PBHelper.convert(ret));
+    }
+    return builder.build();
   }
 
   @Override

PBHelper.java

@@ -127,6 +127,10 @@ import com.google.protobuf.ByteString;
 
 /**
  * Utilities for converting protobuf classes to and from implementation classes.
+ * 
+ * Note that when converting from an internal type to protobuf type, the
+ * converter never return null for protobuf type. The check for internal type
+ * being null must be done before calling the convert() method.
  */
 public class PBHelper {
   private static final RegisterCommandProto REG_CMD_PROTO = 
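A short sketch of the convention this new javadoc states, using hypothetical Report and ReportProto types rather than Hadoop's: the internal-to-proto converter neither accepts nor returns null, so each call site owns its own null check and simply leaves the optional field unset when the internal value is absent:

public class ConverterConventionDemo {
  static final class Report { final String msg; Report(String m) { msg = m; } }
  static final class ReportProto { final String msg; ReportProto(String m) { msg = m; } }

  /** Internal to proto: never returns null, never accepts null (would NPE). */
  static ReportProto convert(Report r) {
    return new ReportProto(r.msg);
  }

  /** Call-site pattern: the null check happens before convert() is called. */
  static ReportProto buildResponseField(Report maybeAbsent) {
    ReportProto field = null;  // models leaving the optional field unset
    if (maybeAbsent != null) { // guard owned by the caller, per the javadoc
      field = convert(maybeAbsent);
    }
    return field;
  }

  public static void main(String[] args) {
    System.out.println(buildResponseField(new Report("ok")).msg); // ok
    System.out.println(buildResponseField(null));                 // null (unset)
  }
}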
@@ -370,6 +374,7 @@ public class PBHelper {
   }
 
   public static NamenodeCommand convert(NamenodeCommandProto cmd) {
+    if (cmd == null) return null;
     switch (cmd.getType()) {
     case CheckPointCommand:
       CheckpointCommandProto chkPt = cmd.getCheckpointCmd();
@@ -426,7 +431,8 @@ public class PBHelper {
     if (di == null) return null;
     return new DatanodeInfo(
         PBHelper.convert(di.getId()),
-        di.getLocation(), di.getHostName(),
+        di.hasLocation() ? di.getLocation() : null , 
+        di.hasHostName() ? di.getHostName() : null,
         di.getCapacity(), di.getDfsUsed(), di.getRemaining(),
         di.getBlockPoolUsed() , di.getLastUpdate() , di.getXceiverCount() ,
         PBHelper.convert(di.getAdminState()));
@@ -434,10 +440,16 @@ public class PBHelper {
   
   static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
     if (di == null) return null;
-    return DatanodeInfoProto.newBuilder().
+    DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
+    if (di.getHostName() != null) {
+      builder.setHostName(di.getHostName());
+    }
+    if (di.getNetworkLocation() != null) {
+      builder.setLocation(di.getNetworkLocation());
+    }
+    
+    return builder.
      setId(PBHelper.convert((DatanodeID) di)).
-     setLocation(di.getNetworkLocation()).
-     setHostName(di.getHostName()).
      setCapacity(di.getCapacity()).
      setDfsUsed(di.getDfsUsed()).
      setRemaining(di.getRemaining()).
@@ -777,9 +789,14 @@ public class PBHelper {
 
   public static ReceivedDeletedBlockInfoProto convert(
       ReceivedDeletedBlockInfo receivedDeletedBlockInfo) {
-    return ReceivedDeletedBlockInfoProto.newBuilder()
-        .setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
-        .setDeleteHint(receivedDeletedBlockInfo.getDelHints()).build();
+    ReceivedDeletedBlockInfoProto.Builder builder = 
+        ReceivedDeletedBlockInfoProto.newBuilder();
+    
+    if (receivedDeletedBlockInfo.getDelHints() != null) {
+      builder.setDeleteHint(receivedDeletedBlockInfo.getDelHints());
+    }
+    return builder.setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
+        .build();
   }
 
   public static UpgradeCommandProto convert(UpgradeCommand comm) {
@@ -803,7 +820,7 @@ public class PBHelper {
   public static ReceivedDeletedBlockInfo convert(
       ReceivedDeletedBlockInfoProto proto) {
     return new ReceivedDeletedBlockInfo(PBHelper.convert(proto.getBlock()),
-        proto.getDeleteHint());
+        proto.hasDeleteHint() ? proto.getDeleteHint() : null);
   }
 
   public static NamespaceInfoProto convert(NamespaceInfo info) {
@@ -863,13 +880,10 @@ public class PBHelper {
 
   // LocatedBlocks
   public static LocatedBlocks convert(LocatedBlocksProto lb) {
-    if (lb == null) {
-      return null;
-    }
     return new LocatedBlocks(
         lb.getFileLength(), lb.getUnderConstruction(),
         PBHelper.convertLocatedBlock(lb.getBlocksList()),
-        PBHelper.convert(lb.getLastBlock()),
+        lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null,
         lb.getIsLastBlockComplete());
   }
@@ -877,11 +891,15 @@ public class PBHelper {
     if (lb == null) {
       return null;
     }
-    return LocatedBlocksProto.newBuilder().
-        setFileLength(lb.getFileLength()).
-        setUnderConstruction(lb.isUnderConstruction()).
-        addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks())).
-        setLastBlock(PBHelper.convert(lb.getLastLocatedBlock())).setIsLastBlockComplete(lb.isLastBlockComplete()).build();
+    LocatedBlocksProto.Builder builder = 
+        LocatedBlocksProto.newBuilder();
+    if (lb.getLastLocatedBlock() != null) {
+      builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock()));
+    }
+    return builder.setFileLength(lb.getFileLength())
+        .setUnderConstruction(lb.isUnderConstruction())
+        .addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks()))
+        .setIsLastBlockComplete(lb.isLastBlockComplete()).build();
   }
 
   public static FsServerDefaults convert(FsServerDefaultsProto fs) {
@@ -982,11 +1000,16 @@ public class PBHelper {
       setPermission(PBHelper.convert(fs.getPermission())).
       setOwner(fs.getOwner()).
       setGroup(fs.getGroup()).
-      setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes())).
       setPath(ByteString.copyFrom(fs.getLocalNameInBytes()));
-    LocatedBlocks locations = null;
+    if (fs.getSymlink() != null) {
+      builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
+    }
     if (fs instanceof HdfsLocatedFileStatus) {
-      builder.setLocations(PBHelper.convert(locations));
+      LocatedBlocks locations = ((HdfsLocatedFileStatus)fs).getBlockLocations();
+      if (locations != null) {
+        builder.setLocations(PBHelper.convert(locations));
+      }
     }
     return builder.build();
   }

ClientNamenodeProtocol.proto

@@ -39,7 +39,7 @@ message GetBlockLocationsRequestProto {
 }
 
 message GetBlockLocationsResponseProto {
-  required LocatedBlocksProto locations = 1;
+  optional LocatedBlocksProto locations = 1;
 }
 
 message GetServerDefaultsRequestProto { // No parameters
@@ -115,7 +115,7 @@ message AbandonBlockResponseProto { // void response
 message AddBlockRequestProto {
   required string src = 1;
   required string clientName = 2;
-  required ExtendedBlockProto previous = 3;
+  optional ExtendedBlockProto previous = 3;
   repeated DatanodeInfoProto excludeNodes = 4;
 }
@@ -306,7 +306,7 @@ message DistributedUpgradeProgressRequestProto {
   required UpgradeActionProto action = 1;
 }
 
 message DistributedUpgradeProgressResponseProto {
-  required UpgradeStatusReportProto report = 1;
+  optional UpgradeStatusReportProto report = 1;
 }
 
 message ListCorruptFileBlocksRequestProto {
@@ -330,7 +330,7 @@ message GetFileInfoRequestProto {
 }
 
 message GetFileInfoResponseProto {
-  required HdfsFileStatusProto fs = 1;
+  optional HdfsFileStatusProto fs = 1;
 }
 
 message GetFileLinkInfoRequestProto {
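Relaxing these response fields from required to optional is the heart of the fix: with a required field, "no result" is unrepresentable, because the generated build() refuses to construct a message whose required fields are unset (protobuf-java signals this with UninitializedMessageException). A hand-rolled mimic of the difference, with illustrative names and a plain IllegalStateException standing in for the protobuf exception:

public class RequiredVsOptionalDemo {
  static final class UpgradeResponseBuilder {
    private String report;
    UpgradeResponseBuilder setReport(String r) { this.report = r; return this; }

    String buildRequired() { // pre-patch: 'report' was declared required
      if (report == null) {
        throw new IllegalStateException("Message missing required fields: report");
      }
      return report;
    }

    String buildOptional() { // post-patch: 'report' is optional
      return report; // may legitimately be absent
    }
  }

  public static void main(String[] args) {
    UpgradeResponseBuilder b = new UpgradeResponseBuilder();
    System.out.println(b.buildOptional()); // null: "no upgrade in progress"
    try {
      b.buildRequired();
    } catch (IllegalStateException e) {
      System.out.println(e.getMessage()); // a required field cannot be omitted
    }
  }
}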

DatanodeProtocol.proto

@@ -170,7 +170,7 @@ message HeartbeatRequestProto {
  * cmds - Commands from namenode to datanode.
  */
 message HeartbeatResponseProto {
-  repeated DatanodeCommandProto cmds = 1;
+  repeated DatanodeCommandProto cmds = 1; // Returned commands can be null
 }
 
 /**

hdfs.proto

@@ -30,7 +30,8 @@ message ExtendedBlockProto {
   required string poolId = 1;  // Block pool id - gloablly unique across clusters
   required uint64 blockId = 2; // the local id within a pool
   required uint64 generationStamp = 3;
-  optional uint64 numBytes = 4; // block len does not belong in ebid - here for historical reasons
+  optional uint64 numBytes = 4 [default = 0]; // len does not belong in ebid
+                                              // here for historical reasons
 }
 
 /**
@@ -65,12 +66,12 @@ message DatanodeInfosProto {
  */
 message DatanodeInfoProto {
   required DatanodeIDProto id = 1;
-  optional uint64 capacity = 2;
-  optional uint64 dfsUsed = 3;
-  optional uint64 remaining = 4;
-  optional uint64 blockPoolUsed = 5;
-  optional uint64 lastUpdate = 6;
-  optional uint32 xceiverCount = 7;
+  optional uint64 capacity = 2 [default = 0];
+  optional uint64 dfsUsed = 3 [default = 0];
+  optional uint64 remaining = 4 [default = 0];
+  optional uint64 blockPoolUsed = 5 [default = 0];
+  optional uint64 lastUpdate = 6 [default = 0];
+  optional uint32 xceiverCount = 7 [default = 0];
   optional string location = 8;
   optional string hostName = 9;
   enum AdminState {
@@ -79,7 +80,7 @@ message DatanodeInfoProto {
     DECOMMISSIONED = 2;
   }
 
-  optional AdminState adminState = 10;
+  optional AdminState adminState = 10 [default = NORMAL];
 }
 
 /**
@@ -162,8 +163,8 @@ message HdfsFileStatusProto {
   optional bytes symlink = 9; // if symlink, target encoded java UTF8
 
   // Optional fields for file
-  optional uint32 block_replication = 10; // Actually a short - only 16bits used
-  optional uint64 blocksize = 11;
+  optional uint32 block_replication = 10 [default = 0]; // only 16bits used
+  optional uint64 blocksize = 11 [default = 0];
   optional LocatedBlocksProto locations = 12; // suppled only if asked by client
 }
@@ -218,7 +219,7 @@ message NamenodeRegistrationProto {
     CHECKPOINT = 3;
   }
   required StorageInfoProto storageInfo = 3; // Node information
-  optional NamenodeRoleProto role = 4;       // Namenode role
+  optional NamenodeRoleProto role = 4 [default = NAMENODE]; // Namenode role
 }
 
 /**
@@ -264,7 +265,7 @@ message CheckpointCommandProto {
 message BlockProto {
   required uint64 blockId = 1;
   required uint64 genStamp = 2;
-  optional uint64 numBytes = 3;
+  optional uint64 numBytes = 3 [default = 0];
 }
 
 /**
@@ -313,7 +314,7 @@ message NamespaceInfoProto {
 message BlockKeyProto {
   required uint32 keyId = 1;      // Key identifier
   required uint64 expiryDate = 2; // Expiry time in milliseconds
-  required bytes keyBytes = 3;    // Key secret
+  optional bytes keyBytes = 3;    // Key secret
 }
 
 /**
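The [default = ...] annotations added throughout this file pin down what a reader sees for a scalar field that was never set: get*() returns the declared default while has*() still reports false. A minimal stand-in demonstrating that behavior (BlockInfo is illustrative, not the generated class):

public class ProtoDefaultDemo {
  static final class BlockInfo {
    private final Long numBytes; // null backing value models "unset"
    BlockInfo(Long n) { numBytes = n; }
    boolean hasNumBytes() { return numBytes != null; }
    long getNumBytes() { return numBytes != null ? numBytes : 0L; } // [default = 0]
  }

  public static void main(String[] args) {
    BlockInfo unset = new BlockInfo(null);
    System.out.println(unset.hasNumBytes()); // false
    System.out.println(unset.getNumBytes()); // 0, the declared default
  }
}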