Reverting r1213512 because it committed changes that were not part of the patch.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1213980 13f79535-47bb-0310-9956-ffa450edef68
parent 0ea8570be5
commit 3001a172c8

@@ -32,8 +32,6 @@ Trunk (unreleased changes)
     HDFS-2647. Used protobuf based RPC for InterDatanodeProtocol,
     ClientDatanodeProtocol, JournalProtocol, NamenodeProtocol. (suresh)
 
-    HDFS-2663. Handle protobuf optional parameters correctly. (suresh)
-
     HDFS-2666. Fix TestBackupNode failure. (suresh)
 
   IMPROVEMENTS

@@ -24,9 +24,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
@@ -55,7 +52,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdd
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto.Builder;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
@@ -128,7 +124,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.UpgradeStatusReportProto;
 import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
-import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtocolSignature;
 import org.apache.hadoop.ipc.RPC;
@@ -213,16 +208,11 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RpcController controller, GetBlockLocationsRequestProto req)
       throws ServiceException {
     try {
-      LocatedBlocks b = server.getBlockLocations(req.getSrc(), req.getOffset(),
-          req.getLength());
-      Builder builder = GetBlockLocationsResponseProto
-          .newBuilder();
-      if (b != null) {
-        builder.setLocations(
-            PBHelper.convert(server.getBlockLocations(req.getSrc(),
-                req.getOffset(), req.getLength()))).build();
-      }
-      return builder.build();
+      return GetBlockLocationsResponseProto
+          .newBuilder()
+          .setLocations(
+              PBHelper.convert(server.getBlockLocations(req.getSrc(),
+                  req.getOffset(), req.getLength()))).build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
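
The hunk above swaps a null-guarded builder for an unconditional call chain: the guarded form only calls setLocations() when there is a result to send, while the restored form assumes the server always returns one. A minimal sketch of the two styles, using a hypothetical proto2 message ResponseProto with an optional field "loc" (illustrative names, not the actual Hadoop generated classes):

    // Guarded style: tolerate a null result by leaving the optional field unset.
    ResponseProto buildGuarded(Result r) {
      ResponseProto.Builder builder = ResponseProto.newBuilder();
      if (r != null) {
        builder.setLoc(convert(r));   // generated setters reject null arguments
      }
      return builder.build();         // an unset optional field is simply omitted
    }

    // Unconditional style: assumes r is never null. convert(null) or
    // setLoc(null) would throw at run time, and if the field were declared
    // required, build() would also fail when setLoc() is skipped.
    ResponseProto buildUnconditional(Result r) {
      return ResponseProto.newBuilder().setLoc(convert(r)).build();
    }
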
@@ -335,7 +325,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       return AddBlockResponseProto.newBuilder().setBlock(
           PBHelper.convert(
               server.addBlock(req.getSrc(), req.getClientName(),
-                  req.hasPrevious() ? PBHelper.convert(req.getPrevious()) : null,
+                  PBHelper.convert(req.getPrevious()),
                   PBHelper.convert(
                       (DatanodeInfoProto[]) req.getExcludeNodesList().toArray()))))
           .build();
@@ -604,14 +594,10 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RpcController controller, DistributedUpgradeProgressRequestProto req)
       throws ServiceException {
     try {
-      UpgradeStatusReport result = server.distributedUpgradeProgress(PBHelper
-          .convert(req.getAction()));
-      DistributedUpgradeProgressResponseProto.Builder builder =
-          DistributedUpgradeProgressResponseProto.newBuilder();
-      if (result != null) {
-        builder.setReport(PBHelper.convert(result));
-      }
-      return builder.build();
+      UpgradeStatusReportProto result = PBHelper.convert(server
+          .distributedUpgradeProgress(PBHelper.convert(req.getAction())));
+      return DistributedUpgradeProgressResponseProto.newBuilder()
+          .setReport(result).build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -650,13 +636,9 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public GetFileInfoResponseProto getFileInfo(RpcController controller,
       GetFileInfoRequestProto req) throws ServiceException {
     try {
-      HdfsFileStatus res = server.getFileInfo(req.getSrc());
-      GetFileInfoResponseProto.Builder builder =
-          GetFileInfoResponseProto.newBuilder();
-      if (res != null) {
-        builder.setFs(PBHelper.convert(res));
-      }
-      return builder.build();
+      HdfsFileStatusProto result =
+          PBHelper.convert(server.getFileInfo(req.getSrc()));
+      return GetFileInfoResponseProto.newBuilder().setFs(result).build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }

@@ -83,17 +83,14 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Create
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DistributedUpgradeProgressResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FinalizeUpgradeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.FsyncRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetAdditionalDatanodeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetBlockLocationsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;
@@ -208,10 +205,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
         .setLength(length)
         .build();
     try {
-      GetBlockLocationsResponseProto resp = rpcProxy.getBlockLocations(null,
-          req);
-      return resp.hasLocations() ?
-          PBHelper.convert(resp.getLocations()) : null;
+      return PBHelper.convert(rpcProxy.getBlockLocations(null, req).getLocations());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -335,15 +329,12 @@ public class ClientNamenodeProtocolTranslatorPB implements
       throws AccessControlException, FileNotFoundException,
       NotReplicatedYetException, SafeModeException, UnresolvedLinkException,
       IOException {
-    AddBlockRequestProto.Builder builder = AddBlockRequestProto.newBuilder();
-    builder.setSrc(src)
-        .setClientName(clientName)
-        .addAllExcludeNodes(Arrays.asList(PBHelper.convert(excludeNodes)));
-    if (previous != null) {
-      builder.setPrevious(PBHelper.convert(previous));
-    }
+    AddBlockRequestProto req = AddBlockRequestProto.newBuilder().setSrc(src)
+        .setClientName(clientName).setPrevious(PBHelper.convert(previous))
+        .addAllExcludeNodes(Arrays.asList(PBHelper.convert(excludeNodes)))
+        .build();
     try {
-      return PBHelper.convert(rpcProxy.addBlock(null, builder.build()).getBlock());
+      return PBHelper.convert(rpcProxy.addBlock(null, req).getBlock());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -624,9 +615,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
         DistributedUpgradeProgressRequestProto.newBuilder().
         setAction(PBHelper.convert(action)).build();
     try {
-      DistributedUpgradeProgressResponseProto res = rpcProxy
-          .distributedUpgradeProgress(null, req);
-      return res.hasReport() ? PBHelper.convert(res.getReport()) : null;
+      return PBHelper.convert(
+          rpcProxy.distributedUpgradeProgress(null, req).getReport());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -663,8 +653,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     GetFileInfoRequestProto req = GetFileInfoRequestProto.newBuilder()
         .setSrc(src).build();
     try {
-      GetFileInfoResponseProto res = rpcProxy.getFileInfo(null, req);
-      return res.hasFs() ? PBHelper.convert(res.getFs()) : null;
+      return PBHelper.convert(rpcProxy.getFileInfo(null, req).getFs());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }

@@ -204,7 +204,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements DatanodeProtocol,
     } catch (ServiceException se) {
       throw ProtobufHelper.getRemoteException(se);
     }
-    return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
+    return PBHelper.convert(resp.getCmd());
   }
 
   @Override
@@ -262,7 +262,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements DatanodeProtocol,
     } catch (ServiceException se) {
       throw ProtobufHelper.getRemoteException(se);
     }
-    return resp.hasCmd() ? PBHelper.convert(resp.getCmd()) : null;
+    return PBHelper.convert(resp.getCmd());
   }
 
   @Override

@@ -29,7 +29,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportR
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.CommitBlockSynchronizationResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ErrorReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.HeartbeatRequestProto;
@@ -109,9 +108,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements
         .newBuilder();
     if (cmds != null) {
       for (int i = 0; i < cmds.length; i++) {
-        if (cmds[i] != null) {
-          builder.addCmds(PBHelper.convert(cmds[i]));
-        }
+        builder.addCmds(i, PBHelper.convert(cmds[i]));
       }
     }
     return builder.build();
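
Two details differ in the loop above. The removed form skips null entries and uses the appending overload addCmds(value), so the repeated field stays dense even when commands are filtered out; the restored form uses the positional overload addCmds(index, value), which inserts at the given index and therefore assumes no earlier entry was skipped. A sketch of the distinction, assuming a generated builder with a repeated field named cmds:

    // Appending overload: safe when some entries are filtered out.
    for (int i = 0; i < cmds.length; i++) {
      if (cmds[i] != null) {
        builder.addCmds(PBHelper.convert(cmds[i]));
      }
    }

    // Positional overload: addCmds(i, value) inserts at position i. If an
    // earlier entry had been skipped, i would exceed the current list size
    // and the call would fail with an IndexOutOfBoundsException.
    for (int i = 0; i < cmds.length; i++) {
      builder.addCmds(i, PBHelper.convert(cmds[i]));
    }
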
@@ -132,12 +129,8 @@ public class DatanodeProtocolServerSideTranslatorPB implements
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    BlockReportResponseProto.Builder builder =
-        BlockReportResponseProto.newBuilder();
-    if (cmd != null) {
-      builder.setCmd(PBHelper.convert(cmd));
-    }
-    return builder.build();
+    return BlockReportResponseProto.newBuilder().setCmd(PBHelper.convert(cmd))
+        .build();
   }
 
   @Override
@@ -187,20 +180,14 @@ public class DatanodeProtocolServerSideTranslatorPB implements
   @Override
   public ProcessUpgradeResponseProto processUpgrade(RpcController controller,
       ProcessUpgradeRequestProto request) throws ServiceException {
-    UpgradeCommand ret;
+    UpgradeCommand cmd;
     try {
-      UpgradeCommand cmd = request.hasCmd() ? PBHelper
-          .convert(request.getCmd()) : null;
-      ret = impl.processUpgradeCommand(cmd);
+      cmd = impl.processUpgradeCommand(PBHelper.convert(request.getCmd()));
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-    ProcessUpgradeResponseProto.Builder builder =
-        ProcessUpgradeResponseProto.newBuilder();
-    if (ret != null) {
-      builder.setCmd(PBHelper.convert(ret));
-    }
-    return builder.build();
+    return ProcessUpgradeResponseProto.newBuilder()
+        .setCmd(PBHelper.convert(cmd)).build();
   }
 
   @Override

@@ -127,10 +127,6 @@ import com.google.protobuf.ByteString;
 
 /**
  * Utilities for converting protobuf classes to and from implementation classes.
- *
- * Note that when converting from an internal type to protobuf type, the
- * converter never return null for protobuf type. The check for internal type
- * being null must be done before calling the convert() method.
  */
 public class PBHelper {
   private static final RegisterCommandProto REG_CMD_PROTO =
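
The deleted javadoc spells out the convert() contract that the rest of this revert unwinds: a converter never returns a null protobuf object, so the null check on the internal type belongs at the call site. A small sketch of a caller honoring that contract, modeled on the server-side getFileInfo() code removed earlier in this commit:

    HdfsFileStatus status = server.getFileInfo(src);  // may be null if the path is absent
    GetFileInfoResponseProto.Builder builder =
        GetFileInfoResponseProto.newBuilder();
    if (status != null) {
      builder.setFs(PBHelper.convert(status));  // convert() only ever sees non-null input
    }
    return builder.build();                     // "fs" stays unset for a missing file
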
@@ -374,7 +370,6 @@ public class PBHelper {
   }
 
   public static NamenodeCommand convert(NamenodeCommandProto cmd) {
-    if (cmd == null) return null;
     switch (cmd.getType()) {
     case CheckPointCommand:
       CheckpointCommandProto chkPt = cmd.getCheckpointCmd();
@@ -431,8 +426,7 @@ public class PBHelper {
     if (di == null) return null;
     return new DatanodeInfo(
         PBHelper.convert(di.getId()),
-        di.hasLocation() ? di.getLocation() : null,
-        di.hasHostName() ? di.getHostName() : null,
+        di.getLocation(), di.getHostName(),
         di.getCapacity(), di.getDfsUsed(), di.getRemaining(),
         di.getBlockPoolUsed(), di.getLastUpdate(), di.getXceiverCount(),
         PBHelper.convert(di.getAdminState()));
@@ -440,16 +434,10 @@ public class PBHelper {
 
   static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
     if (di == null) return null;
-    DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
-    if (di.getHostName() != null) {
-      builder.setHostName(di.getHostName());
-    }
-    if (di.getNetworkLocation() != null) {
-      builder.setLocation(di.getNetworkLocation());
-    }
-
-    return builder.
+    return DatanodeInfoProto.newBuilder().
         setId(PBHelper.convert((DatanodeID) di)).
+        setLocation(di.getNetworkLocation()).
+        setHostName(di.getHostName()).
         setCapacity(di.getCapacity()).
         setDfsUsed(di.getDfsUsed()).
         setRemaining(di.getRemaining()).
@@ -789,14 +777,9 @@ public class PBHelper {
 
   public static ReceivedDeletedBlockInfoProto convert(
       ReceivedDeletedBlockInfo receivedDeletedBlockInfo) {
-    ReceivedDeletedBlockInfoProto.Builder builder =
-        ReceivedDeletedBlockInfoProto.newBuilder();
-
-    if (receivedDeletedBlockInfo.getDelHints() != null) {
-      builder.setDeleteHint(receivedDeletedBlockInfo.getDelHints());
-    }
-    return builder.setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
-        .build();
+    return ReceivedDeletedBlockInfoProto.newBuilder()
+        .setBlock(PBHelper.convert(receivedDeletedBlockInfo.getBlock()))
+        .setDeleteHint(receivedDeletedBlockInfo.getDelHints()).build();
   }
 
   public static UpgradeCommandProto convert(UpgradeCommand comm) {
@@ -820,7 +803,7 @@ public class PBHelper {
   public static ReceivedDeletedBlockInfo convert(
       ReceivedDeletedBlockInfoProto proto) {
     return new ReceivedDeletedBlockInfo(PBHelper.convert(proto.getBlock()),
-        proto.hasDeleteHint() ? proto.getDeleteHint() : null);
+        proto.getDeleteHint());
   }
 
   public static NamespaceInfoProto convert(NamespaceInfo info) {
@@ -880,10 +863,13 @@ public class PBHelper {
 
   // LocatedBlocks
   public static LocatedBlocks convert(LocatedBlocksProto lb) {
+    if (lb == null) {
+      return null;
+    }
     return new LocatedBlocks(
         lb.getFileLength(), lb.getUnderConstruction(),
         PBHelper.convertLocatedBlock(lb.getBlocksList()),
-        lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null,
+        PBHelper.convert(lb.getLastBlock()),
         lb.getIsLastBlockComplete());
   }
 
@@ -891,15 +877,11 @@ public class PBHelper {
     if (lb == null) {
       return null;
     }
-    LocatedBlocksProto.Builder builder =
-        LocatedBlocksProto.newBuilder();
-    if (lb.getLastLocatedBlock() != null) {
-      builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock()));
-    }
-    return builder.setFileLength(lb.getFileLength())
-        .setUnderConstruction(lb.isUnderConstruction())
-        .addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks()))
-        .setIsLastBlockComplete(lb.isLastBlockComplete()).build();
+    return LocatedBlocksProto.newBuilder().
+        setFileLength(lb.getFileLength()).
+        setUnderConstruction(lb.isUnderConstruction()).
+        addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks())).
+        setLastBlock(PBHelper.convert(lb.getLastLocatedBlock())).setIsLastBlockComplete(lb.isLastBlockComplete()).build();
   }
 
   public static FsServerDefaults convert(FsServerDefaultsProto fs) {
@@ -1000,16 +982,11 @@ public class PBHelper {
       setPermission(PBHelper.convert(fs.getPermission())).
       setOwner(fs.getOwner()).
       setGroup(fs.getGroup()).
+      setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes())).
       setPath(ByteString.copyFrom(fs.getLocalNameInBytes()));
-    if (fs.getSymlink() != null) {
-      builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
-    }
+    LocatedBlocks locations = null;
     if (fs instanceof HdfsLocatedFileStatus) {
-      LocatedBlocks locations = ((HdfsLocatedFileStatus)fs).getBlockLocations();
-      if (locations != null) {
-        builder.setLocations(PBHelper.convert(locations));
-      }
+      builder.setLocations(PBHelper.convert(locations));
     }
     return builder.build();
   }

@@ -39,7 +39,7 @@ message GetBlockLocationsRequestProto {
 }
 
 message GetBlockLocationsResponseProto {
-  optional LocatedBlocksProto locations = 1;
+  required LocatedBlocksProto locations = 1;
 }
 
 message GetServerDefaultsRequestProto { // No parameters
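
Flipping locations back from optional to required moves the absence check out of application code and into the protobuf runtime: a required field must be set or serialization fails, while an optional field lets the receiver ask whether it was set at all. Under the optional declaration, a proto2 receiver distinguishes the two cases through the generated hasLocations() accessor, roughly as the client-side code removed earlier in this commit did:

    GetBlockLocationsResponseProto resp = rpcProxy.getBlockLocations(null, req);
    LocatedBlocks result = resp.hasLocations()
        ? PBHelper.convert(resp.getLocations())  // server set the field
        : null;                                  // field omitted, e.g. no blocks to report
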
@@ -115,7 +115,7 @@ message AbandonBlockResponseProto { // void response
 message AddBlockRequestProto {
   required string src = 1;
   required string clientName = 2;
-  optional ExtendedBlockProto previous = 3;
+  required ExtendedBlockProto previous = 3;
   repeated DatanodeInfoProto excludeNodes = 4;
 }
 
@@ -306,7 +306,7 @@ message DistributedUpgradeProgressRequestProto {
   required UpgradeActionProto action = 1;
 }
 message DistributedUpgradeProgressResponseProto {
-  optional UpgradeStatusReportProto report = 1;
+  required UpgradeStatusReportProto report = 1;
 }
 
 message ListCorruptFileBlocksRequestProto {
@@ -330,7 +330,7 @@ message GetFileInfoRequestProto {
 }
 
 message GetFileInfoResponseProto {
-  optional HdfsFileStatusProto fs = 1;
+  required HdfsFileStatusProto fs = 1;
 }
 
 message GetFileLinkInfoRequestProto {

@@ -170,7 +170,7 @@ message HeartbeatRequestProto {
  * cmds - Commands from namenode to datanode.
  */
 message HeartbeatResponseProto {
-  repeated DatanodeCommandProto cmds = 1; // Returned commands can be null
+  repeated DatanodeCommandProto cmds = 1;
 }
 
 /**

@@ -30,8 +30,7 @@ message ExtendedBlockProto {
   required string poolId = 1;   // Block pool id - gloablly unique across clusters
   required uint64 blockId = 2;  // the local id within a pool
   required uint64 generationStamp = 3;
-  optional uint64 numBytes = 4 [default = 0];  // len does not belong in ebid
-                                               // here for historical reasons
+  optional uint64 numBytes = 4;  // block len does not belong in ebid - here for historical reasons
 }
 
 /**
@@ -66,12 +65,12 @@ message DatanodeInfosProto {
  */
 message DatanodeInfoProto {
   required DatanodeIDProto id = 1;
-  optional uint64 capacity = 2 [default = 0];
-  optional uint64 dfsUsed = 3 [default = 0];
-  optional uint64 remaining = 4 [default = 0];
-  optional uint64 blockPoolUsed = 5 [default = 0];
-  optional uint64 lastUpdate = 6 [default = 0];
-  optional uint32 xceiverCount = 7 [default = 0];
+  optional uint64 capacity = 2;
+  optional uint64 dfsUsed = 3;
+  optional uint64 remaining = 4;
+  optional uint64 blockPoolUsed = 5;
+  optional uint64 lastUpdate = 6;
+  optional uint32 xceiverCount = 7;
   optional string location = 8;
   optional string hostName = 9;
   enum AdminState {
@@ -80,7 +79,7 @@ message DatanodeInfoProto {
     DECOMMISSIONED = 2;
   }
 
-  optional AdminState adminState = 10 [default = NORMAL];
+  optional AdminState adminState = 10;
 }
 
 /**
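
Dropping the [default = ...] annotations is largely cosmetic for these fields: in proto2 an unset optional scalar reads back as the type's zero value, and an unset optional enum as its first listed value, so [default = 0], and [default = NORMAL] assuming NORMAL is the first AdminState value, restate the implicit behavior. An explicit default only changes anything when it differs from that implicit value. A short reader-side sketch, assuming generated code for DatanodeInfoProto and a hypothetical datanodeId value:

    DatanodeInfoProto info = DatanodeInfoProto.newBuilder()
        .setId(datanodeId)   // capacity intentionally left unset
        .build();
    long capacity = info.getCapacity();    // 0 with or without [default = 0]
    boolean wasSent = info.hasCapacity();  // false: presence is still detectable
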
@@ -163,8 +162,8 @@ message HdfsFileStatusProto {
   optional bytes symlink = 9;  // if symlink, target encoded java UTF8
 
   // Optional fields for file
-  optional uint32 block_replication = 10 [default = 0]; // only 16bits used
-  optional uint64 blocksize = 11 [default = 0];
+  optional uint32 block_replication = 10; // Actually a short - only 16bits used
+  optional uint64 blocksize = 11;
   optional LocatedBlocksProto locations = 12;  // suppled only if asked by client
 }
 
@@ -219,7 +218,7 @@ message NamenodeRegistrationProto {
     CHECKPOINT = 3;
   }
   required StorageInfoProto storageInfo = 3;  // Node information
-  optional NamenodeRoleProto role = 4 [default = NAMENODE];  // Namenode role
+  optional NamenodeRoleProto role = 4;  // Namenode role
 }
 
 /**
@@ -265,7 +264,7 @@ message CheckpointCommandProto {
 message BlockProto {
   required uint64 blockId = 1;
   required uint64 genStamp = 2;
-  optional uint64 numBytes = 3 [default = 0];
+  optional uint64 numBytes = 3;
 }
 
 /**
@@ -314,7 +313,7 @@ message NamespaceInfoProto {
 message BlockKeyProto {
   required uint32 keyId = 1;      // Key identifier
   required uint64 expiryDate = 2; // Expiry time in milliseconds
-  optional bytes keyBytes = 3;    // Key secret
+  required bytes keyBytes = 3;    // Key secret
 }
 
 /**

@@ -507,11 +507,6 @@ public class MiniDFSCluster {
     this.waitSafeMode = waitSafeMode;
 
     // use alternate RPC engine if spec'd
-    /*
-    Turned off - see HDFS-2647 and HDFS-2660 for related comments.
-    This test can be turned on when Avro RPC is enabled using mechanism
-    similar to protobuf.
-
     String rpcEngineName = System.getProperty("hdfs.rpc.engine");
     if (rpcEngineName != null && !"".equals(rpcEngineName)) {
 
@@ -535,7 +530,6 @@ public class MiniDFSCluster {
       conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION,
                       false);
     }
-    */
 
     int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
     conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));

@@ -28,16 +28,9 @@ public class TestDfsOverAvroRpc extends TestLocalDFS {
 
   @Test(timeout=20000)
   public void testWorkingDirectory() throws IOException {
-    /*
-    Test turned off - see HDFS-2647 and HDFS-2660 for related comments.
-    This test can be turned on when Avro RPC is enabled using mechanism
-    similar to protobuf.
-    */
-    /*
     System.setProperty("hdfs.rpc.engine",
                        "org.apache.hadoop.ipc.AvroRpcEngine");
     super.testWorkingDirectory();
-    */
   }
 
 }