From 490bb5ebd6c6d6f9c08fcad167f976687fc3aa42 Mon Sep 17 00:00:00 2001 From: Haohui Mai Date: Sat, 22 Aug 2015 13:30:19 -0700 Subject: [PATCH] HDFS-8934. Move ShortCircuitShm to hdfs-client. Contributed by Mingliang Liu. --- .../apache/hadoop/hdfs/ExtendedBlockId.java | 0 .../apache/hadoop/hdfs/net/DomainPeer.java | 0 .../java/org/apache/hadoop/hdfs/net/Peer.java | 0 .../datatransfer/BlockConstructionStage.java | 0 .../datatransfer/DataTransferProtoUtil.java | 12 +- .../datatransfer/DataTransferProtocol.java | 7 +- .../hadoop/hdfs/protocol/datatransfer/Op.java | 0 .../hdfs/protocol/datatransfer/Sender.java | 24 +- .../hdfs/protocolPB/PBHelperClient.java | 254 ++++++++++++++++++ .../block/InvalidBlockTokenException.java | 0 .../hdfs/server/datanode/CachingStrategy.java | 0 .../hdfs/shortcircuit/DfsClientShm.java | 0 .../shortcircuit/DfsClientShmManager.java | 28 +- .../hdfs/shortcircuit/ShortCircuitShm.java | 7 +- .../hdfs/util/ExactSizeInputStream.java | 0 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../hadoop/hdfs/BlockReaderFactory.java | 4 +- .../org/apache/hadoop/hdfs/DFSClient.java | 10 +- .../org/apache/hadoop/hdfs/DataStreamer.java | 6 +- .../apache/hadoop/hdfs/RemoteBlockReader.java | 4 +- .../hadoop/hdfs/RemoteBlockReader2.java | 4 +- .../protocol/datatransfer/PipelineAck.java | 2 +- .../hdfs/protocol/datatransfer/Receiver.java | 7 +- .../sasl/DataTransferSaslUtil.java | 2 +- ...atanodeProtocolServerSideTranslatorPB.java | 2 +- .../ClientDatanodeProtocolTranslatorPB.java | 6 +- ...amenodeProtocolServerSideTranslatorPB.java | 6 +- .../ClientNamenodeProtocolTranslatorPB.java | 28 +- ...atanodeProtocolClientSideTranslatorPB.java | 4 +- .../InterDatanodeProtocolTranslatorPB.java | 2 +- .../NamenodeProtocolTranslatorPB.java | 2 +- .../hadoop/hdfs/protocolPB/PBHelper.java | 228 ++-------------- .../hdfs/server/balancer/Dispatcher.java | 2 +- .../hadoop/hdfs/server/datanode/DataNode.java | 4 +- .../hdfs/server/datanode/DataXceiver.java | 14 +- .../server/namenode/FSImageFormatPBINode.java | 5 +- .../hdfs/shortcircuit/ShortCircuitCache.java | 4 +- .../hadoop/hdfs/protocolPB/TestPBHelper.java | 20 +- 38 files changed, 388 insertions(+), 312 deletions(-) rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java (100%) rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/net/DomainPeer.java (100%) rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/net/Peer.java (100%) rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlockConstructionStage.java (100%) rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java (93%) rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java (97%) rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Op.java (100%) rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java (92%) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java rename hadoop-hdfs-project/{hadoop-hdfs => 
hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/security/token/block/InvalidBlockTokenException.java (100%) rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/server/datanode/CachingStrategy.java (100%) rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java (100%) rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java (96%) rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java (99%) rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java (100%) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java similarity index 100% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/DomainPeer.java similarity index 100% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeer.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/DomainPeer.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/Peer.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/Peer.java similarity index 100% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/Peer.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/Peer.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlockConstructionStage.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlockConstructionStage.java similarity index 100% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlockConstructionStage.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlockConstructionStage.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java similarity index 93% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java index 284281a762b..28097ab7e0f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtoUtil.java @@ -21,8 +21,6 @@ import java.io.IOException; import 
org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.hdfs.net.Peer; -import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; @@ -32,7 +30,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationH import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ChecksumTypeProto; -import org.apache.hadoop.hdfs.protocolPB.PBHelper; +import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; import org.apache.hadoop.security.token.Token; @@ -60,7 +58,7 @@ public abstract class DataTransferProtoUtil { } public static ChecksumProto toProto(DataChecksum checksum) { - ChecksumTypeProto type = PBHelper.convert(checksum.getChecksumType()); + ChecksumTypeProto type = PBHelperClient.convert(checksum.getChecksumType()); // ChecksumType#valueOf never returns null return ChecksumProto.newBuilder() .setBytesPerChecksum(checksum.getBytesPerChecksum()) @@ -72,7 +70,7 @@ public abstract class DataTransferProtoUtil { if (proto == null) return null; int bytesPerChecksum = proto.getBytesPerChecksum(); - DataChecksum.Type type = PBHelper.convert(proto.getType()); + DataChecksum.Type type = PBHelperClient.convert(proto.getType()); return DataChecksum.newDataChecksum(type, bytesPerChecksum); } @@ -89,8 +87,8 @@ public abstract class DataTransferProtoUtil { static BaseHeaderProto buildBaseHeader(ExtendedBlock blk, Token blockToken) { BaseHeaderProto.Builder builder = BaseHeaderProto.newBuilder() - .setBlock(PBHelper.convert(blk)) - .setToken(PBHelper.convert(blockToken)); + .setBlock(PBHelperClient.convert(blk)) + .setToken(PBHelperClient.convert(blockToken)); if (Trace.isTracing()) { Span s = Trace.currentSpan(); builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder() diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java similarity index 97% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java index 48e931d741a..1f7e378d870 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java @@ -19,8 +19,6 @@ package org.apache.hadoop.hdfs.protocol.datatransfer; import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.StorageType; @@ -32,13 +30,16 @@ import 
org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.DataChecksum; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * Transfer data to/from datanode using a streaming protocol. */ @InterfaceAudience.Private @InterfaceStability.Evolving public interface DataTransferProtocol { - public static final Log LOG = LogFactory.getLog(DataTransferProtocol.class); + public static final Logger LOG = LoggerFactory.getLogger(DataTransferProtocol.class); /** Version for data transfers between clients and datanodes * This should change when serialization of DatanodeInfo, not just diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Op.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Op.java similarity index 100% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Op.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Op.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java similarity index 92% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java index df69125882b..2d11dc26c3a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockP import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto; -import org.apache.hadoop.hdfs.protocolPB.PBHelper; +import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId; @@ -140,9 +140,9 @@ public class Sender implements DataTransferProtocol { OpWriteBlockProto.Builder proto = OpWriteBlockProto.newBuilder() .setHeader(header) - .setStorageType(PBHelper.convertStorageType(storageType)) - .addAllTargets(PBHelper.convert(targets, 1)) - .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes, 1)) + .setStorageType(PBHelperClient.convertStorageType(storageType)) + .addAllTargets(PBHelperClient.convert(targets, 1)) + .addAllTargetStorageTypes(PBHelperClient.convertStorageTypes(targetStorageTypes, 1)) .setStage(toProto(stage)) .setPipelineSize(pipelineSize) .setMinBytesRcvd(minBytesRcvd) @@ -152,10 +152,10 @@ public class Sender implements DataTransferProtocol { .setCachingStrategy(getCachingStrategy(cachingStrategy)) .setAllowLazyPersist(allowLazyPersist) .setPinning(pinning) - .addAllTargetPinnings(PBHelper.convert(targetPinnings, 1)); + .addAllTargetPinnings(PBHelperClient.convert(targetPinnings, 1)); if (source != 
null) { - proto.setSource(PBHelper.convertDatanodeInfo(source)); + proto.setSource(PBHelperClient.convertDatanodeInfo(source)); } send(out, Op.WRITE_BLOCK, proto.build()); @@ -171,8 +171,8 @@ public class Sender implements DataTransferProtocol { OpTransferBlockProto proto = OpTransferBlockProto.newBuilder() .setHeader(DataTransferProtoUtil.buildClientHeader( blk, clientName, blockToken)) - .addAllTargets(PBHelper.convert(targets)) - .addAllTargetStorageTypes(PBHelper.convertStorageTypes(targetStorageTypes)) + .addAllTargets(PBHelperClient.convert(targets)) + .addAllTargetStorageTypes(PBHelperClient.convertStorageTypes(targetStorageTypes)) .build(); send(out, Op.TRANSFER_BLOCK, proto); @@ -188,7 +188,7 @@ public class Sender implements DataTransferProtocol { .setHeader(DataTransferProtoUtil.buildBaseHeader( blk, blockToken)).setMaxVersion(maxVersion); if (slotId != null) { - builder.setSlotId(PBHelper.convert(slotId)); + builder.setSlotId(PBHelperClient.convert(slotId)); } builder.setSupportsReceiptVerification(supportsReceiptVerification); OpRequestShortCircuitAccessProto proto = builder.build(); @@ -199,7 +199,7 @@ public class Sender implements DataTransferProtocol { public void releaseShortCircuitFds(SlotId slotId) throws IOException { ReleaseShortCircuitAccessRequestProto.Builder builder = ReleaseShortCircuitAccessRequestProto.newBuilder(). - setSlotId(PBHelper.convert(slotId)); + setSlotId(PBHelperClient.convert(slotId)); if (Trace.isTracing()) { Span s = Trace.currentSpan(); builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder() @@ -231,9 +231,9 @@ public class Sender implements DataTransferProtocol { final DatanodeInfo source) throws IOException { OpReplaceBlockProto proto = OpReplaceBlockProto.newBuilder() .setHeader(DataTransferProtoUtil.buildBaseHeader(blk, blockToken)) - .setStorageType(PBHelper.convertStorageType(storageType)) + .setStorageType(PBHelperClient.convertStorageType(storageType)) .setDelHint(delHint) - .setSource(PBHelper.convertDatanodeInfo(source)) + .setSource(PBHelperClient.convertDatanodeInfo(source)) .build(); send(out, Op.REPLACE_BLOCK, proto); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java new file mode 100644 index 00000000000..edf658ab4c0 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java @@ -0,0 +1,254 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.hdfs.protocolPB;
+
+import com.google.common.collect.Lists;
+import com.google.protobuf.ByteString;
+import com.google.protobuf.CodedInputStream;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
+import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
+import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
+import org.apache.hadoop.hdfs.util.ExactSizeInputStream;
+import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.DataChecksum;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Utilities for converting protobuf classes to and from implementation classes
+ * and other helper utilities to help in dealing with protobuf.
+ *
+ * Note that when converting from an internal type to protobuf type, the
+ * converter never return null for protobuf type. The check for internal type
+ * being null must be done before calling the convert() method.
+ */
+public class PBHelperClient {
+  private PBHelperClient() {
+    /** Hidden constructor */
+  }
+
+  public static ByteString getByteString(byte[] bytes) {
+    return ByteString.copyFrom(bytes);
+  }
+
+  public static ShmId convert(ShortCircuitShmIdProto shmId) {
+    return new ShmId(shmId.getHi(), shmId.getLo());
+  }
+
+  public static DataChecksum.Type convert(HdfsProtos.ChecksumTypeProto type) {
+    return DataChecksum.Type.valueOf(type.getNumber());
+  }
+
+  public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) {
+    return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
+  }
+
+  public static ExtendedBlockProto convert(final ExtendedBlock b) {
+    if (b == null) return null;
+    return ExtendedBlockProto.newBuilder().
+        setPoolId(b.getBlockPoolId()).
+        setBlockId(b.getBlockId()).
+        setNumBytes(b.getNumBytes()).
+        setGenerationStamp(b.getGenerationStamp()).
+        build();
+  }
+
+  public static TokenProto convert(Token<?> tok) {
+    return TokenProto.newBuilder().
+        setIdentifier(ByteString.copyFrom(tok.getIdentifier())).
+        setPassword(ByteString.copyFrom(tok.getPassword())).
+        setKind(tok.getKind().toString()).
+        setService(tok.getService().toString()).build();
+  }
+
+  public static ShortCircuitShmIdProto convert(ShmId shmId) {
+    return ShortCircuitShmIdProto.newBuilder().
+        setHi(shmId.getHi()).
+        setLo(shmId.getLo()).
+        build();
+
+  }
+
+  public static ShortCircuitShmSlotProto convert(SlotId slotId) {
+    return ShortCircuitShmSlotProto.newBuilder().
+        setShmId(convert(slotId.getShmId())).
+        setSlotIdx(slotId.getSlotIdx()).
+        build();
+  }
+
+  public static DatanodeIDProto convert(DatanodeID dn) {
+    // For wire compatibility with older versions we transmit the StorageID
+    // which is the same as the DatanodeUuid. Since StorageID is a required
+    // field we pass the empty string if the DatanodeUuid is not yet known.
+    return DatanodeIDProto.newBuilder()
+        .setIpAddr(dn.getIpAddr())
+        .setHostName(dn.getHostName())
+        .setXferPort(dn.getXferPort())
+        .setDatanodeUuid(dn.getDatanodeUuid() != null ? dn.getDatanodeUuid() : "")
+        .setInfoPort(dn.getInfoPort())
+        .setInfoSecurePort(dn.getInfoSecurePort())
+        .setIpcPort(dn.getIpcPort()).build();
+  }
+
+  public static DatanodeInfoProto.AdminState convert(
+      final DatanodeInfo.AdminStates inAs) {
+    switch (inAs) {
+    case NORMAL: return DatanodeInfoProto.AdminState.NORMAL;
+    case DECOMMISSION_INPROGRESS:
+      return DatanodeInfoProto.AdminState.DECOMMISSION_INPROGRESS;
+    case DECOMMISSIONED: return DatanodeInfoProto.AdminState.DECOMMISSIONED;
+    default: return DatanodeInfoProto.AdminState.NORMAL;
+    }
+  }
+
+  public static DatanodeInfoProto convert(DatanodeInfo info) {
+    DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
+    if (info.getNetworkLocation() != null) {
+      builder.setLocation(info.getNetworkLocation());
+    }
+    builder
+        .setId(convert((DatanodeID) info))
+        .setCapacity(info.getCapacity())
+        .setDfsUsed(info.getDfsUsed())
+        .setRemaining(info.getRemaining())
+        .setBlockPoolUsed(info.getBlockPoolUsed())
+        .setCacheCapacity(info.getCacheCapacity())
+        .setCacheUsed(info.getCacheUsed())
+        .setLastUpdate(info.getLastUpdate())
+        .setLastUpdateMonotonic(info.getLastUpdateMonotonic())
+        .setXceiverCount(info.getXceiverCount())
+        .setAdminState(convert(info.getAdminState()))
+        .build();
+    return builder.build();
+  }
+
+  public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
+      DatanodeInfo[] dnInfos) {
+    return convert(dnInfos, 0);
+  }
+
+  /**
+   * Copy from {@code dnInfos} to a target of list of same size starting at
+   * {@code startIdx}.
+   */
+  public static List<? extends HdfsProtos.DatanodeInfoProto> convert(
+      DatanodeInfo[] dnInfos, int startIdx) {
+    if (dnInfos == null)
+      return null;
+    ArrayList<HdfsProtos.DatanodeInfoProto> protos = Lists
+        .newArrayListWithCapacity(dnInfos.length);
+    for (int i = startIdx; i < dnInfos.length; i++) {
+      protos.add(convert(dnInfos[i]));
+    }
+    return protos;
+  }
+
+  public static List<Boolean> convert(boolean[] targetPinnings, int idx) {
+    List<Boolean> pinnings = new ArrayList<>();
+    if (targetPinnings == null) {
+      pinnings.add(Boolean.FALSE);
+    } else {
+      for (; idx < targetPinnings.length; ++idx) {
+        pinnings.add(targetPinnings[idx]);
+      }
+    }
+    return pinnings;
+  }
+
+  static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
+    if (di == null) return null;
+    return convert(di);
+  }
+
+  public static StorageTypeProto convertStorageType(StorageType type) {
+    switch(type) {
+    case DISK:
+      return StorageTypeProto.DISK;
+    case SSD:
+      return StorageTypeProto.SSD;
+    case ARCHIVE:
+      return StorageTypeProto.ARCHIVE;
+    case RAM_DISK:
+      return StorageTypeProto.RAM_DISK;
+    default:
+      throw new IllegalStateException(
+          "BUG: StorageType not found, type=" + type);
+    }
+  }
+
+  public static StorageType convertStorageType(StorageTypeProto type) {
+    switch(type) {
+    case DISK:
+      return StorageType.DISK;
+    case SSD:
+      return StorageType.SSD;
+    case ARCHIVE:
+      return StorageType.ARCHIVE;
+    case RAM_DISK:
+      return StorageType.RAM_DISK;
+    default:
+      throw new IllegalStateException(
+          "BUG: StorageTypeProto not found, type=" + type);
+    }
+  }
+
+  public static List<StorageTypeProto> convertStorageTypes(
+      StorageType[] types) {
+    return convertStorageTypes(types, 0);
+  }
+
+  public static List<StorageTypeProto> convertStorageTypes(
+      StorageType[] types, int startIdx) {
+    if (types == null) {
+      return null;
+    }
+    final List<StorageTypeProto> protos = new ArrayList<>(
+        types.length);
+    for (int i = startIdx; i < types.length; ++i) {
+      protos.add(PBHelperClient.convertStorageType(types[i]));
+    }
+    return protos;
+  }
+
+  public static InputStream vintPrefixed(final InputStream input)
+      throws IOException {
+    final int firstByte = input.read();
+    if (firstByte == -1) {
+      throw new EOFException("Premature EOF: no length prefix available");
+    }
+
+    int size = CodedInputStream.readRawVarint32(firstByte, input);
+    assert size >= 0;
+    return new ExactSizeInputStream(input, size);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/InvalidBlockTokenException.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/InvalidBlockTokenException.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/InvalidBlockTokenException.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/security/token/block/InvalidBlockTokenException.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/CachingStrategy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/CachingStrategy.java
similarity index 100%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/CachingStrategy.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/CachingStrategy.java
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java similarity index 100% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java similarity index 96% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java index 062539a0721..f70398aecda 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java @@ -30,8 +30,6 @@ import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; import org.apache.commons.lang.mutable.MutableBoolean; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.ExtendedBlockId; import org.apache.hadoop.hdfs.net.DomainPeer; @@ -39,17 +37,18 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto; -import org.apache.hadoop.hdfs.protocolPB.PBHelper; -import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry; +import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId; import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.net.unix.DomainSocketWatcher; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * Manages short-circuit memory segments for an HDFS client. * @@ -63,7 +62,8 @@ import com.google.common.base.Preconditions; */ @InterfaceAudience.Private public class DfsClientShmManager implements Closeable { - private static final Log LOG = LogFactory.getLog(DfsClientShmManager.class); + private static final Logger LOG = LoggerFactory.getLogger( + DfsClientShmManager.class); /** * Manages short-circuit memory segments that pertain to a given DataNode. @@ -168,7 +168,7 @@ public class DfsClientShmManager implements Closeable { new Sender(out).requestShortCircuitShm(clientName); ShortCircuitShmResponseProto resp = ShortCircuitShmResponseProto.parseFrom( - PBHelper.vintPrefixed(peer.getInputStream())); + PBHelperClient.vintPrefixed(peer.getInputStream())); String error = resp.hasError() ? 
resp.getError() : "(unknown)"; switch (resp.getStatus()) { case SUCCESS: @@ -185,14 +185,18 @@ public class DfsClientShmManager implements Closeable { } try { DfsClientShm shm = - new DfsClientShm(PBHelper.convert(resp.getId()), + new DfsClientShm(PBHelperClient.convert(resp.getId()), fis[0], this, peer); if (LOG.isTraceEnabled()) { LOG.trace(this + ": createNewShm: created " + shm); } return shm; } finally { - IOUtils.cleanup(LOG, fis[0]); + try { + fis[0].close(); + } catch (Throwable e) { + LOG.debug("Exception in closing " + fis[0], e); + } } case ERROR_UNSUPPORTED: // The DataNode just does not support short-circuit shared memory @@ -497,7 +501,11 @@ public class DfsClientShmManager implements Closeable { } // When closed, the domainSocketWatcher will issue callbacks that mark // all the outstanding DfsClientShm segments as stale. - IOUtils.cleanup(LOG, domainSocketWatcher); + try { + domainSocketWatcher.close(); + } catch (Throwable e) { + LOG.debug("Exception in closing " + domainSocketWatcher, e); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java similarity index 99% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java index 7b89d0a978d..78325a389b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java @@ -27,8 +27,6 @@ import java.util.Random; import org.apache.commons.lang.builder.EqualsBuilder; import org.apache.commons.lang.builder.HashCodeBuilder; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.InvalidRequestException; import org.apache.hadoop.hdfs.ExtendedBlockId; import org.apache.hadoop.io.nativeio.NativeIO; @@ -36,6 +34,9 @@ import org.apache.hadoop.io.nativeio.NativeIO.POSIX; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import sun.misc.Unsafe; import com.google.common.base.Preconditions; @@ -46,7 +47,7 @@ import com.google.common.primitives.Ints; * A shared memory segment used to implement short-circuit reads. 
*/ public class ShortCircuitShm { - private static final Log LOG = LogFactory.getLog(ShortCircuitShm.class); + private static final Logger LOG = LoggerFactory.getLogger(ShortCircuitShm.class); protected static final int BYTES_PER_SLOT = 64; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java similarity index 100% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ExactSizeInputStream.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 08602d463d0..78f69fb9077 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -827,6 +827,8 @@ Release 2.8.0 - UNRELEASED HDFS-8823. Move replication factor into individual blocks. (wheat9) + HDFS-8934. Move ShortCircuitShm to hdfs-client. (Mingliang Liu via wheat9) + OPTIMIZATIONS HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java index 8517173ded0..fec6b85ad27 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java @@ -45,7 +45,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; -import org.apache.hadoop.hdfs.protocolPB.PBHelper; +import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; @@ -592,7 +592,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator { failureInjector.getSupportsReceiptVerification()); DataInputStream in = new DataInputStream(peer.getInputStream()); BlockOpResponseProto resp = BlockOpResponseProto.parseFrom( - PBHelper.vintPrefixed(in)); + PBHelperClient.vintPrefixed(in)); DomainSocket sock = peer.getDomainSocket(); failureInjector.injectRequestFileDescriptorsFailure(); switch (resp.getStatus()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 99dbb1981b7..47aaed64e80 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -144,7 +144,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto; -import 
org.apache.hadoop.hdfs.protocolPB.PBHelper; +import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; @@ -1853,7 +1853,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, new Sender(out).blockChecksum(block, lb.getBlockToken()); final BlockOpResponseProto reply = - BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in)); + BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in)); String logInfo = "for block " + block + " from datanode " + datanodes[j]; DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo); @@ -1885,7 +1885,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, // read crc-type final DataChecksum.Type ct; if (checksumData.hasCrcType()) { - ct = PBHelper.convert(checksumData + ct = PBHelperClient.convert(checksumData .getCrcType()); } else { LOG.debug("Retrieving checksum from an earlier-version DataNode: " + @@ -2013,11 +2013,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName, 0, 1, true, CachingStrategy.newDefaultStrategy()); final BlockOpResponseProto reply = - BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in)); + BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in)); String logInfo = "trying to read " + lb.getBlock() + " from datanode " + dn; DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo); - return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType()); + return PBHelperClient.convert(reply.getReadOpChecksumInfo().getChecksum().getType()); } finally { IOUtils.cleanup(null, pair.in, pair.out); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java index 8dd85b72375..a9753128604 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java @@ -67,7 +67,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; -import org.apache.hadoop.hdfs.protocolPB.PBHelper; +import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; @@ -1245,7 +1245,7 @@ class DataStreamer extends Daemon { //ack BlockOpResponseProto response = - BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in)); + BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(in)); if (SUCCESS != response.getStatus()) { throw new IOException("Failed to add a datanode"); } @@ -1524,7 +1524,7 @@ class DataStreamer extends Daemon { // receive ack for connect BlockOpResponseProto resp = BlockOpResponseProto.parseFrom( - PBHelper.vintPrefixed(blockReplyStream)); + PBHelperClient.vintPrefixed(blockReplyStream)); pipelineStatus = resp.getStatus(); firstBadLink = resp.getFirstBadLink(); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java index d70f41904bc..05a9f2caa07 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java @@ -39,7 +39,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; -import org.apache.hadoop.hdfs.protocolPB.PBHelper; +import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.hdfs.shortcircuit.ClientMmap; @@ -414,7 +414,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader { new BufferedInputStream(peer.getInputStream(), bufferSize)); BlockOpResponseProto status = BlockOpResponseProto.parseFrom( - PBHelper.vintPrefixed(in)); + PBHelperClient.vintPrefixed(in)); RemoteBlockReader2.checkSuccess(status, peer, block, file); ReadOpChecksumInfoProto checksumInfo = status.getReadOpChecksumInfo(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java index c368d6515f1..4c23d363d6b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; -import org.apache.hadoop.hdfs.protocolPB.PBHelper; +import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.hdfs.shortcircuit.ClientMmap; @@ -417,7 +417,7 @@ public class RemoteBlockReader2 implements BlockReader { DataInputStream in = new DataInputStream(peer.getInputStream()); BlockOpResponseProto status = BlockOpResponseProto.parseFrom( - PBHelper.vintPrefixed(in)); + PBHelperClient.vintPrefixed(in)); checkSuccess(status, peer, block, file); ReadOpChecksumInfoProto checksumInfo = status.getReadOpChecksumInfo(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java index a811f39ece0..44f38c66736 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.protocol.datatransfer; -import static 
org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed; +import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed; import java.io.IOException; import java.io.InputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java index d435543f650..694f5212b99 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.protocol.datatransfer; import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.fromProto; import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.continueTraceSpan; -import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed; +import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed; import java.io.DataInputStream; import java.io.IOException; @@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProt import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto; import org.apache.hadoop.hdfs.protocolPB.PBHelper; +import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId; import org.apache.htrace.TraceScope; @@ -136,7 +137,7 @@ public abstract class Receiver implements DataTransferProtocol { proto.getClass().getSimpleName()); try { writeBlock(PBHelper.convert(proto.getHeader().getBaseHeader().getBlock()), - PBHelper.convertStorageType(proto.getStorageType()), + PBHelperClient.convertStorageType(proto.getStorageType()), PBHelper.convert(proto.getHeader().getBaseHeader().getToken()), proto.getHeader().getClientName(), targets, @@ -228,7 +229,7 @@ public abstract class Receiver implements DataTransferProtocol { proto.getClass().getSimpleName()); try { replaceBlock(PBHelper.convert(proto.getHeader().getBlock()), - PBHelper.convertStorageType(proto.getStorageType()), + PBHelperClient.convertStorageType(proto.getStorageType()), PBHelper.convert(proto.getHeader().getToken()), proto.getDelHint(), PBHelper.convert(proto.getSource())); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java index 398d44cc094..852819f1b1a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java @@ -24,7 +24,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY; -import static 
org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed; +import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed; import java.io.IOException; import java.io.InputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java index c62d9ba1315..3886007c309 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java @@ -128,7 +128,7 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements throw new ServiceException(e); } return GetBlockLocalPathInfoResponseProto.newBuilder() - .setBlock(PBHelper.convert(resp.getBlock())) + .setBlock(PBHelperClient.convert(resp.getBlock())) .setLocalPath(resp.getBlockPath()).setLocalMetaPath(resp.getMetaPath()) .build(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java index 214606317fa..7b427fa58e4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java @@ -178,7 +178,7 @@ public class ClientDatanodeProtocolTranslatorPB implements @Override public long getReplicaVisibleLength(ExtendedBlock b) throws IOException { GetReplicaVisibleLengthRequestProto req = GetReplicaVisibleLengthRequestProto - .newBuilder().setBlock(PBHelper.convert(b)).build(); + .newBuilder().setBlock(PBHelperClient.convert(b)).build(); try { return rpcProxy.getReplicaVisibleLength(NULL_CONTROLLER, req).getLength(); } catch (ServiceException e) { @@ -211,8 +211,8 @@ public class ClientDatanodeProtocolTranslatorPB implements Token token) throws IOException { GetBlockLocalPathInfoRequestProto req = GetBlockLocalPathInfoRequestProto.newBuilder() - .setBlock(PBHelper.convert(block)) - .setToken(PBHelper.convert(token)).build(); + .setBlock(PBHelperClient.convert(block)) + .setToken(PBHelperClient.convert(token)).build(); GetBlockLocalPathInfoResponseProto resp; try { resp = rpcProxy.getBlockLocalPathInfo(NULL_CONTROLLER, req); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 8e81fdc14a6..beaa903e476 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -695,7 +695,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements RpcController controller, GetDatanodeReportRequestProto req) throws ServiceException { try { - List result = PBHelper.convert(server + List result = PBHelperClient.convert(server .getDatanodeReport(PBHelper.convert(req.getType()))); return 
GetDatanodeReportResponseProto.newBuilder() .addAllDi(result).build(); @@ -890,7 +890,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements server.setQuota(req.getPath(), req.getNamespaceQuota(), req.getStoragespaceQuota(), req.hasStorageType() ? - PBHelper.convertStorageType(req.getStorageType()): null); + PBHelperClient.convertStorageType(req.getStorageType()): null); return VOID_SETQUOTA_RESPONSE; } catch (IOException e) { throw new ServiceException(e); @@ -990,7 +990,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements GetDelegationTokenResponseProto.Builder rspBuilder = GetDelegationTokenResponseProto.newBuilder(); if (token != null) { - rspBuilder.setToken(PBHelper.convert(token)); + rspBuilder.setToken(PBHelperClient.convert(token)); } return rspBuilder.build(); } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index d6afa6ed6a4..d30982a5209 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -390,7 +390,7 @@ public class ClientNamenodeProtocolTranslatorPB implements String holder) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException { AbandonBlockRequestProto req = AbandonBlockRequestProto.newBuilder() - .setB(PBHelper.convert(b)).setSrc(src).setHolder(holder) + .setB(PBHelperClient.convert(b)).setSrc(src).setHolder(holder) .setFileId(fileId).build(); try { rpcProxy.abandonBlock(null, req); @@ -409,9 +409,9 @@ public class ClientNamenodeProtocolTranslatorPB implements AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder() .setSrc(src).setClientName(clientName).setFileId(fileId); if (previous != null) - req.setPrevious(PBHelper.convert(previous)); - if (excludeNodes != null) - req.addAllExcludeNodes(PBHelper.convert(excludeNodes)); + req.setPrevious(PBHelperClient.convert(previous)); + if (excludeNodes != null) + req.addAllExcludeNodes(PBHelperClient.convert(excludeNodes)); if (favoredNodes != null) { req.addAllFavoredNodes(Arrays.asList(favoredNodes)); } @@ -433,10 +433,10 @@ public class ClientNamenodeProtocolTranslatorPB implements .newBuilder() .setSrc(src) .setFileId(fileId) - .setBlk(PBHelper.convert(blk)) - .addAllExistings(PBHelper.convert(existings)) + .setBlk(PBHelperClient.convert(blk)) + .addAllExistings(PBHelperClient.convert(existings)) .addAllExistingStorageUuids(Arrays.asList(existingStorageIDs)) - .addAllExcludes(PBHelper.convert(excludes)) + .addAllExcludes(PBHelperClient.convert(excludes)) .setNumAdditionalNodes(numAdditionalNodes) .setClientName(clientName) .build(); @@ -458,7 +458,7 @@ public class ClientNamenodeProtocolTranslatorPB implements .setClientName(clientName) .setFileId(fileId); if (last != null) - req.setLast(PBHelper.convert(last)); + req.setLast(PBHelperClient.convert(last)); try { return rpcProxy.complete(null, req.build()).getResult(); } catch (ServiceException e) { @@ -819,7 +819,7 @@ public class ClientNamenodeProtocolTranslatorPB implements .setNamespaceQuota(namespaceQuota) .setStoragespaceQuota(storagespaceQuota); if (type != null) { - builder.setStorageType(PBHelper.convertStorageType(type)); + 
builder.setStorageType(PBHelperClient.convertStorageType(type)); } final SetQuotaRequestProto req = builder.build(); try { @@ -897,7 +897,7 @@ public class ClientNamenodeProtocolTranslatorPB implements String clientName) throws IOException { UpdateBlockForPipelineRequestProto req = UpdateBlockForPipelineRequestProto .newBuilder() - .setBlock(PBHelper.convert(block)) + .setBlock(PBHelperClient.convert(block)) .setClientName(clientName) .build(); try { @@ -913,8 +913,8 @@ public class ClientNamenodeProtocolTranslatorPB implements ExtendedBlock newBlock, DatanodeID[] newNodes, String[] storageIDs) throws IOException { UpdatePipelineRequestProto req = UpdatePipelineRequestProto.newBuilder() .setClientName(clientName) - .setOldBlock(PBHelper.convert(oldBlock)) - .setNewBlock(PBHelper.convert(newBlock)) + .setOldBlock(PBHelperClient.convert(oldBlock)) + .setNewBlock(PBHelperClient.convert(newBlock)) .addAllNewNodes(Arrays.asList(PBHelper.convert(newNodes))) .addAllStorageIDs(storageIDs == null ? null : Arrays.asList(storageIDs)) .build(); @@ -945,7 +945,7 @@ public class ClientNamenodeProtocolTranslatorPB implements public long renewDelegationToken(Token token) throws IOException { RenewDelegationTokenRequestProto req = RenewDelegationTokenRequestProto.newBuilder(). - setToken(PBHelper.convert(token)). + setToken(PBHelperClient.convert(token)). build(); try { return rpcProxy.renewDelegationToken(null, req).getNewExpiryTime(); @@ -959,7 +959,7 @@ public class ClientNamenodeProtocolTranslatorPB implements throws IOException { CancelDelegationTokenRequestProto req = CancelDelegationTokenRequestProto .newBuilder() - .setToken(PBHelper.convert(token)) + .setToken(PBHelperClient.convert(token)) .build(); try { rpcProxy.cancelDelegationToken(null, req); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java index 94028a2523f..0b46927af29 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java @@ -298,11 +298,11 @@ public class DatanodeProtocolClientSideTranslatorPB implements ) throws IOException { CommitBlockSynchronizationRequestProto.Builder builder = CommitBlockSynchronizationRequestProto.newBuilder() - .setBlock(PBHelper.convert(block)).setNewGenStamp(newgenerationstamp) + .setBlock(PBHelperClient.convert(block)).setNewGenStamp(newgenerationstamp) .setNewLength(newlength).setCloseFile(closeFile) .setDeleteBlock(deleteblock); for (int i = 0; i < newtargets.length; i++) { - builder.addNewTaragets(PBHelper.convert(newtargets[i])); + builder.addNewTaragets(PBHelperClient.convert(newtargets[i])); builder.addNewTargetStorages(newtargetstorages[i]); } CommitBlockSynchronizationRequestProto req = builder.build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java index fee62a4e994..17ba1967f1c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java @@ -105,7 +105,7 @@ public class InterDatanodeProtocolTranslatorPB implements long recoveryId, long newBlockId, long newLength) throws IOException { UpdateReplicaUnderRecoveryRequestProto req = UpdateReplicaUnderRecoveryRequestProto.newBuilder() - .setBlock(PBHelper.convert(oldBlock)) + .setBlock(PBHelperClient.convert(oldBlock)) .setNewLength(newLength).setNewBlockId(newBlockId) .setRecoveryId(recoveryId).build(); try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java index 82c5c4c8170..bcb96ba4d3a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java @@ -101,7 +101,7 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol, public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size) throws IOException { GetBlocksRequestProto req = GetBlocksRequestProto.newBuilder() - .setDatanode(PBHelper.convert((DatanodeID)datanode)).setSize(size) + .setDatanode(PBHelperClient.convert((DatanodeID)datanode)).setSize(size) .build(); try { return PBHelper.convert(rpcProxy.getBlocks(NULL_CONTROLLER, req) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 4ca5b26243d..887accfe39f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -346,7 +346,7 @@ public class PBHelper { if (types == null || types.length == 0) { return null; } - List list = convertStorageTypes(types); + List list = PBHelperClient.convertStorageTypes(types); return StorageTypesProto.newBuilder().addAllStorageTypes(list).build(); } @@ -381,20 +381,6 @@ public class PBHelper { .getInfoSecurePort() : 0, dn.getIpcPort()); } - public static DatanodeIDProto convert(DatanodeID dn) { - // For wire compatibility with older versions we transmit the StorageID - // which is the same as the DatanodeUuid. Since StorageID is a required - // field we pass the empty string if the DatanodeUuid is not yet known. - return DatanodeIDProto.newBuilder() - .setIpAddr(dn.getIpAddr()) - .setHostName(dn.getHostName()) - .setXferPort(dn.getXferPort()) - .setDatanodeUuid(dn.getDatanodeUuid() != null ? 
dn.getDatanodeUuid() : "") - .setInfoPort(dn.getInfoPort()) - .setInfoSecurePort(dn.getInfoSecurePort()) - .setIpcPort(dn.getIpcPort()).build(); - } - // Arrays of DatanodeId public static DatanodeIDProto[] convert(DatanodeID[] did) { if (did == null) @@ -402,7 +388,7 @@ public class PBHelper { final int len = did.length; DatanodeIDProto[] result = new DatanodeIDProto[len]; for (int i = 0; i < len; ++i) { - result[i] = convert(did[i]); + result[i] = PBHelperClient.convert(did[i]); } return result; } @@ -433,7 +419,7 @@ public class PBHelper { .setBlock(convert(blk.getBlock())) .addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids())) .addAllStorageUuids(Arrays.asList(blk.getStorageIDs())) - .addAllStorageTypes(convertStorageTypes(blk.getStorageTypes())) + .addAllStorageTypes(PBHelperClient.convertStorageTypes(blk.getStorageTypes())) .build(); } @@ -595,16 +581,6 @@ public class PBHelper { eb.getGenerationStamp()); } - public static ExtendedBlockProto convert(final ExtendedBlock b) { - if (b == null) return null; - return ExtendedBlockProto.newBuilder(). - setPoolId(b.getBlockPoolId()). - setBlockId(b.getBlockId()). - setNumBytes(b.getNumBytes()). - setGenerationStamp(b.getGenerationStamp()). - build(); - } - public static RecoveringBlockProto convert(RecoveringBlock b) { if (b == null) { return null; @@ -625,17 +601,6 @@ public class PBHelper { new RecoveringBlock(block, locs, b.getNewGenStamp()); } - public static DatanodeInfoProto.AdminState convert( - final DatanodeInfo.AdminStates inAs) { - switch (inAs) { - case NORMAL: return DatanodeInfoProto.AdminState.NORMAL; - case DECOMMISSION_INPROGRESS: - return DatanodeInfoProto.AdminState.DECOMMISSION_INPROGRESS; - case DECOMMISSIONED: return DatanodeInfoProto.AdminState.DECOMMISSIONED; - default: return DatanodeInfoProto.AdminState.NORMAL; - } - } - static public DatanodeInfo convert(DatanodeInfoProto di) { if (di == null) return null; return new DatanodeInfo( @@ -647,12 +612,6 @@ public class PBHelper { di.getXceiverCount(), PBHelper.convert(di.getAdminState())); } - static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) { - if (di == null) return null; - return convert(di); - } - - static public DatanodeInfo[] convert(DatanodeInfoProto di[]) { if (di == null) return null; DatanodeInfo[] result = new DatanodeInfo[di.length]; @@ -662,27 +621,6 @@ public class PBHelper { return result; } - public static List convert( - DatanodeInfo[] dnInfos) { - return convert(dnInfos, 0); - } - - /** - * Copy from {@code dnInfos} to a target of list of same size starting at - * {@code startIdx}. 
- */ - public static List convert( - DatanodeInfo[] dnInfos, int startIdx) { - if (dnInfos == null) - return null; - ArrayList protos = Lists - .newArrayListWithCapacity(dnInfos.length); - for (int i = startIdx; i < dnInfos.length; i++) { - protos.add(convert(dnInfos[i])); - } - return protos; - } - public static DatanodeInfo[] convert(List list) { DatanodeInfo[] info = new DatanodeInfo[list.size()]; for (int i = 0; i < info.length; i++) { @@ -690,32 +628,11 @@ public class PBHelper { } return info; } - - public static DatanodeInfoProto convert(DatanodeInfo info) { - DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder(); - if (info.getNetworkLocation() != null) { - builder.setLocation(info.getNetworkLocation()); - } - builder - .setId(PBHelper.convert((DatanodeID)info)) - .setCapacity(info.getCapacity()) - .setDfsUsed(info.getDfsUsed()) - .setRemaining(info.getRemaining()) - .setBlockPoolUsed(info.getBlockPoolUsed()) - .setCacheCapacity(info.getCacheCapacity()) - .setCacheUsed(info.getCacheUsed()) - .setLastUpdate(info.getLastUpdate()) - .setLastUpdateMonotonic(info.getLastUpdateMonotonic()) - .setXceiverCount(info.getXceiverCount()) - .setAdminState(PBHelper.convert(info.getAdminState())) - .build(); - return builder.build(); - } public static DatanodeStorageReportProto convertDatanodeStorageReport( DatanodeStorageReport report) { return DatanodeStorageReportProto.newBuilder() - .setDatanodeInfo(convert(report.getDatanodeInfo())) + .setDatanodeInfo(PBHelperClient.convert(report.getDatanodeInfo())) .addAllStorageReports(convertStorageReports(report.getStorageReports())) .build(); } @@ -767,7 +684,7 @@ public class PBHelper { Lists.newLinkedList(Arrays.asList(b.getCachedLocations())); for (int i = 0; i < locs.length; i++) { DatanodeInfo loc = locs[i]; - builder.addLocs(i, PBHelper.convert(loc)); + builder.addLocs(i, PBHelperClient.convert(loc)); boolean locIsCached = cachedLocs.contains(loc); builder.addIsCached(locIsCached); if (locIsCached) { @@ -781,7 +698,7 @@ public class PBHelper { StorageType[] storageTypes = b.getStorageTypes(); if (storageTypes != null) { for (int i = 0; i < storageTypes.length; ++i) { - builder.addStorageTypes(PBHelper.convertStorageType(storageTypes[i])); + builder.addStorageTypes(PBHelperClient.convertStorageType(storageTypes[i])); } } final String[] storageIDs = b.getStorageIDs(); @@ -789,8 +706,8 @@ public class PBHelper { builder.addAllStorageIDs(Arrays.asList(storageIDs)); } - return builder.setB(PBHelper.convert(b.getBlock())) - .setBlockToken(PBHelper.convert(b.getBlockToken())) + return builder.setB(PBHelperClient.convert(b.getBlock())) + .setBlockToken(PBHelperClient.convert(b.getBlockToken())) .setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build(); } @@ -831,14 +748,6 @@ public class PBHelper { return lb; } - public static TokenProto convert(Token tok) { - return TokenProto.newBuilder(). - setIdentifier(ByteString.copyFrom(tok.getIdentifier())). - setPassword(ByteString.copyFrom(tok.getPassword())). - setKind(tok.getKind().toString()). 
- setService(tok.getService().toString()).build(); - } - public static Token convert( TokenProto blockToken) { return new Token(blockToken.getIdentifier() @@ -890,7 +799,7 @@ public class PBHelper { DatanodeRegistration registration) { DatanodeRegistrationProto.Builder builder = DatanodeRegistrationProto .newBuilder(); - return builder.setDatanodeID(PBHelper.convert((DatanodeID) registration)) + return builder.setDatanodeID(PBHelperClient.convert((DatanodeID) registration)) .setStorageInfo(PBHelper.convert(registration.getStorageInfo())) .setKeys(PBHelper.convert(registration.getExportedKeys())) .setSoftwareVersion(registration.getSoftwareVersion()).build(); @@ -982,7 +891,7 @@ public class PBHelper { if (types != null) { for (StorageType[] ts : types) { StorageTypesProto.Builder builder = StorageTypesProto.newBuilder(); - builder.addAllStorageTypes(convertStorageTypes(ts)); + builder.addAllStorageTypes(PBHelperClient.convertStorageTypes(ts)); list.add(builder.build()); } } @@ -1013,7 +922,7 @@ public class PBHelper { DatanodeInfosProto[] ret = new DatanodeInfosProto[targets.length]; for (int i = 0; i < targets.length; i++) { ret[i] = DatanodeInfosProto.newBuilder() - .addAllDatanodes(PBHelper.convert(targets[i])).build(); + .addAllDatanodes(PBHelperClient.convert(targets[i])).build(); } return Arrays.asList(ret); } @@ -1337,7 +1246,7 @@ public class PBHelper { fs.getFileBufferSize(), fs.getEncryptDataTransfer(), fs.getTrashInterval(), - PBHelper.convert(fs.getChecksumType())); + PBHelperClient.convert(fs.getChecksumType())); } public static FsServerDefaultsProto convert(FsServerDefaults fs) { @@ -1350,7 +1259,7 @@ public class PBHelper { .setFileBufferSize(fs.getFileBufferSize()) .setEncryptDataTransfer(fs.getEncryptDataTransfer()) .setTrashInterval(fs.getTrashInterval()) - .setChecksumType(PBHelper.convert(fs.getChecksumType())) + .setChecksumType(PBHelperClient.convert(fs.getChecksumType())) .build(); } @@ -1738,7 +1647,7 @@ public class PBHelper { if (cs.hasTypeQuotaInfos()) { for (HdfsProtos.StorageTypeQuotaInfoProto info : cs.getTypeQuotaInfos().getTypeQuotaInfoList()) { - StorageType type = PBHelper.convertStorageType(info.getType()); + StorageType type = PBHelperClient.convertStorageType(info.getType()); builder.typeConsumed(type, info.getConsumed()); builder.typeQuota(type, info.getQuota()); } @@ -1762,7 +1671,7 @@ public class PBHelper { for (StorageType t: StorageType.getTypesSupportingQuota()) { HdfsProtos.StorageTypeQuotaInfoProto info = HdfsProtos.StorageTypeQuotaInfoProto.newBuilder(). - setType(convertStorageType(t)). + setType(PBHelperClient.convertStorageType(t)). setConsumed(cs.getTypeConsumed(t)). setQuota(cs.getTypeQuota(t)). 
build(); @@ -1807,7 +1716,7 @@ public class PBHelper { public static DatanodeStorageProto convert(DatanodeStorage s) { return DatanodeStorageProto.newBuilder() .setState(PBHelper.convertState(s.getState())) - .setStorageType(PBHelper.convertStorageType(s.getStorageType())) + .setStorageType(PBHelperClient.convertStorageType(s.getStorageType())) .setStorageUuid(s.getStorageID()).build(); } @@ -1821,44 +1730,10 @@ public class PBHelper { } } - public static List convertStorageTypes( - StorageType[] types) { - return convertStorageTypes(types, 0); - } - - public static List convertStorageTypes( - StorageType[] types, int startIdx) { - if (types == null) { - return null; - } - final List protos = new ArrayList( - types.length); - for (int i = startIdx; i < types.length; ++i) { - protos.add(convertStorageType(types[i])); - } - return protos; - } - - public static StorageTypeProto convertStorageType(StorageType type) { - switch(type) { - case DISK: - return StorageTypeProto.DISK; - case SSD: - return StorageTypeProto.SSD; - case ARCHIVE: - return StorageTypeProto.ARCHIVE; - case RAM_DISK: - return StorageTypeProto.RAM_DISK; - default: - throw new IllegalStateException( - "BUG: StorageType not found, type=" + type); - } - } - public static DatanodeStorage convert(DatanodeStorageProto s) { return new DatanodeStorage(s.getStorageUuid(), PBHelper.convertState(s.getState()), - PBHelper.convertStorageType(s.getStorageType())); + PBHelperClient.convertStorageType(s.getStorageType())); } private static State convertState(StorageState state) { @@ -1871,22 +1746,6 @@ public class PBHelper { } } - public static StorageType convertStorageType(StorageTypeProto type) { - switch(type) { - case DISK: - return StorageType.DISK; - case SSD: - return StorageType.SSD; - case ARCHIVE: - return StorageType.ARCHIVE; - case RAM_DISK: - return StorageType.RAM_DISK; - default: - throw new IllegalStateException( - "BUG: StorageTypeProto not found, type=" + type); - } - } - public static StorageType[] convertStorageTypes( List storageTypesList, int expectedSize) { final StorageType[] storageTypes = new StorageType[expectedSize]; @@ -1895,7 +1754,7 @@ public class PBHelper { Arrays.fill(storageTypes, StorageType.DEFAULT); } else { for (int i = 0; i < storageTypes.length; ++i) { - storageTypes[i] = convertStorageType(storageTypesList.get(i)); + storageTypes[i] = PBHelperClient.convertStorageType(storageTypesList.get(i)); } } return storageTypes; @@ -2079,10 +1938,6 @@ public class PBHelper { return reportProto; } - public static DataChecksum.Type convert(HdfsProtos.ChecksumTypeProto type) { - return DataChecksum.Type.valueOf(type.getNumber()); - } - public static CacheDirectiveInfoProto convert (CacheDirectiveInfo info) { CacheDirectiveInfoProto.Builder builder = @@ -2255,9 +2110,6 @@ public class PBHelper { return new CachePoolEntry(info, stats); } - public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) { - return HdfsProtos.ChecksumTypeProto.valueOf(type.id); - } public static DatanodeLocalInfoProto convert(DatanodeLocalInfo info) { DatanodeLocalInfoProto.Builder builder = DatanodeLocalInfoProto.newBuilder(); @@ -2272,17 +2124,6 @@ public class PBHelper { proto.getConfigVersion(), proto.getUptime()); } - public static InputStream vintPrefixed(final InputStream input) - throws IOException { - final int firstByte = input.read(); - if (firstByte == -1) { - throw new EOFException("Premature EOF: no length prefix available"); - } - - int size = CodedInputStream.readRawVarint32(firstByte, input); - assert 
size >= 0; - return new ExactSizeInputStream(input, size); - } private static AclEntryScopeProto convert(AclEntryScope v) { return AclEntryScopeProto.valueOf(v.ordinal()); @@ -2506,30 +2347,11 @@ public class PBHelper { proto.getKeyName()); } - public static ShortCircuitShmSlotProto convert(SlotId slotId) { - return ShortCircuitShmSlotProto.newBuilder(). - setShmId(convert(slotId.getShmId())). - setSlotIdx(slotId.getSlotIdx()). - build(); - } - - public static ShortCircuitShmIdProto convert(ShmId shmId) { - return ShortCircuitShmIdProto.newBuilder(). - setHi(shmId.getHi()). - setLo(shmId.getLo()). - build(); - - } - public static SlotId convert(ShortCircuitShmSlotProto slotId) { - return new SlotId(PBHelper.convert(slotId.getShmId()), + return new SlotId(PBHelperClient.convert(slotId.getShmId()), slotId.getSlotIdx()); } - public static ShmId convert(ShortCircuitShmIdProto shmId) { - return new ShmId(shmId.getHi(), shmId.getLo()); - } - private static Event.CreateEvent.INodeType createTypeConvert(InotifyProtos.INodeType type) { switch (type) { @@ -3036,18 +2858,6 @@ public class PBHelper { ezKeyVersionName); } - public static List convert(boolean[] targetPinnings, int idx) { - List pinnings = new ArrayList(); - if (targetPinnings == null) { - pinnings.add(Boolean.FALSE); - } else { - for (; idx < targetPinnings.length; ++idx) { - pinnings.add(Boolean.valueOf(targetPinnings[idx])); - } - } - return pinnings; - } - public static boolean[] convertBooleanList( List targetPinningsList) { final boolean[] targetPinnings = new boolean[targetPinningsList.size()]; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java index a5e22ecde03..be1a9efac5f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.balancer; -import static org.apache.hadoop.hdfs.protocolPB.PBHelper.vintPrefixed; +import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index ecf139cbc99..5bc50b04931 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -135,7 +135,7 @@ import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolPB; import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolServerSideTranslatorPB; import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB; -import org.apache.hadoop.hdfs.protocolPB.PBHelper; +import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier.AccessMode; @@ -2142,7 +2142,7 @@ public class DataNode extends ReconfigurableBase // 
read ack if (isClient) { DNTransferAckProto closeAck = DNTransferAckProto.parseFrom( - PBHelper.vintPrefixed(in)); + PBHelperClient.vintPrefixed(in)); if (LOG.isDebugEnabled()) { LOG.debug(getClass().getSimpleName() + ": close-ack=" + closeAck); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index e9cf4362942..dfaa5252819 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -70,7 +70,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumIn import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; -import org.apache.hadoop.hdfs.protocolPB.PBHelper; +import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsUnsupportedException; import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsVersionException; @@ -427,7 +427,7 @@ class DataXceiver extends Receiver implements Runnable { throws IOException { DataNodeFaultInjector.get().sendShortCircuitShmResponse(); ShortCircuitShmResponseProto.newBuilder().setStatus(SUCCESS). - setId(PBHelper.convert(shmInfo.shmId)).build(). + setId(PBHelperClient.convert(shmInfo.shmId)).build(). writeDelimitedTo(socketOut); // Send the file descriptor for the shared memory segment. byte buf[] = new byte[] { (byte)0 }; @@ -559,7 +559,7 @@ class DataXceiver extends Receiver implements Runnable { // to respond with a Status enum. try { ClientReadStatusProto stat = ClientReadStatusProto.parseFrom( - PBHelper.vintPrefixed(in)); + PBHelperClient.vintPrefixed(in)); if (!stat.hasStatus()) { LOG.warn("Client " + peer.getRemoteAddressString() + " did not send a valid status code after reading. 
" + @@ -745,7 +745,7 @@ class DataXceiver extends Receiver implements Runnable { // read connect ack (only for clients, not for replication req) if (isClient) { BlockOpResponseProto connectAck = - BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(mirrorIn)); + BlockOpResponseProto.parseFrom(PBHelperClient.vintPrefixed(mirrorIn)); mirrorInStatus = connectAck.getStatus(); firstBadLink = connectAck.getFirstBadLink(); if (LOG.isDebugEnabled() || mirrorInStatus != SUCCESS) { @@ -962,7 +962,7 @@ class DataXceiver extends Receiver implements Runnable { .setBytesPerCrc(bytesPerCRC) .setCrcPerBlock(crcPerBlock) .setMd5(ByteString.copyFrom(md5.getDigest())) - .setCrcType(PBHelper.convert(checksum.getChecksumType()))) + .setCrcType(PBHelperClient.convert(checksum.getChecksumType()))) .build() .writeDelimitedTo(out); out.flush(); @@ -1147,8 +1147,8 @@ class DataXceiver extends Receiver implements Runnable { // receive the response from the proxy BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom( - PBHelper.vintPrefixed(proxyReply)); - + PBHelperClient.vintPrefixed(proxyReply)); + String logInfo = "copy block " + block + " from " + proxySock.getRemoteSocketAddress(); DataTransferProtoUtil.checkBlockOpStatus(copyResponse, logInfo); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java index 25fd99d5629..3a9c64e30c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto; import org.apache.hadoop.hdfs.protocolPB.PBHelper; +import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager; @@ -155,7 +156,7 @@ public final class FSImageFormatPBINode { QuotaByStorageTypeFeatureProto proto) { ImmutableList.Builder b = ImmutableList.builder(); for (QuotaByStorageTypeEntryProto quotaEntry : proto.getQuotasList()) { - StorageType type = PBHelper.convertStorageType(quotaEntry.getStorageType()); + StorageType type = PBHelperClient.convertStorageType(quotaEntry.getStorageType()); long quota = quotaEntry.getQuota(); b.add(new QuotaByStorageTypeEntry.Builder().setStorageType(type) .setQuota(quota).build()); @@ -462,7 +463,7 @@ public final class FSImageFormatPBINode { if (q.getTypeSpace(t) >= 0) { QuotaByStorageTypeEntryProto.Builder eb = QuotaByStorageTypeEntryProto.newBuilder(). - setStorageType(PBHelper.convertStorageType(t)). + setStorageType(PBHelperClient.convertStorageType(t)). 
setQuota(q.getTypeSpace(t)); b.addQuotas(eb); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java index db4cbe29df5..15b8dea8e0e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java @@ -44,7 +44,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; -import org.apache.hadoop.hdfs.protocolPB.PBHelper; +import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RetriableException; @@ -201,7 +201,7 @@ public class ShortCircuitCache implements Closeable { DataInputStream in = new DataInputStream(sock.getInputStream()); ReleaseShortCircuitAccessResponseProto resp = ReleaseShortCircuitAccessResponseProto.parseFrom( - PBHelper.vintPrefixed(in)); + PBHelperClient.vintPrefixed(in)); if (resp.getStatus() != Status.SUCCESS) { String error = resp.hasError() ? resp.getError() : "(unknown)"; throw new IOException(resp.getStatus().toString() + ": " + error); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java index c7233bd5da9..f25fb1b0aab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java @@ -153,7 +153,7 @@ public class TestPBHelper { @Test public void testConvertDatanodeID() { DatanodeID dn = DFSTestUtil.getLocalDatanodeID(); - DatanodeIDProto dnProto = PBHelper.convert(dn); + DatanodeIDProto dnProto = PBHelperClient.convert(dn); DatanodeID dn2 = PBHelper.convert(dnProto); compare(dn, dn2); } @@ -332,12 +332,12 @@ public class TestPBHelper { @Test public void testConvertExtendedBlock() { ExtendedBlock b = getExtendedBlock(); - ExtendedBlockProto bProto = PBHelper.convert(b); + ExtendedBlockProto bProto = PBHelperClient.convert(b); ExtendedBlock b1 = PBHelper.convert(bProto); assertEquals(b, b1); b.setBlockId(-1); - bProto = PBHelper.convert(b); + bProto = PBHelperClient.convert(b); b1 = PBHelper.convert(bProto); assertEquals(b, b1); } @@ -398,7 +398,7 @@ public class TestPBHelper { Token token = new Token( "identifier".getBytes(), "password".getBytes(), new Text("kind"), new Text("service")); - TokenProto tokenProto = PBHelper.convert(token); + TokenProto tokenProto = PBHelperClient.convert(token); Token token2 = PBHelper.convert(tokenProto); compare(token, token2); } @@ -592,16 +592,16 @@ public class TestPBHelper { @Test public void testChecksumTypeProto() { assertEquals(DataChecksum.Type.NULL, - PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL)); + PBHelperClient.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL)); assertEquals(DataChecksum.Type.CRC32, - PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32)); + 
PBHelperClient.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32)); assertEquals(DataChecksum.Type.CRC32C, - PBHelper.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C)); - assertEquals(PBHelper.convert(DataChecksum.Type.NULL), + PBHelperClient.convert(HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C)); + assertEquals(PBHelperClient.convert(DataChecksum.Type.NULL), HdfsProtos.ChecksumTypeProto.CHECKSUM_NULL); - assertEquals(PBHelper.convert(DataChecksum.Type.CRC32), + assertEquals(PBHelperClient.convert(DataChecksum.Type.CRC32), HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32); - assertEquals(PBHelper.convert(DataChecksum.Type.CRC32C), + assertEquals(PBHelperClient.convert(DataChecksum.Type.CRC32C), HdfsProtos.ChecksumTypeProto.CHECKSUM_CRC32C); }
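For readers tracing the call-site migration in the hunks above, here is a minimal illustrative sketch (not part of the patch) of how code invokes the relocated helpers once they live in PBHelperClient. The class name ChecksumConversionExample and its main method are hypothetical; the two PBHelperClient.convert overloads are the ones moved by this change and exercised by testChecksumTypeProto.

import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.util.DataChecksum;

public class ChecksumConversionExample {
  public static void main(String[] args) {
    // Map a client-side checksum type to its protobuf representation via the
    // helper that now resides in the hadoop-hdfs-client module.
    HdfsProtos.ChecksumTypeProto proto =
        PBHelperClient.convert(DataChecksum.Type.CRC32C);

    // Convert back; the round trip yields the original enum value, mirroring
    // the assertions in testChecksumTypeProto above.
    DataChecksum.Type type = PBHelperClient.convert(proto);
    System.out.println(proto + " <-> " + type);
  }
}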