From eb9f1b670726e1af03f2e940ce2696b880964972 Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Fri, 20 Sep 2013 22:06:09 +0000
Subject: [PATCH] HDFS-5232. Protocol changes to transmit StorageUuid.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1525153 13f79535-47bb-0310-9956-ffa450edef68
---
 .../hadoop-hdfs/CHANGES_HDFS-2832.txt         |  2 ++
 ...amenodeProtocolServerSideTranslatorPB.java |  2 +-
 .../ClientNamenodeProtocolTranslatorPB.java   |  2 +-
 ...atanodeProtocolClientSideTranslatorPB.java |  2 +-
 ...atanodeProtocolServerSideTranslatorPB.java |  4 +--
 ...atanodeProtocolServerSideTranslatorPB.java |  2 +-
 .../InterDatanodeProtocolTranslatorPB.java    |  2 +-
 .../hadoop/hdfs/protocolPB/PBHelper.java      | 36 +++++++++----------
 .../main/proto/ClientNamenodeProtocol.proto   |  2 +-
 .../src/main/proto/DatanodeProtocol.proto     |  8 ++---
 .../main/proto/InterDatanodeProtocol.proto    |  2 +-
 .../hadoop-hdfs/src/main/proto/hdfs.proto     | 11 +++---
 12 files changed, 40 insertions(+), 35 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
index b9d4981a816..9f4e314ad75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt
@@ -26,3 +26,5 @@ IMPROVEMENTS:
 
     HDFS-4990. Change BlockPlacementPolicy to choose storages instead of
     datanodes. (szetszwo)
+
+    HDFS-5232. Protocol changes to transmit StorageUuid. (Arpit Agarwal)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 6e8e7bc34eb..8290a4b4e43 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -405,7 +405,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       throws ServiceException {
     try {
       List<DatanodeInfoProto> existingList = req.getExistingsList();
-      List<String> existingStorageIDsList = req.getExistingStorageIDsList();
+      List<String> existingStorageIDsList = req.getExistingStorageUuidsList();
       List<DatanodeInfoProto> excludesList = req.getExcludesList();
       LocatedBlock result = server.getAdditionalDatanode(req.getSrc(),
           PBHelper.convert(req.getBlk()),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index 84d10e473a6..30ea3e562e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -345,7 +345,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
         .setSrc(src)
         .setBlk(PBHelper.convert(blk))
         .addAllExistings(PBHelper.convert(existings))
-        .addAllExistingStorageIDs(Arrays.asList(existingStorageIDs))
+        .addAllExistingStorageUuids(Arrays.asList(existingStorageIDs))
         .addAllExcludes(PBHelper.convert(excludes))
         .setNumAdditionalNodes(numAdditionalNodes)
         .setClientName(clientName)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index fd4cc4b01c5..315ad92d049 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -213,7 +213,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
     for (StorageReceivedDeletedBlocks storageBlock : receivedAndDeletedBlocks) {
       StorageReceivedDeletedBlocksProto.Builder repBuilder =
           StorageReceivedDeletedBlocksProto.newBuilder();
-      repBuilder.setStorageID(storageBlock.getStorageID());
+      repBuilder.setStorageUuid(storageBlock.getStorageID());
       for (ReceivedDeletedBlockInfo rdBlock : storageBlock.getBlocks()) {
         repBuilder.addBlocks(PBHelper.convert(rdBlock));
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
index 3e424602fa9..9a63d37dec6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
@@ -104,7 +104,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements
     StorageReport[] report = new StorageReport[list.size()];
     int i = 0;
     for (StorageReportProto p : list) {
-      report[i++] = new StorageReport(p.getStorageID(), p.getFailed(),
+      report[i++] = new StorageReport(p.getStorageUuid(), p.getFailed(),
           p.getCapacity(), p.getDfsUsed(), p.getRemaining(),
           p.getBlockPoolUsed());
     }
@@ -174,7 +174,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements
       for (int j = 0; j < list.size(); j++) {
        rdBlocks[j] = PBHelper.convert(list.get(j));
      }
-      info[i] = new StorageReceivedDeletedBlocks(sBlock.getStorageID(), rdBlocks);
+      info[i] = new StorageReceivedDeletedBlocks(sBlock.getStorageUuid(), rdBlocks);
     }
     try {
       impl.blockReceivedAndDeleted(PBHelper.convert(request.getRegistration()),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java
index 8f3eed96852..087c697c587 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java
@@ -82,6 +82,6 @@ public class InterDatanodeProtocolServerSideTranslatorPB implements
       throw new ServiceException(e);
     }
     return UpdateReplicaUnderRecoveryResponseProto.newBuilder()
-        .setStorageID(storageID).build();
+        .setStorageUuid(storageID).build();
   }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java
index 4e518c35bfa..5174d861882 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolTranslatorPB.java
@@ -109,7 +109,7 @@ public class InterDatanodeProtocolTranslatorPB implements
         .setNewLength(newLength).setRecoveryId(recoveryId).build();
     try {
       return rpcProxy.updateReplicaUnderRecovery(NULL_CONTROLLER, req
-          ).getStorageID();
+          ).getStorageUuid();
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index d118904d0ca..c2de12e7dce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -94,7 +94,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.File
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageIDsProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamenodeRegistrationProto;
@@ -226,7 +226,7 @@ public class PBHelper {
 
   // DatanodeId
   public static DatanodeID convert(DatanodeIDProto dn) {
-    return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getStorageID(),
+    return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getDatanodeUuid(),
         dn.getXferPort(), dn.getInfoPort(), dn.getIpcPort());
   }
 
@@ -234,7 +234,7 @@ public class PBHelper {
     return DatanodeIDProto.newBuilder()
         .setIpAddr(dn.getIpAddr())
         .setHostName(dn.getHostName())
-        .setStorageID(dn.getStorageID())
+        .setDatanodeUuid(dn.getStorageID())
         .setXferPort(dn.getXferPort())
         .setInfoPort(dn.getInfoPort())
         .setIpcPort(dn.getIpcPort()).build();
@@ -276,11 +276,11 @@ public class PBHelper {
   public static BlockWithLocationsProto convert(BlockWithLocations blk) {
     return BlockWithLocationsProto.newBuilder()
         .setBlock(convert(blk.getBlock()))
-        .addAllStorageIDs(Arrays.asList(blk.getStorageIDs())).build();
+        .addAllStorageUuids(Arrays.asList(blk.getStorageIDs())).build();
   }
 
   public static BlockWithLocations convert(BlockWithLocationsProto b) {
-    return new BlockWithLocations(convert(b.getBlock()), b.getStorageIDsList()
+    return new BlockWithLocations(convert(b.getBlock()), b.getStorageUuidsList()
         .toArray(new String[0]));
   }
 
@@ -746,7 +746,7 @@ public class PBHelper {
       builder.addBlocks(PBHelper.convert(blocks[i]));
     }
     builder.addAllTargets(convert(cmd.getTargets()))
-        .addAllTargetStorageIDs(convert(cmd.getTargetStorageIDs()));
+        .addAllTargetStorageUuids(convert(cmd.getTargetStorageIDs()));
     return builder.build();
   }
 
@@ -759,11 +759,11 @@ public class PBHelper {
     return Arrays.asList(ret);
   }
 
-  private static List<StorageIDsProto> convert(String[][] targetStorageIDs) {
-    StorageIDsProto[] ret = new StorageIDsProto[targetStorageIDs.length];
-    for (int i = 0; i < targetStorageIDs.length; i++) {
-      ret[i] = StorageIDsProto.newBuilder()
-          .addAllStorageIDs(Arrays.asList(targetStorageIDs[i])).build();
+  private static List<StorageUuidsProto> convert(String[][] targetStorageUuids) {
+    StorageUuidsProto[] ret = new StorageUuidsProto[targetStorageUuids.length];
+    for (int i = 0; i < targetStorageUuids.length; i++) {
+      ret[i] = StorageUuidsProto.newBuilder()
+          .addAllStorageUuids(Arrays.asList(targetStorageUuids[i])).build();
     }
     return Arrays.asList(ret);
   }
@@ -843,10 +843,10 @@ public class PBHelper {
       targets[i] = PBHelper.convert(targetList.get(i));
     }
 
-    List<StorageIDsProto> targetStorageIDsList = blkCmd.getTargetStorageIDsList();
-    String[][] targetStorageIDs = new String[targetStorageIDsList.size()][];
+    List<StorageUuidsProto> targetStorageUuidsList = blkCmd.getTargetStorageUuidsList();
+    String[][] targetStorageIDs = new String[targetStorageUuidsList.size()][];
     for(int i = 0; i < targetStorageIDs.length; i++) {
-      List<String> storageIDs = targetStorageIDsList.get(i).getStorageIDsList();
+      List<String> storageIDs = targetStorageUuidsList.get(i).getStorageUuidsList();
       targetStorageIDs[i] = storageIDs.toArray(new String[storageIDs.size()]);
     }
 
@@ -1375,7 +1375,7 @@ public class PBHelper {
     return DatanodeStorageProto.newBuilder()
         .setState(PBHelper.convertState(s.getState()))
         .setStorageType(PBHelper.convertStorageType(s.getStorageType()))
-        .setStorageID(s.getStorageID()).build();
+        .setStorageUuid(s.getStorageID()).build();
   }
 
   private static StorageState convertState(State state) {
@@ -1406,11 +1406,11 @@ public class PBHelper {
 
   public static DatanodeStorage convert(DatanodeStorageProto s) {
     if (s.hasStorageType()) {
-      return new DatanodeStorage(s.getStorageID(),
+      return new DatanodeStorage(s.getStorageUuid(),
           PBHelper.convertState(s.getState()),
           PBHelper.convertType(s.getStorageType()));
     } else {
-      return new DatanodeStorage(s.getStorageID(),
+      return new DatanodeStorage(s.getStorageUuid(),
           PBHelper.convertState(s.getState()));
     }
   }
@@ -1440,7 +1440,7 @@ public class PBHelper {
     return StorageReportProto.newBuilder()
         .setBlockPoolUsed(r.getBlockPoolUsed()).setCapacity(r.getCapacity())
         .setDfsUsed(r.getDfsUsed()).setRemaining(r.getRemaining())
-        .setStorageID(r.getStorageID()).build();
+        .setStorageUuid(r.getStorageID()).build();
   }
 
   public static JournalInfo convert(JournalInfoProto info) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index 968b0740355..fda60857ce1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -141,7 +141,7 @@ message GetAdditionalDatanodeRequestProto {
   repeated DatanodeInfoProto excludes = 4;
   required uint32 numAdditionalNodes = 5;
   required string clientName = 6;
-  repeated string existingStorageIDs = 7;
+  repeated string existingStorageUuids = 7;
 }
 
 message GetAdditionalDatanodeResponseProto {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
index 2f845e08a43..6f93afcc96b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
@@ -53,7 +53,7 @@ message DatanodeStorageProto {
     READ_ONLY = 1;
   }
 
-  required string storageID = 1; // Unique identifier for the storage
+  required string storageUuid = 1;
   optional StorageState state = 2 [default = NORMAL];
   optional StorageTypeProto storageType = 3;
 }
@@ -110,7 +110,7 @@ message BlockCommandProto {
   required string blockPoolId = 2;
   repeated BlockProto blocks = 3;
   repeated DatanodeInfosProto targets = 4;
-  repeated StorageIDsProto targetStorageIDs = 5;
+  repeated StorageUuidsProto targetStorageUuids = 5;
 }
 
 /**
@@ -176,7 +176,7 @@ message HeartbeatRequestProto {
 }
 
 message StorageReportProto {
-  required string storageID = 1;
+  required string storageUuid = 1;
   optional bool failed = 2 [ default = false ];
   optional uint64 capacity = 3 [ default = 0 ];
   optional uint64 dfsUsed = 4 [ default = 0 ];
@@ -250,7 +250,7 @@ message ReceivedDeletedBlockInfoProto {
  * List of blocks received and deleted for a storage.
  */
 message StorageReceivedDeletedBlocksProto {
-  required string storageID = 1;
+  required string storageUuid = 1;
   repeated ReceivedDeletedBlockInfoProto blocks = 2;
 }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
index c76f7edfa86..47f79bed169 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/InterDatanodeProtocol.proto
@@ -65,7 +65,7 @@ message UpdateReplicaUnderRecoveryRequestProto {
  * Response returns updated block information
  */
 message UpdateReplicaUnderRecoveryResponseProto {
-  required string storageID = 1; // ID of the storage that stores replica
+  optional string storageUuid = 1; // ID of the storage that stores replica
 }
 
 /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index 22121a73b5b..b8775259f8a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -50,7 +50,10 @@ message ExtendedBlockProto {
 message DatanodeIDProto {
   required string ipAddr = 1;    // IP address
   required string hostName = 2;  // hostname
-  required string storageID = 3; // unique storage id
+  required string datanodeUuid = 3;  // UUID assigned to the Datanode. For
+                                     // upgraded clusters this is the same
+                                     // as the original StorageID of the
+                                     // Datanode.
   required uint32 xferPort = 4;  // data streaming port
   required uint32 infoPort = 5;  // info server port
   required uint32 ipcPort = 6;   // ipc server port
@@ -124,8 +127,8 @@ enum StorageTypeProto {
 
 /**
  * A list of storage IDs.
  */
-message StorageIDsProto {
-  repeated string storageIDs = 1;
+message StorageUuidsProto {
+  repeated string storageUuids = 1;
 }
 
@@ -348,7 +351,7 @@ message BlockProto {
  */
 message BlockWithLocationsProto {
   required BlockProto block = 1;   // Block
-  repeated string storageIDs = 2;  // Datanodes with replicas of the block
+  repeated string storageUuids = 2; // Datanodes with replicas of the block
 }
 
 /**
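Note on compatibility: the renames above are safe on the wire because Protocol Buffers serializes only field numbers and wire types, never field names, so `storageID = 1` and `storageUuid = 1` encode to identical bytes. What does change is the generated Java API (getStorageID() becomes getStorageUuid(), and so on), which is why every translator class in this patch switches accessors in the same commit. Below is a minimal round-trip sketch, assuming the regenerated DatanodeProtocolProtos classes from this patch are on the classpath; the method name and sample ID are illustrative only, not part of the patch.

    import com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto;

    static void storageUuidRoundTrip() throws InvalidProtocolBufferException {
      // Build a storage descriptor with the renamed accessor;
      // "DS-example" is a made-up identifier for illustration.
      DatanodeStorageProto stored = DatanodeStorageProto.newBuilder()
          .setStorageUuid("DS-example")
          .build();

      // Only tag number 1 plus the wire type is serialized, so bytes produced
      // by a pre-rename build via setStorageID() would parse identically here.
      DatanodeStorageProto parsed =
          DatanodeStorageProto.parseFrom(stored.toByteArray());
      assert parsed.getStorageUuid().equals("DS-example");
    }

The same reasoning explains why UpdateReplicaUnderRecoveryResponseProto can relax storageUuid from required to optional without breaking old readers: presence checks, not names, govern parsing.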