From a795bc42d012bf75872ae412cb2644c2d80177e3 Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Fri, 14 Feb 2014 20:56:24 +0000
Subject: [PATCH] HDFS-5494. Merge Protobuf-based-FSImage code from trunk - fix
 build break after merge. (Contributed by Jing Zhao)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1568517 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt       | 3 +++
 .../hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java  | 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/FSImageUtil.java | 2 +-
 .../blockmanagement/TestReplicationPolicyConsiderLoad.java  | 4 +++-
 4 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt
index 998e18d5c2c..e94277daa4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt
@@ -36,3 +36,6 @@ HDFS-5535 subtasks:
 
     HDFS-5535. Add BlockPoolSliceStorage 'trash' to handle block deletions
     during rolling upgrades. (Arpit Agarwal)
+    HDFS-5494. Merge Protobuf-based-FSImage code from trunk - fix build
+    break after the merge. (Jing Zhao via Arpit Agarwal)
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index 92245434ccc..22b60986978 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -428,7 +428,7 @@ public final class FSImageFormatProtobuf {
 
       FileSummary.Builder b = FileSummary.newBuilder()
           .setOndiskVersion(FSImageUtil.FILE_VERSION)
-          .setLayoutVersion(LayoutVersion.getCurrentLayoutVersion());
+          .setLayoutVersion(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
 
       codec = compression.getImageCodec();
       if (codec != null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageUtil.java
index b9953480f26..1a5a668b767 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageUtil.java
@@ -71,7 +71,7 @@ public final class FSImageUtil {
           + summary.getOndiskVersion());
     }
 
-    if (!LayoutVersion.supports(Feature.PROTOBUF_FORMAT,
+    if (!NameNodeLayoutVersion.supports(Feature.PROTOBUF_FORMAT,
         summary.getLayoutVersion())) {
       throw new IOException("Unsupported layout version "
           + summary.getLayoutVersion());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
index 0b84fd7c953..73d397760a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.test.PathUtils;
@@ -87,7 +88,8 @@ public class TestReplicationPolicyConsiderLoad {
     // Register DNs
     for (int i=0; i < 6; i++) {
       DatanodeRegistration dnr = new DatanodeRegistration(dataNodes[i],
-          new StorageInfo(), new ExportedBlockKeys(), VersionInfo.getVersion());
+          new StorageInfo(NodeType.DATA_NODE), new ExportedBlockKeys(),
+          VersionInfo.getVersion());
       dnrList.add(dnr);
       dnManager.registerDatanode(dnr);
       dataNodes[i].getStorageInfos()[0].setUtilizationForTesting(