HDFS-5494. Merge Protobuf-based-FSImage code from trunk - fix build break after merge. (Contributed by Jing Zhao)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1568517 13f79535-47bb-0310-9956-ffa450edef68
Author: Arpit Agarwal
Date:   2014-02-14 20:56:24 +00:00
Parent: ba4b10354c
Commit: a795bc42d0

4 changed files with 8 additions and 3 deletions

@@ -36,3 +36,6 @@ HDFS-5535 subtasks:
 
     HDFS-5535. Add BlockPoolSliceStorage 'trash' to handle block deletions
     during rolling upgrades. (Arpit Agarwal)
+
+    HDFS-5494. Merge Protobuf-based-FSImage code from trunk - fix build
+    break after the merge. (Jing Zhao via Arpit Agarwal)

@@ -428,7 +428,7 @@ private void saveInternal(FileOutputStream fout,
     FileSummary.Builder b = FileSummary.newBuilder()
         .setOndiskVersion(FSImageUtil.FILE_VERSION)
-        .setLayoutVersion(LayoutVersion.getCurrentLayoutVersion());
+        .setLayoutVersion(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
     codec = compression.getImageCodec();
     if (codec != null) {
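
For context: the HDFS-5535 branch splits the old monolithic LayoutVersion
into per-role NameNodeLayoutVersion and DataNodeLayoutVersion, so trunk's
LayoutVersion.getCurrentLayoutVersion() no longer exists here, which is what
broke the build after the merge. A minimal write-side sketch of the pattern
after the fix; the wrapper class and method are hypothetical, while the
builder calls and the NameNodeLayoutVersion constant come straight from the
hunk above:

    import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
    import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
    import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;

    // Hypothetical helper showing how the image writer stamps the
    // FileSummary header written at the end of the image file.
    class SummaryHeaderSketch {
      static FileSummary.Builder newSummaryBuilder() {
        return FileSummary.newBuilder()
            // Version of the overall image container format.
            .setOndiskVersion(FSImageUtil.FILE_VERSION)
            // NameNode-specific layout version; replaces the pre-split
            // LayoutVersion.getCurrentLayoutVersion().
            .setLayoutVersion(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
      }
    }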

@@ -71,7 +71,7 @@ public static FileSummary loadSummary(RandomAccessFile file)
           + summary.getOndiskVersion());
     }
-    if (!LayoutVersion.supports(Feature.PROTOBUF_FORMAT,
+    if (!NameNodeLayoutVersion.supports(Feature.PROTOBUF_FORMAT,
         summary.getLayoutVersion())) {
       throw new IOException("Unsupported layout version "
           + summary.getLayoutVersion());
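
The read side mirrors the rename: the protobuf-format feature check now goes
through NameNodeLayoutVersion.supports. A hedged sketch of that guard in
isolation (the wrapper class and method name are illustrative; the check
itself is the one in the hunk above):

    import java.io.IOException;

    import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
    import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;

    // Illustrative wrapper: reject an image whose layout version predates
    // the protobuf-based FSImage format.
    class ProtobufFormatCheckSketch {
      static void checkLayoutVersion(int layoutVersion) throws IOException {
        // HDFS layout versions are negative and decrease as features are
        // added; supports() asks whether PROTOBUF_FORMAT was already
        // present at the given version.
        if (!NameNodeLayoutVersion.supports(Feature.PROTOBUF_FORMAT,
            layoutVersion)) {
          throw new IOException("Unsupported layout version " + layoutVersion);
        }
      }
    }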

@@ -27,6 +27,7 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.test.PathUtils;
@@ -87,7 +88,8 @@ public static void setupCluster() throws IOException {
     // Register DNs
     for (int i=0; i < 6; i++) {
       DatanodeRegistration dnr = new DatanodeRegistration(dataNodes[i],
-          new StorageInfo(), new ExportedBlockKeys(), VersionInfo.getVersion());
+          new StorageInfo(NodeType.DATA_NODE), new ExportedBlockKeys(),
+          VersionInfo.getVersion());
       dnrList.add(dnr);
       dnManager.registerDatanode(dnr);
       dataNodes[i].getStorageInfos()[0].setUtilizationForTesting(
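
The test fix tracks another branch change: StorageInfo now records which node
type it describes, so the no-arg constructor the trunk test used does not
exist on this branch and NodeType.DATA_NODE must be passed explicitly. A
sketch of the registration call in isolation; the helper class is
hypothetical, the constructor shapes come from the hunk above:

    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
    import org.apache.hadoop.hdfs.server.common.StorageInfo;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
    import org.apache.hadoop.util.VersionInfo;

    // Hypothetical helper mirroring the fixed test code: build a
    // registration for a DataNode with a node-typed StorageInfo.
    class DnRegistrationSketch {
      static DatanodeRegistration newRegistration(DatanodeID dn) {
        return new DatanodeRegistration(dn,
            // The storage record must now say what kind of node owns it.
            new StorageInfo(NodeType.DATA_NODE),
            new ExportedBlockKeys(),
            VersionInfo.getVersion());
      }
    }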