From 80ba8f8d4dc67033e1a250a7362a8470f9116d63 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Tue, 30 Aug 2011 04:07:03 +0000
Subject: [PATCH] svn merge -c 1163081 from trunk for HDFS-1217. Contributed by Laxman

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1163082 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt    |  3 +++
 .../hadoop/hdfs/server/namenode/NameNode.java  |  9 ++++----
 .../TestDataNodeMultipleRegistrations.java     | 23 ++++++++++---------
 .../hdfs/server/namenode/FSImageTestUtil.java  |  6 ++++-
 .../TestOfflineImageViewer.java                |  3 ++-
 5 files changed, 27 insertions(+), 17 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index abc03faee96..595f4678ce6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -679,6 +679,9 @@ Release 0.23.0 - Unreleased
     HDFS-2266. Add Namesystem and SafeMode interfaces to avoid directly
     referring to FSNamesystem in BlockManager. (szetszwo)
 
+    HDFS-1217. Change some NameNode methods from public to package private.
+    (Laxman via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 67dcefc5ee7..f411df82dd3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -300,7 +300,7 @@ public class NameNode implements NamenodeProtocols {
    * @param filesystemURI
    * @return address of file system
    */
-  public static InetSocketAddress getAddress(URI filesystemURI) {
+  static InetSocketAddress getAddress(URI filesystemURI) {
     String authority = filesystemURI.getAuthority();
     if (authority == null) {
       throw new IllegalArgumentException(String.format(
@@ -1262,7 +1262,7 @@ public class NameNode implements NamenodeProtocols {
    * @param nodeReg data node registration
    * @throws IOException
    */
-  public void verifyRequest(NodeRegistration nodeReg) throws IOException {
+  void verifyRequest(NodeRegistration nodeReg) throws IOException {
     verifyVersion(nodeReg.getVersion());
     if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID())) {
       LOG.warn("Invalid registrationID - expected: "
@@ -1278,12 +1278,13 @@ public class NameNode implements NamenodeProtocols {
    * @param version
    * @throws IOException
    */
-  public void verifyVersion(int version) throws IOException {
+  void verifyVersion(int version) throws IOException {
     if (version != FSConstants.LAYOUT_VERSION)
       throw new IncorrectVersionException(version, "data node");
   }
 
-  public FSImage getFSImage() {
+  /** get FSImage */
+  FSImage getFSImage() {
     return namesystem.dir.fsImage;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
index a98b0afd6e8..7ad0b788173 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode.BPOfferService;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.junit.Assert;
 import org.junit.Before;
@@ -65,14 +66,14 @@ public class TestDataNodeMultipleRegistrations {
     assertNotNull("cannot create nn1", nn1);
     assertNotNull("cannot create nn2", nn2);
 
-    String bpid1 = nn1.getFSImage().getBlockPoolID();
-    String bpid2 = nn2.getFSImage().getBlockPoolID();
-    String cid1 = nn1.getFSImage().getClusterID();
-    String cid2 = nn2.getFSImage().getClusterID();
-    int lv1 = nn1.getFSImage().getLayoutVersion();
-    int lv2 = nn2.getFSImage().getLayoutVersion();
-    int ns1 = nn1.getFSImage().getNamespaceID();
-    int ns2 = nn2.getFSImage().getNamespaceID();
+    String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
+    String bpid2 = FSImageTestUtil.getFSImage(nn2).getBlockPoolID();
+    String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID();
+    String cid2 = FSImageTestUtil.getFSImage(nn2).getClusterID();
+    int lv1 = FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
+    int lv2 = FSImageTestUtil.getFSImage(nn2).getLayoutVersion();
+    int ns1 = FSImageTestUtil.getFSImage(nn1).getNamespaceID();
+    int ns2 = FSImageTestUtil.getFSImage(nn2).getNamespaceID();
     assertNotSame("namespace ids should be different", ns1, ns2);
     LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri="
         + nn1.getNameNodeAddress());
@@ -135,9 +136,9 @@ public class TestDataNodeMultipleRegistrations {
     NameNode nn1 = cluster.getNameNode();
     assertNotNull("cannot create nn1", nn1);
 
-    String bpid1 = nn1.getFSImage().getBlockPoolID();
-    String cid1 = nn1.getFSImage().getClusterID();
-    int lv1 = nn1.getFSImage().getLayoutVersion();
+    String bpid1 = FSImageTestUtil.getFSImage(nn1).getBlockPoolID();
+    String cid1 = FSImageTestUtil.getFSImage(nn1).getClusterID();
+    int lv1 = FSImageTestUtil.getFSImage(nn1).getLayoutVersion();
     LOG.info("nn1: lv=" + lv1 + ";cid=" + cid1 + ";bpid=" + bpid1 + ";uri="
         + nn1.getNameNodeAddress());
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
index 5deccd5c221..4a8edb8475a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
@@ -27,7 +27,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
@@ -411,4 +410,9 @@ public abstract class FSImageTestUtil {
       }
     }
   }
+
+  /** get the fsImage */
+  public static FSImage getFSImage(NameNode node) {
+    return node.getFSImage();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index 5fc96882f6f..05311f53597 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -129,7 +129,8 @@ public class TestOfflineImageViewer extends TestCase {
 
       // Determine location of fsimage file
       orig = FSImageTestUtil.findLatestImageFile(
-          cluster.getNameNode().getFSImage().getStorage().getStorageDir(0));
+          FSImageTestUtil.getFSImage(
+              cluster.getNameNode()).getStorage().getStorageDir(0));
       if (orig == null) {
         fail("Didn't generate or can't find fsimage");
       }
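
A minimal usage sketch, not part of the patch above: it shows how test code outside org.apache.hadoop.hdfs.server.namenode reaches the FSImage once NameNode#getFSImage() is package private. The FSImageTestUtil.getFSImage() helper and the FSImage getters are the ones introduced or used in this patch; the wrapper class and method names below are hypothetical, for illustration only.

import org.apache.hadoop.hdfs.server.namenode.FSImage;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

// Hypothetical example class, not part of the patch.
public class FsImageAccessExample {
  /** Summarize a NameNode's image identifiers the way the updated tests do. */
  static String describe(NameNode nn) {
    // FSImageTestUtil lives in the same package as NameNode, so it can reach
    // the package-private getFSImage() on behalf of tests in other packages.
    FSImage image = FSImageTestUtil.getFSImage(nn);
    return "lv=" + image.getLayoutVersion()
        + ";cid=" + image.getClusterID()
        + ";bpid=" + image.getBlockPoolID();
  }
}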