From 1f3ec0616a3009253b30517c5a726ec7932bff86 Mon Sep 17 00:00:00 2001
From: Kihwal Lee
Date: Tue, 20 May 2014 18:19:54 +0000
Subject: [PATCH] svn merge -c 1596344 merging from trunk to branch-2 to
 fix:HDFS-6419. TestBookKeeperHACheckpoints.TestSBNCheckpoints fails on
 trunk. Contributed by Akira AJISAKA.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1596352 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 ++
 .../TestBookKeeperHACheckpoints.java          | 13 +-------
 .../namenode/ha/TestStandbyCheckpoints.java   | 33 +++++++++++--------
 3 files changed, 23 insertions(+), 26 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2b7c2ad2f6b..f13af128492 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -251,6 +251,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6421. Fix vecsum.c compile on BSD and some other systems. (Mit Desai
     via Colin Patrick McCabe)
 
+    HDFS-6419. TestBookKeeperHACheckpoints#TestSBNCheckpoints fails on trunk.
+    (Akira AJISAKA via kihwal)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
index 02d477be75a..91cab553bcd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperHACheckpoints.java
@@ -23,13 +23,10 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.ha.TestStandbyCheckpoints;
-import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
-import com.google.common.collect.ImmutableList;
-
 /**
  * Runs the same tests as TestStandbyCheckpoints, but
  * using a bookkeeper journal manager as the shared directory
@@ -43,19 +40,11 @@ public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
   @Override
   @Before
   public void setupCluster() throws Exception {
-    Configuration conf = new Configuration();
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
-    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    Configuration conf = setupCommonConfig();
     conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
              BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
              .toString());
     BKJMUtil.addJournalManagerDefinition(conf);
-    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
-    conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, SlowCodec.class
-        .getCanonicalName());
-    CompressionCodecFactory.setCodecClasses(conf, ImmutableList
-        .<Class> of(SlowCodec.class));
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
         .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
index 3813319f029..2b5e89af8b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
@@ -80,24 +80,12 @@ public class TestStandbyCheckpoints {
   @SuppressWarnings("rawtypes")
   @Before
   public void setupCluster() throws Exception {
-    tmpOivImgDir = Files.createTempDir();
-    Configuration conf = new Configuration();
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
-    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY,
-        tmpOivImgDir.getAbsolutePath());
+    Configuration conf = setupCommonConfig();
 
     // Dial down the retention of extra edits and checkpoints. This is to
     // help catch regressions of HDFS-4238 (SBN should not purge shared edits)
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
-
-    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
-    conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
-        SlowCodec.class.getCanonicalName());
-    CompressionCodecFactory.setCodecClasses(conf,
-        ImmutableList.of(SlowCodec.class));
 
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
@@ -116,7 +104,24 @@ public class TestStandbyCheckpoints {
     cluster.transitionToActive(0);
   }
-  
+
+  protected Configuration setupCommonConfig() {
+    tmpOivImgDir = Files.createTempDir();
+
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
+    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY,
+        tmpOivImgDir.getAbsolutePath());
+    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
+    conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
+        SlowCodec.class.getCanonicalName());
+    CompressionCodecFactory.setCodecClasses(conf,
+        ImmutableList.of(SlowCodec.class));
+    return conf;
+  }
+
   @After
   public void shutdownCluster() throws IOException {
     if (cluster != null) {
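
The patch above fixes the failure by extracting the configuration that TestStandbyCheckpoints and TestBookKeeperHACheckpoints must agree on into a protected setupCommonConfig() helper, so a subclass's setupCluster() only layers its own shared-edits setup on top. Below is a minimal sketch of how that pattern is meant to be consumed by a further subclass. The class name TestExampleHACheckpoints and the file:// shared-edits URI are invented for illustration, and the sketch assumes the cluster field is accessible from the base class, as the BookKeeper subclass's usage suggests; only setupCommonConfig(), the config keys, and the topology idiom come from the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.ha.TestStandbyCheckpoints;
import org.junit.Before;

/** Hypothetical subclass; illustrates the pattern, not part of this patch. */
public class TestExampleHACheckpoints extends TestStandbyCheckpoints {
  @Override
  @Before
  public void setupCluster() throws Exception {
    // Shared knobs from the new helper: checkpoint check period and txn
    // count, edit-tailing period, legacy OIV image dir, and SlowCodec
    // image compression.
    Configuration conf = setupCommonConfig();

    // Subclass-specific part: where the shared edits live
    // (illustrative URI only).
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
        "file:///tmp/example-shared-edits");

    // Two-NameNode HA topology, mirroring the patched setupCluster() bodies.
    MiniDFSNNTopology topology = new MiniDFSNNTopology()
        .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
            .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
            .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));

    // Assumes 'cluster' is a protected field of TestStandbyCheckpoints.
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(topology)
        .numDataNodes(0)
        .build();
    cluster.waitActive();
    cluster.transitionToActive(0);
  }
}

Keeping the BookKeeper-specific lines (BKJMUtil.createJournalURI, BKJMUtil.addJournalManagerDefinition) out of the shared helper is what lets both tests pick up future common-config changes, such as the legacy OIV image directory added here, without their configurations drifting apart again.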