svn merge -c 1596344 merging from trunk to branch-2 to fix:HDFS-6419. TestBookKeeperHACheckpoints.TestSBNCheckpoints fails on trunk. Contributed by Akira AJISAKA.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1596352 13f79535-47bb-0310-9956-ffa450edef68
parent ffc50dfe01
commit 1f3ec0616a
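In substance, the patch extracts the Configuration setup that TestStandbyCheckpoints and TestBookKeeperHACheckpoints had been duplicating into a single protected setupCommonConfig() helper on the base class, and makes the BookKeeper subclass reuse it instead of keeping its own copy (the subclass's copy, unlike the base setup, never created tmpOivImgDir or set DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY). The sketch below only illustrates that pattern with hypothetical class names; it is not code from this patch.

// Illustrative only: BaseCheckpointTest/VariantCheckpointTest are hypothetical
// names, not classes touched by this commit.
import org.apache.hadoop.conf.Configuration;
import org.junit.Before;

class BaseCheckpointTest {
  // Shared settings live in one protected helper...
  protected Configuration setupCommonConfig() {
    Configuration conf = new Configuration();
    // ...every knob the inherited tests rely on goes here...
    return conf;
  }

  @Before
  public void setupCluster() throws Exception {
    Configuration conf = setupCommonConfig();
    // ...start the default cluster with conf...
  }
}

class VariantCheckpointTest extends BaseCheckpointTest {
  @Override
  @Before
  public void setupCluster() throws Exception {
    // Reuse the shared settings instead of copying them, then add only the
    // variant-specific pieces; new base-class requirements are picked up
    // automatically instead of silently drifting.
    Configuration conf = setupCommonConfig();
    // ...variant-specific settings and cluster startup with conf...
  }
}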
@@ -251,6 +251,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6421. Fix vecsum.c compile on BSD and some other systems. (Mit Desai
     via Colin Patrick McCabe)
 
+    HDFS-6419. TestBookKeeperHACheckpoints#TestSBNCheckpoints fails on trunk.
+    (Akira AJISAKA via kihwal)
+
 Release 2.4.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -23,13 +23,10 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.ha.TestStandbyCheckpoints;
-import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
-import com.google.common.collect.ImmutableList;
-
 /**
  * Runs the same tests as TestStandbyCheckpoints, but
  * using a bookkeeper journal manager as the shared directory
@@ -43,19 +40,11 @@ public class TestBookKeeperHACheckpoints extends TestStandbyCheckpoints {
   @Override
   @Before
   public void setupCluster() throws Exception {
-    Configuration conf = new Configuration();
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
-    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    Configuration conf = setupCommonConfig();
     conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
         BKJMUtil.createJournalURI("/checkpointing" + journalCount++)
         .toString());
     BKJMUtil.addJournalManagerDefinition(conf);
-    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
-    conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, SlowCodec.class
-        .getCanonicalName());
-    CompressionCodecFactory.setCodecClasses(conf, ImmutableList
-        .<Class> of(SlowCodec.class));
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
         .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
@@ -80,24 +80,12 @@ public class TestStandbyCheckpoints {
   @SuppressWarnings("rawtypes")
   @Before
   public void setupCluster() throws Exception {
-    tmpOivImgDir = Files.createTempDir();
-    Configuration conf = new Configuration();
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
-    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
-    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY,
-        tmpOivImgDir.getAbsolutePath());
+    Configuration conf = setupCommonConfig();
 
     // Dial down the retention of extra edits and checkpoints. This is to
     // help catch regressions of HDFS-4238 (SBN should not purge shared edits)
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
 
-    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
-    conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
-        SlowCodec.class.getCanonicalName());
-    CompressionCodecFactory.setCodecClasses(conf,
-        ImmutableList.<Class>of(SlowCodec.class));
-
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
@@ -116,7 +104,24 @@ public class TestStandbyCheckpoints {
 
     cluster.transitionToActive(0);
   }
+
+  protected Configuration setupCommonConfig() {
+    tmpOivImgDir = Files.createTempDir();
+
+    Configuration conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
+    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+    conf.set(DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY,
+        tmpOivImgDir.getAbsolutePath());
+    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
+    conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
+        SlowCodec.class.getCanonicalName());
+    CompressionCodecFactory.setCodecClasses(conf,
+        ImmutableList.<Class>of(SlowCodec.class));
+    return conf;
+  }
 
   @After
   public void shutdownCluster() throws IOException {
     if (cluster != null) {
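With setupCommonConfig() now a protected hook on TestStandbyCheckpoints, further variants of this test can build on it the same way TestBookKeeperHACheckpoints does above. A minimal sketch, assuming a hypothetical subclass name and eliding the cluster startup that the base class performs:

package org.apache.hadoop.hdfs.server.namenode.ha;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.junit.Before;

// Hypothetical subclass, not part of this patch.
public class TestStandbyCheckpointsSlowTail extends TestStandbyCheckpoints {
  @Override
  @Before
  public void setupCluster() throws Exception {
    // Pick up the checkpoint timings, OIV image dir, and SlowCodec image
    // compression from the base class in one call...
    Configuration conf = setupCommonConfig();
    // ...then override just the variant-specific setting.
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 5);
    // ...build the MiniDFSNNTopology and start the MiniDFSCluster with conf,
    // as the base class's setupCluster() does.
  }
}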