fix eol style on TestBootstrapStandbyWithBKJM

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1611726 13f79535-47bb-0310-9956-ffa450edef68
Author: Colin McCabe
Date:   2014-07-18 17:23:05 +00:00
parent 5f9e52f745
commit 936895722e
1 changed file with 169 additions and 169 deletions
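
The change is content-neutral: every line of the test is rewritten with its line ending normalized, which is why the diff reports equal additions and deletions. A minimal sketch of that kind of normalization follows; the EolFixer class, its main entry point, and the in-place rewrite are illustrative assumptions, not code from this commit:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

// Rewrites a text file in place, converting CRLF (and any stray CR) endings to LF.
public class EolFixer {
  public static void main(String[] args) throws IOException {
    Path file = Paths.get(args[0]); // e.g. TestBootstrapStandbyWithBKJM.java
    String text = new String(Files.readAllBytes(file), StandardCharsets.UTF_8);
    String unix = text.replace("\r\n", "\n").replace("\r", "\n");
    Files.write(file, unix.getBytes(StandardCharsets.UTF_8));
  }
}

In an svn-backed tree like this one, such a fix is typically paired with setting the svn:eol-style property on the file so the endings stay consistent across checkouts.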


@@ -1,169 +1,169 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.contrib.bkjournal;

import java.io.File;
import java.io.FileFilter;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.ha.TestStandbyCheckpoints.SlowCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;

import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

import com.google.common.collect.ImmutableList;

public class TestBootstrapStandbyWithBKJM {
  private static BKJMUtil bkutil;
  protected MiniDFSCluster cluster;

  @BeforeClass
  public static void setupBookkeeper() throws Exception {
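    // Start a local three-bookie BookKeeper mini-cluster to back the shared journal.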
    bkutil = new BKJMUtil(3);
    bkutil.start();
  }

  @AfterClass
  public static void teardownBookkeeper() throws Exception {
    bkutil.teardown();
  }

  @After
  public void teardown() {
    if (cluster != null) {
      cluster.shutdown();
    }
  }

  @Before
  public void setUp() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
        .createJournalURI("/bootstrapStandby").toString());
    BKJMUtil.addJournalManagerDefinition(conf);
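    // SlowCodec (borrowed from TestStandbyCheckpoints) sleeps while compressing
    // the fsimage, so checkpoints run slowly enough to be observed in flight.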
    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
    conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
        SlowCodec.class.getCanonicalName());
    CompressionCodecFactory.setCodecClasses(conf,
        ImmutableList.<Class> of(SlowCodec.class));
    MiniDFSNNTopology topology = new MiniDFSNNTopology()
        .addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(
            new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001)).addNN(
            new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
        .numDataNodes(1).manageNameDfsSharedDirs(false).build();
    cluster.waitActive();
  }

  /**
   * While bootstrapping, in-progress transaction entries should be skipped.
   * Bootstrap usage for BKJM: "-force", "-nonInteractive", "-skipSharedEditsCheck"
   */
  @Test
  public void testBootstrapStandbyWithActiveNN() throws Exception {
    // make nn0 active
    cluster.transitionToActive(0);

    // do ops and generate in-progress edit log data
    Configuration confNN1 = cluster.getConfiguration(1);
    DistributedFileSystem dfs = (DistributedFileSystem) HATestUtil
        .configureFailoverFs(cluster, confNN1);
    for (int i = 1; i <= 10; i++) {
      dfs.mkdirs(new Path("/test" + i));
    }
    dfs.close();

    // shutdown nn1 and delete its edit log files
    cluster.shutdownNameNode(1);
    deleteEditLogIfExists(confNN1);
    cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_ENTER, true);
    cluster.getNameNodeRpc(0).saveNamespace();
    cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, true);
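
    // saveNamespace rolled the edit log, so the only shared-journal entries
    // past the new checkpoint sit in an in-progress BKJM segment.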
    // check without -skipSharedEditsCheck, Bootstrap should fail for BKJM
    // immediately after saveNamespace
    int rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive" },
        confNN1);
    Assert.assertEquals("Mismatches return code", 6, rc);

    // check with -skipSharedEditsCheck
    rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive",
        "-skipSharedEditsCheck" }, confNN1);
    Assert.assertEquals("Mismatches return code", 0, rc);

    // Checkpoint as fast as we can, in a tight loop.
    confNN1.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
    cluster.restartNameNode(1);
    cluster.transitionToStandby(1);

    NameNode nn0 = cluster.getNameNode(0);
    HATestUtil.waitForStandbyToCatchUp(nn0, cluster.getNameNode(1));
    long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0)
        .getFSImage().getMostRecentCheckpointTxId();
    HATestUtil.waitForCheckpoint(cluster, 1,
        ImmutableList.of((int) expectedCheckpointTxId));

    // Should have copied over the namespace
    FSImageTestUtil.assertNNHasCheckpoints(cluster, 1,
        ImmutableList.of((int) expectedCheckpointTxId));
    FSImageTestUtil.assertNNFilesMatch(cluster);
  }

  private void deleteEditLogIfExists(Configuration confNN1) {
    String editDirs = confNN1.get(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
    String[] listEditDirs = StringUtils.split(editDirs, ',');
    Assert.assertTrue("Wrong edit directory path!", listEditDirs.length > 0);

    for (String dir : listEditDirs) {
      File curDir = new File(dir, "current");
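      // Note: because of the negated check, this filter matches every file in
      // current/ except the edits_* files themselves.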
      File[] listFiles = curDir.listFiles(new FileFilter() {
        @Override
        public boolean accept(File f) {
          return !f.getName().startsWith("edits");
        }
      });
      if (listFiles != null && listFiles.length > 0) {
        for (File file : listFiles) {
          Assert.assertTrue("Failed to delete edit files!", file.delete());
        }
      }
    }
  }
}