HDFS-5907. Add test cases missed in previous checkin

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1568529 13f79535-47bb-0310-9956-ffa450edef68
Arpit Agarwal 2014-02-14 21:37:16 +00:00
parent 9830ef0d32
commit 8741c3b951
4 changed files with 527 additions and 0 deletions

View File

@@ -0,0 +1,150 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.junit.Test;
import java.io.File;
import java.util.Random;
import java.util.UUID;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
/**
* Test that BlockPoolSliceStorage can correctly generate trash and
* restore directories for a given block file path.
*/
public class TestBlockPoolSliceStorage {
public static final Log LOG = LogFactory.getLog(TestBlockPoolSliceStorage.class);
Random rand = new Random();
BlockPoolSliceStorage storage;
/**
* BlockPoolSliceStorage with a dummy storage directory. The directory
* need not exist. We need to extend BlockPoolSliceStorage so we can
* call {@link Storage#addStorageDir}.
*/
private static class StubBlockPoolSliceStorage extends BlockPoolSliceStorage {
StubBlockPoolSliceStorage(int namespaceID, String bpID, long cTime,
String clusterId) {
super(namespaceID, bpID, cTime, clusterId);
addStorageDir(new StorageDirectory(new File("/tmp/dontcare/" + bpID)));
assertThat(storageDirs.size(), is(1));
}
}
private String makeRandomIpAddress() {
return rand.nextInt(256) + "." +
rand.nextInt(256) + "." +
rand.nextInt(256) + "." +
rand.nextInt(256);
}
private String makeRandomBlockpoolId() {
return "BP-" + rand.nextInt(Integer.MAX_VALUE) +
"-" + makeRandomIpAddress() +
"-" + rand.nextInt(Integer.MAX_VALUE);
}
private BlockPoolSliceStorage makeBlockPoolStorage() {
return new StubBlockPoolSliceStorage(
rand.nextInt(Integer.MAX_VALUE),
makeRandomBlockpoolId(),
rand.nextInt(Integer.MAX_VALUE),
UUID.randomUUID().toString());
}
private String makeRandomBlockFileSubdir(int nestingLevel) {
StringBuilder sb = new StringBuilder();
sb.append("/");
for (int i = 0; i < nestingLevel; ++i) {
sb.append("subdir" + rand.nextInt(64) + "/");
}
return sb.toString();
}
/**
* Test conversion from a block file path to its target trash
* directory.
*/
public void getTrashDirectoryForBlockFile(String fileName, int nestingLevel) {
final String blockFileSubdir = makeRandomBlockFileSubdir(nestingLevel);
final String blockFileName = fileName;
String testFilePath =
storage.getSingularStorageDir().getRoot() + "/" +
Storage.STORAGE_DIR_CURRENT +
blockFileSubdir + blockFileName;
String expectedTrashPath =
storage.getSingularStorageDir().getRoot() + "/" +
BlockPoolSliceStorage.TRASH_ROOT_DIR +
blockFileSubdir.substring(0, blockFileSubdir.length() - 1);
LOG.info("Got subdir " + blockFileSubdir);
LOG.info("Generated file path " + testFilePath);
assertThat(storage.getTrashDirectory(new File(testFilePath)), is(expectedTrashPath));
}
/**
* Test conversion from a block file in a trash directory to its
* target directory for restore.
*/
public void getRestoreDirectoryForBlockFile(String fileName, int nestingLevel) {
BlockPoolSliceStorage storage = makeBlockPoolStorage();
final String blockFileSubdir = makeRandomBlockFileSubdir(nestingLevel);
final String blockFileName = fileName;
String deletedFilePath =
storage.getSingularStorageDir().getRoot() + "/" +
BlockPoolSliceStorage.TRASH_ROOT_DIR +
blockFileSubdir + blockFileName;
String expectedRestorePath =
storage.getSingularStorageDir().getRoot() + "/" +
Storage.STORAGE_DIR_CURRENT +
blockFileSubdir.substring(0, blockFileSubdir.length() - 1);
LOG.info("Generated deleted file path " + deletedFilePath);
assertThat(storage.getRestoreDirectory(new File(deletedFilePath)),
is(expectedRestorePath));
}
@Test (timeout=300000)
public void testGetTrashAndRestoreDirectories() {
storage = makeBlockPoolStorage();
// Test a few different nesting levels since block files
// could be nested such as subdir1/subdir5/blk_...
// Make sure all nesting levels are handled correctly.
for (int i = 0; i < 3; ++i) {
getTrashDirectoryForBlockFile("blk_myblockfile", i);
getTrashDirectoryForBlockFile("blk_myblockfile.meta", i);
getRestoreDirectoryForBlockFile("blk_myblockfile", i);
getRestoreDirectoryForBlockFile("blk_myblockfile.meta", i);
}
}
}
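
The mapping these assertions encode is a prefix swap under the block pool root: a block file under current/<subdirs>/ maps to the same <subdirs> tree under trash/, and restore reverses it. Below is a minimal standalone sketch of that expectation, assuming Storage.STORAGE_DIR_CURRENT and BlockPoolSliceStorage.TRASH_ROOT_DIR resolve to the literal names "current" and "trash"; it is plain string manipulation for illustration, not the DataNode implementation.

// Illustration only: mirrors the path mapping the assertions above expect,
// not the BlockPoolSliceStorage code. Paths are hypothetical examples.
public class TrashPathMappingSketch {
  // current/<subdirs>/blk_x  ->  trash/<subdirs>
  static String toTrashDir(String bpRoot, String blockFilePath) {
    String rel = blockFilePath.substring((bpRoot + "/current").length());
    return bpRoot + "/trash" + rel.substring(0, rel.lastIndexOf('/'));
  }

  // trash/<subdirs>/blk_x  ->  current/<subdirs>
  static String toRestoreDir(String bpRoot, String deletedFilePath) {
    String rel = deletedFilePath.substring((bpRoot + "/trash").length());
    return bpRoot + "/current" + rel.substring(0, rel.lastIndexOf('/'));
  }

  public static void main(String[] args) {
    String bpRoot = "/tmp/dontcare/BP-1234-127.0.0.1-42";  // hypothetical block pool root
    System.out.println(toTrashDir(bpRoot, bpRoot + "/current/subdir1/subdir5/blk_100"));
    // -> /tmp/dontcare/BP-1234-127.0.0.1-42/trash/subdir1/subdir5
    System.out.println(toRestoreDir(bpRoot, bpRoot + "/trash/subdir1/subdir5/blk_100"));
    // -> /tmp/dontcare/BP-1234-127.0.0.1-42/current/subdir1/subdir5
  }
}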

View File

@@ -0,0 +1,177 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.log4j.Level;
import org.junit.Test;
import java.io.IOException;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
/**
* Ensure that the DataNode correctly handles rolling upgrade
* finalize and rollback.
*/
public class TestDataNodeRollingUpgrade {
private static final Log LOG = LogFactory.getLog(TestDataNodeRollingUpgrade.class);
private static final short REPL_FACTOR = 1;
private static final long FILE_SIZE = 1024L;
private static final long SEED = 0x1BADF00DL;
Configuration conf;
MiniDFSCluster cluster = null;
DistributedFileSystem fs;
private void runCmd(DFSAdmin dfsadmin, String... args) throws Exception {
assertThat(dfsadmin.run(args), is(0));
}
private void startRollingUpgrade() throws Exception {
LOG.info("Starting rolling upgrade");
final DFSAdmin dfsadmin = new DFSAdmin(conf);
runCmd(dfsadmin, "-rollingUpgrade", "start");
}
private void finalizeRollingUpgrade() throws Exception {
LOG.info("Finalizing rolling upgrade");
final DFSAdmin dfsadmin = new DFSAdmin(conf);
runCmd(dfsadmin, "-rollingUpgrade", "finalize");
}
private void rollbackRollingUpgrade() throws Exception {
LOG.info("Starting rollback of the rolling upgrade");
// Shutdown the DN and the NN in preparation for rollback.
DataNodeProperties dnprop = cluster.stopDataNode(0);
cluster.shutdownNameNodes();
// Restart the daemons with rollback flags.
cluster.restartNameNode("-rollingupgrade", "rollback");
dnprop.setDnArgs("-rollingupgrade", "rollback");
cluster.restartDataNode(dnprop);
cluster.waitActive();
}
@Test (timeout=600000)
public void testDatanodeRollingUpgradeWithFinalize() throws Exception {
try {
// Start a cluster.
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
cluster.waitActive();
fs = cluster.getFileSystem();
Path testFile1 = new Path("/TestDataNodeRollingUpgrade1.dat");
Path testFile2 = new Path("/TestDataNodeRollingUpgrade2.dat");
// Create files in DFS.
DFSTestUtil.createFile(fs, testFile1, FILE_SIZE, REPL_FACTOR, SEED);
DFSTestUtil.createFile(fs, testFile2, FILE_SIZE, REPL_FACTOR, SEED);
startRollingUpgrade();
// Sleep briefly so that DN learns of the rolling upgrade
// from heartbeats.
cluster.triggerHeartbeats();
Thread.sleep(5000);
fs.delete(testFile2, false);
// Sleep briefly so that block files can be moved to trash
// (this is scheduled for asynchronous execution).
cluster.triggerBlockReports();
Thread.sleep(5000);
finalizeRollingUpgrade();
// Ensure that testFile2 stays deleted after finalization and testFile1 is intact.
assertFalse(fs.exists(testFile2));
assertTrue(fs.exists(testFile1));
} finally {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
}
@Test (timeout=600000)
public void testDatanodeRollingUpgradeWithRollback() throws Exception {
try {
// Start a cluster.
conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
cluster.waitActive();
fs = cluster.getFileSystem();
Path testFile1 = new Path("/TestDataNodeRollingUpgrade1.dat");
// Create files in DFS.
DFSTestUtil.createFile(fs, testFile1, FILE_SIZE, REPL_FACTOR, SEED);
String fileContents1 = DFSTestUtil.readFile(fs, testFile1);
startRollingUpgrade();
// Sleep briefly so that DN learns of the rolling upgrade
// from heartbeats.
cluster.triggerHeartbeats();
Thread.sleep(5000);
LOG.info("Deleting file during rolling upgrade");
fs.delete(testFile1, false);
// Sleep briefly so that block files can be moved to trash
// (this is scheduled for asynchronous execution).
cluster.triggerBlockReports();
Thread.sleep(5000);
assertFalse(fs.exists(testFile1));
// Now perform a rollback to restore DFS to the pre-upgrade state.
rollbackRollingUpgrade();
// Ensure that testFile1 was restored after the rollback.
assertTrue(fs.exists(testFile1));
String fileContents2 = DFSTestUtil.readFile(fs, testFile1);
// Ensure that file contents are the same.
assertThat(fileContents1, is(fileContents2));
} finally {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
}
}
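
Both tests drive the upgrade lifecycle through the same DFSAdmin entry point an operator reaches with `hdfs dfsadmin -rollingUpgrade <start|finalize>`, checking the tool's exit code rather than shelling out. A minimal sketch of that pattern, assuming a Configuration pointing at a running cluster; the helper name runRollingUpgradeCmd is illustrative, not part of the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;

// Sketch only: invokes the admin tool the same way runCmd() in the test does.
class RollingUpgradeAdminSketch {
  static void runRollingUpgradeCmd(Configuration conf, String action) throws Exception {
    DFSAdmin dfsadmin = new DFSAdmin(conf);
    int rc = dfsadmin.run(new String[] { "-rollingUpgrade", action });  // 0 on success
    if (rc != 0) {
      throw new IllegalStateException("-rollingUpgrade " + action + " exited with " + rc);
    }
  }
  // Typical sequence against a live cluster:
  //   runRollingUpgradeCmd(conf, "start");
  //   ... delete or replace blocks while the upgrade is in progress ...
  //   runRollingUpgradeCmd(conf, "finalize");  // or restart NN/DN with "-rollingupgrade rollback"
}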

View File

@@ -0,0 +1,100 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.junit.Before;
import org.junit.Test;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
/**
* This test verifies DataNode command line processing.
*/
public class TestDatanodeStartupOptions {
private Configuration conf = null;
/**
* Process the given argument list as DataNode command-line arguments and
* verify the parse result. If parsing is expected to succeed, also verify
* that the parsed startup option matches the expected one.
*
* @param expectSuccess whether argument parsing is expected to succeed
* @param expectedOption the startup option expected on successful parsing
* @param conf configuration that receives the parsed startup option
* @param arg command-line arguments to pass to the DataNode parser
*/
private static void checkExpected(boolean expectSuccess,
StartupOption expectedOption,
Configuration conf,
String ... arg) {
String[] args = new String[arg.length];
int i = 0;
for (String currentArg : arg) {
args[i++] = currentArg;
}
boolean returnValue = DataNode.parseArguments(args, conf);
StartupOption option = DataNode.getStartupOption(conf);
assertThat(returnValue, is(expectSuccess));
if (expectSuccess) {
assertThat(option, is(expectedOption));
}
}
/**
* Reinitialize configuration before every test since DN stores the
* parsed StartupOption in the configuration.
*/
@Before
public void initConfiguration() {
conf = new HdfsConfiguration();
}
/**
* A few options that should all parse successfully.
*/
@Test (timeout=60000)
public void testStartupSuccess() {
checkExpected(true, StartupOption.REGULAR, conf);
checkExpected(true, StartupOption.REGULAR, conf, "-regular");
checkExpected(true, StartupOption.REGULAR, conf, "-REGULAR");
checkExpected(true, StartupOption.ROLLBACK, conf, "-rollback");
checkExpected(true, StartupOption.ROLLINGUPGRADE, conf, "-rollingupgrade", "rollback");
checkExpected(true, StartupOption.ROLLINGUPGRADE, conf, "-rollingupgraDE", "ROLLBack");
}
/**
* A few options that should all fail to parse.
*/
@Test (timeout=60000)
public void testStartupFailure() {
checkExpected(false, StartupOption.REGULAR, conf, "unknownoption");
checkExpected(false, StartupOption.REGULAR, conf, "-regular -rollback");
checkExpected(false, StartupOption.REGULAR, conf, "-rollingupgrade", "downgrade");
checkExpected(false, StartupOption.REGULAR, conf, "-rollingupgrade", "unknownoption");
}
}
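
DataNode.parseArguments() records the parsed option in the supplied Configuration and DataNode.getStartupOption() reads it back, which is why the fixture rebuilds conf before every test. A minimal round-trip sketch, assuming it sits in the org.apache.hadoop.hdfs.server.datanode package (like this test) so the package-visible DataNode helpers are reachable:

package org.apache.hadoop.hdfs.server.datanode;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Sketch only: the parse -> Configuration -> read-back round trip the test relies on.
class StartupOptionRoundTripSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    boolean ok = DataNode.parseArguments(new String[] { "-rollingupgrade", "rollback" }, conf);
    System.out.println(ok);                               // true
    System.out.println(DataNode.getStartupOption(conf));  // ROLLINGUPGRADE
  }
}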

View File

@@ -0,0 +1,100 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.*;
import org.junit.Test;
import static org.junit.Assert.fail;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
/**
* Test enumerations in HdfsServerConstants.
*/
public class TestHdfsServerConstants {
/**
* Verify that parsing a StartupOption string gives the expected results.
* If a RollingUpgradeStartupOption is specified then it is also checked.
*
* @param value the startup option string to parse
* @param expectedOption the expected parsed StartupOption
* @param expectedRollupOption the expected RollingUpgradeStartupOption; optional, may be null.
*/
private static void verifyStartupOptionResult(String value,
StartupOption expectedOption,
RollingUpgradeStartupOption expectedRollupOption) {
StartupOption option = StartupOption.getEnum(value);
assertThat(option, is(expectedOption));
if (expectedRollupOption != null) {
assertThat(option.getRollingUpgradeStartupOption(), is(expectedRollupOption));
}
}
/**
* Test that we can parse a StartupOption string without the optional
* RollingUpgradeStartupOption.
*/
@Test
public void testStartupOptionParsing() {
verifyStartupOptionResult("FORMAT", StartupOption.FORMAT, null);
verifyStartupOptionResult("REGULAR", StartupOption.REGULAR, null);
verifyStartupOptionResult("CHECKPOINT", StartupOption.CHECKPOINT, null);
verifyStartupOptionResult("UPGRADE", StartupOption.UPGRADE, null);
verifyStartupOptionResult("ROLLBACK", StartupOption.ROLLBACK, null);
verifyStartupOptionResult("FINALIZE", StartupOption.FINALIZE, null);
verifyStartupOptionResult("ROLLINGUPGRADE", StartupOption.ROLLINGUPGRADE, null);
verifyStartupOptionResult("IMPORT", StartupOption.IMPORT, null);
verifyStartupOptionResult("INITIALIZESHAREDEDITS", StartupOption.INITIALIZESHAREDEDITS, null);
try {
verifyStartupOptionResult("UNKNOWN(UNKNOWNOPTION)", StartupOption.FORMAT, null);
fail("Failed to get expected IllegalArgumentException");
} catch(IllegalArgumentException iae) {
// Expected!
}
}
/**
* Test that we can parse a StartupOption string with a
* RollingUpgradeStartupOption.
*/
@Test
public void testRollingUpgradeStartupOptionParsing() {
verifyStartupOptionResult("ROLLINGUPGRADE(ROLLBACK)",
StartupOption.ROLLINGUPGRADE,
RollingUpgradeStartupOption.ROLLBACK);
verifyStartupOptionResult("ROLLINGUPGRADE(DOWNGRADE)",
StartupOption.ROLLINGUPGRADE,
RollingUpgradeStartupOption.DOWNGRADE);
verifyStartupOptionResult("ROLLINGUPGRADE(STARTED)",
StartupOption.ROLLINGUPGRADE,
RollingUpgradeStartupOption.STARTED);
try {
verifyStartupOptionResult("ROLLINGUPGRADE(UNKNOWNOPTION)", StartupOption.ROLLINGUPGRADE, null);
fail("Failed to get expected IllegalArgumentException");
} catch(IllegalArgumentException iae) {
// Expected!
}
}
}
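
StartupOption.getEnum() accepts either a bare option name or a name followed by a RollingUpgradeStartupOption in parentheses, and rejects anything else with IllegalArgumentException, as the assertions above establish. A small sketch exercising both forms, assuming nothing beyond that public API:

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;

// Sketch only: exercises the two string forms verified by the test above.
class StartupOptionGetEnumSketch {
  public static void main(String[] args) {
    System.out.println(StartupOption.getEnum("ROLLBACK"));          // ROLLBACK

    StartupOption rolling = StartupOption.getEnum("ROLLINGUPGRADE(DOWNGRADE)");
    RollingUpgradeStartupOption sub = rolling.getRollingUpgradeStartupOption();
    System.out.println(rolling + " / " + sub);                      // ROLLINGUPGRADE / DOWNGRADE

    try {
      StartupOption.getEnum("ROLLINGUPGRADE(BOGUS)");               // unknown sub-option
    } catch (IllegalArgumentException expected) {
      System.out.println("rejected: " + expected.getMessage());
    }
  }
}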