HDFS-4818. Several HDFS tests that attempt to make directories unusable do not work correctly on Windows. Contributed by Chris Nauroth.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1494023 13f79535-47bb-0310-9956-ffa450edef68
commit 364b379cb8
parent 7ef54faad4
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -717,6 +717,9 @@ Release 2.1.0-beta - UNRELEASED
     HDFS-4783. TestDelegationTokensWithHA#testHAUtilClonesDelegationTokens fails
     on Windows. (cnauroth)
 
+    HDFS-4818. Several HDFS tests that attempt to make directories unusable do
+    not work correctly on Windows. (cnauroth)
+
   BREAKDOWN OF HDFS-2802 HDFS SNAPSHOT SUBTASKS AND RELATED JIRAS
 
     HDFS-4076. Support snapshot of single files. (szetszwo)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java

@@ -842,8 +842,8 @@ public class NNStorage extends Storage implements Closeable,
     String absPath = f.getAbsolutePath();
     for (StorageDirectory sd : storageDirs) {
       String dirPath = sd.getRoot().getAbsolutePath();
-      if (!dirPath.endsWith("/")) {
-        dirPath += "/";
+      if (!dirPath.endsWith(File.separator)) {
+        dirPath += File.separator;
       }
       if (absPath.startsWith(dirPath)) {
         reportErrorsOnDirectory(sd);

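The NNStorage hunk above is the core library fix: the path-containment check appended a hard-coded "/", which never matches Windows absolute paths built with "\", so an error on a file was not attributed to its storage directory. A minimal standalone sketch of the corrected check (illustration only; the class and method names here are hypothetical, not part of the patch):

    import java.io.File;

    public class PathPrefixDemo {
      // Appending the platform separator before startsWith() also keeps a
      // sibling such as /data/name2 from matching the directory /data/name.
      static boolean isUnder(String absPath, String dirPath) {
        if (!dirPath.endsWith(File.separator)) {
          dirPath += File.separator;
        }
        return absPath.startsWith(dirPath);
      }

      public static void main(String[] args) {
        System.out.println(isUnder("/data/name2/current", "/data/name")); // false
        System.out.println(isUnder("/data/name/current", "/data/name"));  // true
      }
    }
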
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java

@@ -864,9 +864,13 @@ public class TestCheckpoint {
         savedSd.lock();
         fail("Namenode should not be able to lock a storage that is already locked");
       } catch (IOException ioe) {
-        String jvmName = ManagementFactory.getRuntimeMXBean().getName();
-        assertTrue("Error message does not include JVM name '" + jvmName
-            + "'", logs.getOutput().contains(jvmName));
+        // cannot read lock file on Windows, so message cannot get JVM name
+        String lockingJvmName = Path.WINDOWS ? "" :
+            " " + ManagementFactory.getRuntimeMXBean().getName();
+        String expectedLogMessage = "It appears that another namenode"
+            + lockingJvmName + " has already locked the storage directory";
+        assertTrue("Log output does not contain expected log message: "
+            + expectedLogMessage, logs.getOutput().contains(expectedLogMessage));
       }
     } finally {
       cleanup(cluster);

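The platform-conditional expectation above exists because NNStorage writes the owning JVM's name into the storage directory's in_use.lock file, and a NameNode that loses the lock race reads that file back to name the other process in its error message. On Windows, java.nio file locks are mandatory rather than advisory, so that read fails and the message carries no JVM name. A self-contained sketch of the underlying behavior (illustration only; not code from the patch):

    import java.io.File;
    import java.io.RandomAccessFile;
    import java.nio.channels.FileLock;

    public class LockReadDemo {
      public static void main(String[] args) throws Exception {
        File lockFile = new File(args[0], "in_use.lock");
        try (RandomAccessFile raf = new RandomAccessFile(lockFile, "rws");
             FileLock lock = raf.getChannel().lock()) {
          // While this lock is held, a second process that opens lockFile
          // with new FileInputStream(lockFile).read() gets an IOException
          // on Windows (mandatory locking) but reads normally on Linux.
          System.out.println("holding lock on " + lockFile);
        }
      }
    }
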
@@ -2035,7 +2039,7 @@ public class TestCheckpoint {
       StorageDirectory sd0 = storage.getStorageDir(0);
       assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
       currentDir = sd0.getCurrentDir();
-      FileUtil.setExecutable(currentDir, false);
+      assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));
 
       // Try to upload checkpoint -- this should fail since there are no
       // valid storage dirs

@@ -2048,7 +2052,7 @@ public class TestCheckpoint {
       }
 
       // Restore the good dir
-      FileUtil.setExecutable(currentDir, true);
+      assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755"));
       nn.restoreFailedStorage("true");
       nn.rollEditLog();
 

@@ -2059,7 +2063,7 @@ public class TestCheckpoint {
       assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
     } finally {
       if (currentDir != null) {
-        FileUtil.setExecutable(currentDir, true);
+        FileUtil.chmod(currentDir.getAbsolutePath(), "755");
       }
       cleanup(secondary);
       secondary = null;

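The remaining TestCheckpoint hunks, like the TestNNStorageRetentionFunctional hunks below, replace FileUtil.setExecutable with FileUtil.chmod. FileUtil.setExecutable delegates to java.io.File#setExecutable, which cannot revoke permissions on Windows: it returns false and leaves the directory fully accessible, so these tests were "failing" a storage directory that remained usable. FileUtil.chmod instead shells out to a native helper (winutils.exe on Windows), and the tests now assert its exit code. A standalone sketch of the failure mode (illustration only; the demo class is hypothetical):

    import java.io.File;

    public class SetExecutableDemo {
      public static void main(String[] args) {
        File dir = new File(System.getProperty("java.io.tmpdir"), "perm-demo");
        dir.mkdirs();
        // Returns false on Windows and leaves the directory accessible,
        // which is why tests that relied on it passed vacuously.
        boolean revoked = dir.setExecutable(false);
        System.out.println("execute bit revoked: " + revoked);
        dir.setExecutable(true); // restore, in case it took effect (POSIX)
        dir.delete();
      }
    }

The two-argument FileUtil.chmod overload also declares InterruptedException, which is presumably why testPurgingWithNameEditsDirAfterFailure widens its signature from throws IOException to throws Exception.
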
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java

@@ -241,8 +241,8 @@ public class TestFileJournalManager {
     try {
       jm.finalizeLogSegment(0, 1);
     } finally {
-      assertTrue(storage.getRemovedStorageDirs().contains(sd));
       FileUtil.chmod(sdRootPath, "+w", true);
+      assertTrue(storage.getRemovedStorageDirs().contains(sd));
     }
   }
 

@@ -439,8 +439,12 @@ public class TestFileJournalManager {
     FileJournalManager jm = new FileJournalManager(conf, sd, storage);
 
     EditLogInputStream elis = getJournalInputStream(jm, 5, true);
-    FSEditLogOp op = elis.readOp();
-    assertEquals("read unexpected op", op.getTransactionId(), 5);
+    try {
+      FSEditLogOp op = elis.readOp();
+      assertEquals("read unexpected op", op.getTransactionId(), 5);
+    } finally {
+      IOUtils.cleanup(LOG, elis);
+    }
   }
 
   /**

@@ -463,10 +467,14 @@ public class TestFileJournalManager {
     assertEquals(100, getNumberOfTransactions(jm, 1, false, false));
 
     EditLogInputStream elis = getJournalInputStream(jm, 90, false);
-    FSEditLogOp lastReadOp = null;
-    while ((lastReadOp = elis.readOp()) != null) {
-      assertTrue(lastReadOp.getTransactionId() <= 100);
-    }
+    try {
+      FSEditLogOp lastReadOp = null;
+      while ((lastReadOp = elis.readOp()) != null) {
+        assertTrue(lastReadOp.getTransactionId() <= 100);
+      }
+    } finally {
+      IOUtils.cleanup(LOG, elis);
+    }
   }
 
   private static String getLogsAsString(

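The TestFileJournalManager hunks all apply one pattern: every EditLogInputStream opened by a test is now closed in a finally block. This matters on Windows, where a file with an open handle cannot be deleted, so a leaked stream would make later cleanup of the test's edits directory fail. A sketch of the helper pattern (illustration only; readSafely is a hypothetical name):

    import java.io.Closeable;
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.io.IOUtils;

    public class CleanupDemo {
      private static final Log LOG = LogFactory.getLog(CleanupDemo.class);

      static void readSafely(Closeable stream) {
        try {
          // ... read ops and make assertions ...
        } finally {
          // Closes each argument, logging rather than throwing any close()
          // failure, so an assertion error from the try block is not masked.
          IOUtils.cleanup(LOG, stream);
        }
      }
    }
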
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

@@ -106,6 +106,9 @@ public class TestFsck {
   static final Pattern numCorruptBlocksPattern = Pattern.compile(
       ".*Corrupt blocks:\t\t([0123456789]*).*");
 
+  private static final String LINE_SEPARATOR =
+      System.getProperty("line.separator");
+
   static String runFsck(Configuration conf, int expectedErrCode,
       boolean checkErrorCode,String... path)
       throws Exception {

@@ -321,7 +324,7 @@ public class TestFsck {
     while (true) {
       outStr = runFsck(conf, 1, false, "/");
       String numCorrupt = null;
-      for (String line : outStr.split("\n")) {
+      for (String line : outStr.split(LINE_SEPARATOR)) {
         Matcher m = numCorruptBlocksPattern.matcher(line);
         if (m.matches()) {
           numCorrupt = m.group(1);

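Splitting fsck output on a literal "\n" leaves a trailing '\r' on every line on Windows, and because '.' in a Java regex does not match line terminators, Matcher#matches then fails even though the corrupt-block counter is present. A quick demonstration (illustration only):

    import java.util.regex.Pattern;

    public class LineSeparatorDemo {
      public static void main(String[] args) {
        Pattern p = Pattern.compile(".*Corrupt blocks:\t\t([0123456789]*).*");
        // Simulated fsck output as produced on Windows (CRLF line endings).
        String out = " Corrupt blocks:\t\t3\r\n";

        String withCr = out.split("\n")[0];   // " Corrupt blocks:\t\t3\r"
        String clean  = out.split("\r\n")[0]; // " Corrupt blocks:\t\t3"

        System.out.println(p.matcher(withCr).matches()); // false: '.' skips '\r'
        System.out.println(p.matcher(clean).matches());  // true
      }
    }
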
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java

@@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
 import static org.apache.hadoop.test.GenericTestUtils.assertGlobEquals;
+import static org.junit.Assert.assertEquals;
 
 import java.io.File;
 import java.io.IOException;

@@ -59,7 +60,7 @@ public class TestNNStorageRetentionFunctional {
    */
   @Test
   public void testPurgingWithNameEditsDirAfterFailure()
-      throws IOException {
+      throws Exception {
     MiniDFSCluster cluster = null;
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);

@@ -107,10 +108,10 @@ public class TestNNStorageRetentionFunctional {
         getInProgressEditsFileName(5));
 
     LOG.info("Failing first storage dir by chmodding it");
-    FileUtil.setExecutable(sd0, false);
+    assertEquals(0, FileUtil.chmod(cd0.getAbsolutePath(), "000"));
     doSaveNamespace(nn);
     LOG.info("Restoring accessibility of first storage dir");
-    FileUtil.setExecutable(sd0, true);
+    assertEquals(0, FileUtil.chmod(cd0.getAbsolutePath(), "755"));
 
     LOG.info("nothing should have been purged in first storage dir");
     assertGlobEquals(cd0, "fsimage_\\d*",

@@ -139,7 +140,7 @@ public class TestNNStorageRetentionFunctional {
       assertGlobEquals(cd0, "edits_.*",
           getInProgressEditsFileName(9));
     } finally {
-      FileUtil.setExecutable(sd0, true);
+      FileUtil.chmod(cd0.getAbsolutePath(), "755");
 
       LOG.info("Shutting down...");
       if (cluster != null) {

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java

@@ -130,7 +130,8 @@ public class TestNameNodeMXBean {
 
       // This will cause the first dir to fail.
       File failedNameDir = new File(nameDirUris.toArray(new URI[0])[0]);
-      assertEquals(0, FileUtil.chmod(failedNameDir.getAbsolutePath(), "000"));
+      assertEquals(0, FileUtil.chmod(
+          new File(failedNameDir, "current").getAbsolutePath(), "000"));
       cluster.getNameNodeRpc().rollEditLog();
 
       nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,

@@ -150,7 +151,8 @@ public class TestNameNodeMXBean {
     } finally {
       if (cluster != null) {
         for (URI dir : cluster.getNameDirs(0)) {
-          FileUtil.chmod(new File(dir).toString(), "700");
+          FileUtil.chmod(
+              new File(new File(dir), "current").getAbsolutePath(), "755");
         }
         cluster.shutdown();
       }