HDFS-4573. Fix TestINodeFile on Windows. Contributed by Arpit Agarwal.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1454616 13f79535-47bb-0310-9956-ffa450edef68
parent a4c20088b5
commit c1fabc5e91
@@ -306,7 +306,7 @@ Trunk (Unreleased)
     HDFS-4502. JsonUtil.toFileStatus(..) should check if the fileId property
     exists. (Brandon Li via suresh)
 
-  BREAKDOWN OF HADOOP-8562 SUBTASKS
+  BREAKDOWN OF HADOOP-8562 SUBTASKS AND RELATED JIRAS
 
     HDFS-4145. Merge hdfs cmd line scripts from branch-1-win. (David Lao,
     Bikas Saha, Lauren Yang, Chuan Liu, Thejas M Nair and Ivan Mitic via suresh)
@@ -320,6 +320,8 @@ Trunk (Unreleased)
     HDFS-4297. Fix issues related to datanode concurrent reading and writing on
     Windows. (Arpit Agarwal, Chuan Liu via suresh)
 
+    HDFS-4573. Fix TestINodeFile on Windows. (Arpit Agarwal via suresh)
+
 Release 2.0.5-beta - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -184,8 +184,10 @@ public class TestINodeFile {
     long fileLen = 1024;
     replication = 3;
     Configuration conf = new Configuration();
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
-        replication).build();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster =
+          new MiniDFSCluster.Builder(conf).numDataNodes(replication).build();
     cluster.waitActive();
     FSNamesystem fsn = cluster.getNamesystem();
     FSDirectory fsdir = fsn.getFSDirectory();
@@ -212,6 +214,11 @@ public class TestINodeFile {
     // getFullPathName can return correct result only if the parent field of
     // child node is set correctly
     assertEquals(newFile.toString(), fnode.getFullPathName());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
   }
 
   @Test
@@ -385,8 +392,9 @@ public class TestINodeFile {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
         DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
-        .build();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
 
     FSNamesystem fsn = cluster.getNamesystem();
@@ -420,6 +428,11 @@ public class TestINodeFile {
     cluster.restartNameNode();
     cluster.waitActive();
     assertTrue(fsn.getLastInodeId() == 1003);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
   }
 
   @Test
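
Note: both test changes above apply the same cleanup pattern. The MiniDFSCluster is declared as null, built inside a try block, and shut down in a finally block, so the cluster is released even if an assertion fails; a leaked cluster keeps its storage files open, which on Windows leaves file locks that break subsequent tests. A minimal, self-contained sketch of that pattern follows; the class and test method names are illustrative and not part of this commit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;

public class MiniDFSClusterCleanupSketch {

  // Hypothetical test showing the try/finally cleanup used in this commit.
  @Test
  public void testWithClusterCleanup() throws Exception {
    Configuration conf = new Configuration();
    // Declare outside the try block so the finally block can see it.
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      // ... test body: assertions against cluster.getNamesystem(), etc. ...
    } finally {
      if (cluster != null) {
        // Releases the cluster's storage directories and their file locks.
        cluster.shutdown();
      }
    }
  }
}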