HDFS-13503. Fix TestFsck test failures on Windows. Contributed by Xiao Liang.
commit 9fd93ee533 (parent cb3414a279)
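
The diff below applies one fix pattern throughout TestFsck: rather than letting MiniDFSCluster fall back to the shared default base directory, each test builds its cluster on its own randomized base directory, either through the MiniDFSCluster.Builder(Configuration, File) constructor or by setting HDFS_MINIDFS_BASEDIR on the Configuration, and MiniDFSCluster.getStorageDir() is changed to resolve storage directories against that per-cluster base directory. The sketch below is illustrative and not code from this commit; the test class name and the mkdirs call are assumptions, while Builder(conf, builderBaseDir), GenericTestUtils.getRandomizedTempPath(), and getStorageDir() are the APIs the diff actually touches.

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;

public class TestRandomizedBaseDirSketch {

  @Test
  public void testClusterUsesItsOwnBaseDir() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Give this test its own base directory instead of the shared default,
    // so repeated or parallel runs never reuse the same on-disk paths.
    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/example"));
      // With the getStorageDir() change in this commit, datanode storage
      // directories are resolved under the cluster's own base directory.
      File storageDir = cluster.getStorageDir(0, 0);
      System.out.println("storage dir: " + storageDir);
    } finally {
      cluster.shutdown();
    }
  }
}

Where a test cannot pass the base directory to the builder, the diff sets it on the Configuration instead: conf.set(HDFS_MINIDFS_BASEDIR, GenericTestUtils.getRandomizedTempPath());
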
@@ -2791,7 +2791,8 @@ public class MiniDFSCluster implements AutoCloseable {
    * @return Storage directory
    */
   public File getStorageDir(int dnIndex, int dirIndex) {
-    return new File(getBaseDirectory(), getStorageDirPath(dnIndex, dirIndex));
+    return new File(determineDfsBaseDir(),
+        getStorageDirPath(dnIndex, dirIndex));
   }
 
   /**
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.MiniDFSCluster.HDFS_MINIDFS_BASEDIR;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -195,7 +196,9 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
         precision);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     fs = cluster.getFileSystem();
     final String fileName = "/srcdat";
     util.createFiles(fs, fileName);
@@ -283,7 +286,9 @@ public class TestFsck {
         setNumFiles(20).build();
     FileSystem fs = null;
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     fs = cluster.getFileSystem();
     util.createFiles(fs, "/srcdat");
     util.waitReplication(fs, "/srcdat", (short)3);
@@ -301,7 +306,9 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
 
     // Create a cluster with the current user, write some files
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     final MiniDFSCluster c2 = cluster;
     final String dir = "/dfsck";
     final Path dirpath = new Path(dir);
@@ -347,8 +354,9 @@ public class TestFsck {
     DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3,
         (5 * dfsBlockSize) + (dfsBlockSize - 1), 5 * dfsBlockSize);
     FileSystem fs = null;
-    cluster = new MiniDFSCluster.Builder(conf).
-        numDataNodes(numDatanodes).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDatanodes).build();
     String topDir = "/srcdat";
     fs = cluster.getFileSystem();
     cluster.waitActive();
@@ -546,7 +554,9 @@ public class TestFsck {
     FileSystem fs = null;
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     String topDir = "/srcdat";
     fs = cluster.getFileSystem();
     cluster.waitActive();
@@ -610,7 +620,9 @@ public class TestFsck {
         setNumFiles(4).build();
     FileSystem fs = null;
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     String topDir = "/srcdat";
     String randomString = "HADOOP ";
     fs = cluster.getFileSystem();
@@ -661,7 +673,9 @@ public class TestFsck {
     String outStr = null;
     short factor = 1;
 
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(1).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     Path file1 = new Path("/testCorruptBlock");
@@ -732,7 +746,9 @@ public class TestFsck {
     Random random = new Random();
     String outStr = null;
     short factor = 1;
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(2).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     Path file1 = new Path("/testUnderMinReplicatedBlock");
@@ -803,9 +819,9 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
 
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-            .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
     cluster.waitClusterUp();
     final DistributedFileSystem dfs = cluster.getFileSystem();
 
@@ -952,7 +968,8 @@ public class TestFsck {
   @Test
   public void testFsckError() throws Exception {
     // bring up a one-node cluster
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
     String fileName = "/test.txt";
     Path filePath = new Path(fileName);
     FileSystem fs = cluster.getFileSystem();
@@ -984,7 +1001,8 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
     FileSystem fs = null;
 
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     DFSTestUtil util = new DFSTestUtil.Builder().
@@ -1047,7 +1065,8 @@ public class TestFsck {
   @Test
   public void testToCheckTheFsckCommandOnIllegalArguments() throws Exception {
     // bring up a one-node cluster
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
     String fileName = "/test.txt";
     Path filePath = new Path(fileName);
     FileSystem fs = cluster.getFileSystem();
@@ -1091,8 +1110,9 @@ public class TestFsck {
     DistributedFileSystem dfs = null;
 
     // Startup a minicluster
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numReplicas).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numReplicas).build();
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
@@ -1151,9 +1171,9 @@ public class TestFsck {
     DistributedFileSystem dfs = null;
 
     // Startup a minicluster
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-            .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
     dfs = cluster.getFileSystem();
@@ -1263,7 +1283,9 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY,
         precision);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(4).build();
     fs = cluster.getFileSystem();
     final String fileName = "/srcdat";
     util.createFiles(fs, fileName);
@@ -1290,7 +1312,8 @@ public class TestFsck {
    */
   @Test
   public void testFsckForSnapshotFiles() throws Exception {
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).numDataNodes(1)
         .build();
     String runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots",
         "-files");
@@ -1325,9 +1348,9 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
 
     DistributedFileSystem dfs = null;
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-            .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
@@ -1380,9 +1403,9 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
 
     DistributedFileSystem dfs;
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-            .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
@@ -1464,7 +1487,8 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY,
         replFactor);
 
-    cluster = new MiniDFSCluster.Builder(conf)
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
         .numDataNodes(numDn)
         .hosts(hosts)
         .racks(racks)
@@ -1584,9 +1608,9 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
 
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-            .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
@@ -1653,7 +1677,8 @@ public class TestFsck {
    */
   @Test
   public void testStoragePoliciesCK() throws Exception {
-    cluster = new MiniDFSCluster.Builder(conf)
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
         .numDataNodes(3)
         .storageTypes(
             new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
@@ -1696,9 +1721,9 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
 
     DistributedFileSystem dfs;
-    cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(numDn).hosts(hosts)
-            .racks(racks).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
+        .numDataNodes(numDn).hosts(hosts).racks(racks).build();
 
     assertNotNull("Failed Cluster Creation", cluster);
     cluster.waitClusterUp();
@@ -1777,7 +1802,8 @@ public class TestFsck {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAINTENANCE_REPLICATION_MIN_KEY,
         replFactor);
 
-    cluster = new MiniDFSCluster.Builder(conf)
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir)
         .numDataNodes(numDn)
         .hosts(hosts)
         .racks(racks)
@@ -1886,7 +1912,8 @@ public class TestFsck {
 
     int numFiles = 3;
     int numSnapshots = 0;
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
     cluster.waitActive();
     hdfs = cluster.getFileSystem();
     DFSTestUtil util = new DFSTestUtil.Builder().
@@ -1966,7 +1993,8 @@ public class TestFsck {
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replication);
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    File builderBaseDir = new File(GenericTestUtils.getRandomizedTempPath());
+    cluster = new MiniDFSCluster.Builder(conf, builderBaseDir).build();
     DistributedFileSystem dfs = cluster.getFileSystem();
     cluster.waitActive();
 
@@ -2061,6 +2089,7 @@ public class TestFsck {
     HostsFileWriter hostsFileWriter = new HostsFileWriter();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replFactor);
+    conf.set(HDFS_MINIDFS_BASEDIR, GenericTestUtils.getRandomizedTempPath());
     if (defineUpgradeDomain) {
       conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
           CombinedHostFileManager.class, HostConfigManager.class);
@@ -2107,7 +2136,8 @@ public class TestFsck {
   @Test(timeout = 300000)
   public void testFsckCorruptWhenOneReplicaIsCorrupt()
       throws Exception {
-    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf,
+        new File(GenericTestUtils.getRandomizedTempPath()))
         .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(2)
         .build()) {
       cluster.waitActive();