HBASE-6830. [WINDOWS] Tests should not rely on local temp dir to be available in DFS

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1410659 13f79535-47bb-0310-9956-ffa450edef68
Enis Soztutar 2012-11-17 01:16:51 +00:00
parent 4cdaf84ef8
commit 4aeeda4377
8 changed files with 108 additions and 29 deletions
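Editorial note (hedged): before this change, tests resolved data paths through getDataTestDir(), which lives on the local filesystem; on Windows that local temp path is not visible inside the mini DFS, so bulk-load and map-reduce tests broke. A minimal sketch of the pattern the commit moves tests to; the class name and test body here are illustrative, only the HBaseTestingUtility calls are from the commit:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class DataDirOnTestFSExample {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniDFSCluster(1);  // the test filesystem is now HDFS
    try {
      // Resolved against the test FS (HDFS here), not the local temp dir.
      Path dir = util.getDataTestDirOnTestFS("myTest");
      FileSystem fs = util.getTestFileSystem();
      fs.mkdirs(dir);  // safe on Windows: no local path is assumed to exist in DFS
    } finally {
      util.shutdownMiniDFSCluster();
    }
  }
}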

HBaseTestingUtility.java

@@ -140,12 +140,16 @@ public class HBaseTestingUtility {
   private String hadoopLogDir;

-  // Directory where we put the data for this instance of HBaseTestingUtility.
+  /** Directory where we put the data for this instance of HBaseTestingUtility*/
   private File dataTestDir = null;

-  // Directory (a subdirectory of dataTestDir) used by the dfs cluster if any
+  /** Directory (a subdirectory of dataTestDir) used by the dfs cluster if any */
   private File clusterTestDir = null;

+  /** Directory on test filesystem where we put the data for this instance of
+   *  HBaseTestingUtility*/
+  private Path dataTestDirOnTestFS = null;
+
   /**
    * System property key to get test directory value.
    * Name is as it is because mini dfs has hard-codings to put test data here.
@@ -254,6 +258,17 @@ public class HBaseTestingUtility {
     return new Path(PathName);
   }

+  /**
+   * @return Where to write test data on the test filesystem; Returns working directory
+   * for the test filesystem by default
+   * @see #setupDataTestDirOnTestFS()
+   * @see #getTestFileSystem()
+   */
+  private Path getBaseTestDirOnTestFS() throws IOException {
+    FileSystem fs = getTestFileSystem();
+    return new Path(fs.getWorkingDirectory(), "test-data");
+  }
+
   /**
    * @return Where to write test data on local filesystem, specific to
    * the test. Useful for tests that do not use a cluster.
@@ -307,6 +322,31 @@ public class HBaseTestingUtility {
     return new Path(getDataTestDir(), subdirName);
   }

+  /**
+   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
+   * to write temporary test data. Call this method after setting up the mini dfs cluster
+   * if the test relies on it.
+   * @return a unique path in the test filesystem
+   */
+  public Path getDataTestDirOnTestFS() throws IOException {
+    if (dataTestDirOnTestFS == null) {
+      setupDataTestDirOnTestFS();
+    }
+
+    return dataTestDirOnTestFS;
+  }
+
+  /**
+   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()}
+   * to write temporary test data. Call this method after setting up the mini dfs cluster
+   * if the test relies on it.
+   * @return a unique path in the test filesystem
+   * @param subdirName name of the subdir to create under the base test dir
+   */
+  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
+    return new Path(getDataTestDirOnTestFS(), subdirName);
+  }
+
   /**
    * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
    * Give it a random name so can have many concurrent tests running if
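A hedged illustration of the two accessors just added (the wrapper class is hypothetical; the accessor names are from the diff): the no-arg form lazily creates one unique directory per HBaseTestingUtility instance, and the String overload nests beneath it.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;

class TestDirAccessorsSketch {
  static void demo(HBaseTestingUtility util) throws java.io.IOException {
    Path base = util.getDataTestDirOnTestFS();          // lazily set up once per instance
    Path sub  = util.getDataTestDirOnTestFS("hfiles");  // resolves to <base>/hfiles
    assert sub.getParent().equals(base);                // the overload nests under the base
  }
}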
@@ -350,10 +390,6 @@ public class HBaseTestingUtility {
     createSubDir(
       "mapred.local.dir",
       testPath, "mapred-local-dir");
-
-    createSubDirAndSystemProperty(
-      "mapred.working.dir",
-      testPath, "mapred-working-dir");
   }

   private void createSubDir(String propertyName, Path parent, String subDirName){
@@ -389,6 +425,34 @@ public class HBaseTestingUtility {
     }
   }

+  /**
+   * Sets up a path in test filesystem to be used by tests
+   */
+  private void setupDataTestDirOnTestFS() throws IOException {
+    if (dataTestDirOnTestFS != null) {
+      LOG.warn("Data test on test fs dir already setup in "
+        + dataTestDirOnTestFS.toString());
+      return;
+    }
+
+    //The file system can be either local, mini dfs, or if the configuration
+    //is supplied externally, it can be an external cluster FS. If it is a local
+    //file system, the tests should use getBaseTestDir, otherwise, we can use
+    //the working directory, and create a unique sub dir there
+    FileSystem fs = getTestFileSystem();
+    if (fs.getUri().getScheme().equals(fs.getLocal(conf).getUri().getScheme())) {
+      if (dataTestDir == null) {
+        setupDataTestDir();
+      }
+      dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
+    } else {
+      Path base = getBaseTestDirOnTestFS();
+      String randomStr = UUID.randomUUID().toString();
+      dataTestDirOnTestFS = new Path(base, randomStr);
+      fs.deleteOnExit(dataTestDirOnTestFS);
+    }
+  }
+
   /**
    * Start a minidfscluster.
    * @param servers How many DNs to start.
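The scheme check above is the crux: a local-only test filesystem reuses the existing local data test dir, while anything else (mini dfs, or an externally supplied cluster FS) gets a UUID-named directory under the FS working directory, registered for delete-on-exit. A standalone restatement of that decision; the helper class and method are assumptions, not the commit's code:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

class TestFSCheckSketch {
  // "file" is the scheme of the local filesystem; any other scheme means the
  // data test dir must be created on the test FS itself, as the method above does.
  static boolean isLocalTestFS(FileSystem fs, Configuration conf) throws IOException {
    String localScheme = FileSystem.getLocal(conf).getUri().getScheme();
    return localScheme.equals(fs.getUri().getScheme());
  }
}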
@@ -444,6 +508,9 @@ public class HBaseTestingUtility {
     // Wait for the cluster to be totally up
     this.dfsCluster.waitClusterUp();

+    //reset the test directory for test file system
+    dataTestDirOnTestFS = null;
+
     return this.dfsCluster;
   }
@@ -463,6 +530,9 @@ public class HBaseTestingUtility {
     // Wait for the cluster to be totally up
     this.dfsCluster.waitClusterUp();

+    //reset the test directory for test file system
+    dataTestDirOnTestFS = null;
+
     return this.dfsCluster;
   }
@@ -474,18 +544,23 @@ public class HBaseTestingUtility {
   }

   /** This is used before starting HDFS and map-reduce mini-clusters */
-  private void createDirsAndSetProperties() {
+  private void createDirsAndSetProperties() throws IOException {
     setupClusterTestDir();
     System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
     createDirAndSetProperty("cache_data", "test.cache.data");
     createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
     hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
-    createDirAndSetProperty("mapred_output", MapreduceTestingShim.getMROutputDirProp());
     createDirAndSetProperty("mapred_local", "mapred.local.dir");
-    createDirAndSetProperty("mapred_system", "mapred.system.dir");
     createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
     enableShortCircuit();
+
+    Path root = getDataTestDirOnTestFS("hadoop");
+    conf.set(MapreduceTestingShim.getMROutputDirProp(),
+      new Path(root, "mapred-output-dir").toString());
+    conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
+    conf.set("mapreduce.jobtracker.staging.root.dir",
+      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
+    conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
   }
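For clarity, a hedged sketch of the end state: after createDirsAndSetProperties() runs, the mapred.* keys set above hold paths rooted on the test filesystem instead of the local cluster test dir. The dump helper below is illustrative only:

import org.apache.hadoop.conf.Configuration;

class MapredDirsSketch {
  static void dump(Configuration conf) {
    // Each of these now resolves onto the test FS,
    // e.g. hdfs://.../test-data/<uuid>/hadoop/mapred-system-dir
    for (String key : new String[] { "mapred.system.dir",
        "mapreduce.jobtracker.staging.root.dir", "mapred.working.dir" }) {
      System.out.println(key + " = " + conf.get(key));
    }
  }
}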
@@ -523,7 +598,7 @@ public class HBaseTestingUtility {
   }

   private String createDirAndSetProperty(final String relPath, String property) {
-    String path = clusterTestDir.getPath() + "/" + relPath;
+    String path = getDataTestDir(relPath).toString();
     System.setProperty(property, path);
     conf.set(property, path);
     new File(path).mkdirs();
@@ -541,6 +616,9 @@ public class HBaseTestingUtility {
       // The below throws an exception per dn, AsynchronousCloseException.
       this.dfsCluster.shutdown();
       dfsCluster = null;
+      dataTestDirOnTestFS = null;
+      this.conf.set("fs.defaultFS", "file:///");
+      this.conf.set("fs.default.name", "file:///");
     }
   }
@@ -1483,7 +1561,8 @@ public class HBaseTestingUtility {
       // Allow the user to override FS URI for this map-reduce cluster to use.
       mrCluster = new MiniMRCluster(servers,
-        FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1);
+        FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
+        null, null, new JobConf(this.conf));
       JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
       if (jobConf == null) {
         jobConf = mrCluster.createJobConf();
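Why the two extra constructor arguments matter, as a hedged sketch (the wrapper class is assumed; the six-argument constructor is the stock MiniMRCluster one): without a JobConf seeded from the test Configuration, the MR cluster builds its own JobConf and never sees the mapred.* directories pointed at the test FS above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;

class MiniMRSeedConfSketch {
  static MiniMRCluster start(Configuration conf) throws Exception {
    // Seeding the cluster with new JobConf(conf) carries fs.defaultFS and the
    // mapred.* settings from the test Configuration into the MR daemons.
    return new MiniMRCluster(2, FileSystem.get(conf).getUri().toString(), 1,
        null, null, new JobConf(conf));
  }
}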

TestRegionObserverInterface.java

@@ -465,7 +465,7 @@ public class TestRegionObserverInterface {
     );

     FileSystem fs = util.getTestFileSystem();
-    final Path dir = util.getDataTestDir(testName).makeQualified(fs);
+    final Path dir = util.getDataTestDirOnTestFS(testName).makeQualified(fs);
     Path familyDir = new Path(dir, Bytes.toString(A));

     createHFile(util.getConfiguration(), fs, new Path(familyDir,Bytes.toString(A)), A, A);

TestHFileOutputFormat.java

@@ -288,7 +288,7 @@ public class TestHFileOutputFormat {
   @Test
   public void testWritingPEData() throws Exception {
     Configuration conf = util.getConfiguration();
-    Path testDir = util.getDataTestDir("testWritingPEData");
+    Path testDir = util.getDataTestDirOnTestFS("testWritingPEData");
     FileSystem fs = testDir.getFileSystem(conf);

     // Set down this value or we OOME in eclipse.
@@ -357,11 +357,11 @@ public class TestHFileOutputFormat {
       boolean shouldChangeRegions) throws Exception {
     util = new HBaseTestingUtility();
     Configuration conf = util.getConfiguration();
-    Path testDir = util.getDataTestDir("testLocalMRIncrementalLoad");
     byte[][] startKeys = generateRandomStartKeys(5);

     try {
       util.startMiniCluster();
+      Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
       HBaseAdmin admin = new HBaseAdmin(conf);
       HTable table = util.createTable(TABLE_NAME, FAMILIES);
       assertEquals("Should start with empty table",
@@ -449,7 +449,7 @@ public class TestHFileOutputFormat {
       Configuration conf, HTable table, Path outDir)
       throws Exception {
     Job job = new Job(conf, "testLocalMRIncrementalLoad");
-    job.setWorkingDirectory(util.getDataTestDir("runIncrementalPELoad"));
+    job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad"));
     setupRandomGeneratorMapper(job);
     HFileOutputFormat.configureIncrementalLoad(job, table);
     FileOutputFormat.setOutputPath(job, outDir);
@@ -546,7 +546,7 @@ public class TestHFileOutputFormat {
     RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
     TaskAttemptContext context = null;
     Path dir =
-      util.getDataTestDir("testColumnFamilyCompression");
+      util.getDataTestDirOnTestFS("testColumnFamilyCompression");

     HTable table = Mockito.mock(HTable.class);
@@ -570,7 +570,7 @@ public class TestHFileOutputFormat {
     // pollutes the GZip codec pool with an incompatible compressor.
     conf.set("io.seqfile.compression.type", "NONE");
     Job job = new Job(conf, "testLocalMRIncrementalLoad");
-    job.setWorkingDirectory(util.getDataTestDir("testColumnFamilyCompression"));
+    job.setWorkingDirectory(util.getDataTestDirOnTestFS("testColumnFamilyCompression"));
     setupRandomGeneratorMapper(job);
     HFileOutputFormat.configureIncrementalLoad(job, table);
     FileOutputFormat.setOutputPath(job, dir);
@@ -703,7 +703,7 @@ public class TestHFileOutputFormat {
     util.startMiniMapReduceCluster();

     for (int i = 0; i < 2; i++) {
-      Path testDir = util.getDataTestDir("testExcludeAllFromMinorCompaction_" + i);
+      Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
       runIncrementalPELoad(conf, table, testDir);
       // Perform the actual load
       new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
@@ -748,11 +748,11 @@ public class TestHFileOutputFormat {
   public void testExcludeMinorCompaction() throws Exception {
     Configuration conf = util.getConfiguration();
     conf.setInt("hbase.hstore.compaction.min", 2);
-    Path testDir = util.getDataTestDir("testExcludeMinorCompaction");
     generateRandomStartKeys(5);

     try {
       util.startMiniCluster();
+      Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
       final FileSystem fs = util.getDFSCluster().getFileSystem();
       HBaseAdmin admin = new HBaseAdmin(conf);
       HTable table = util.createTable(TABLE_NAME, FAMILIES);
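The test files in the rest of this commit repeat one mechanical migration, so a single hedged before/after sketch covers them (wrapper class assumed; both accessors are real):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;

class MigrationPatternSketch {
  static Path testDir(HBaseTestingUtility util, String testName) throws IOException {
    FileSystem fs = util.getTestFileSystem();
    // Old form: util.getDataTestDir(testName) -- a local-only path.
    // New form: resolved on the test filesystem and qualified against it.
    return util.getDataTestDirOnTestFS(testName).makeQualified(fs);
  }
}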

TestLoadIncrementalHFiles.java

@@ -126,7 +126,7 @@ public class TestLoadIncrementalHFiles {
   private void runTest(String testName, BloomType bloomType,
       byte[][][] hfileRanges) throws Exception {
-    Path dir = util.getDataTestDir(testName);
+    Path dir = util.getDataTestDirOnTestFS(testName);
     FileSystem fs = util.getTestFileSystem();
     dir = dir.makeQualified(fs);
     Path familyDir = new Path(dir, Bytes.toString(FAMILY));
@@ -209,7 +209,7 @@ public class TestLoadIncrementalHFiles {
   @Test
   public void testSplitStoreFile() throws IOException {
-    Path dir = util.getDataTestDir("testSplitHFile");
+    Path dir = util.getDataTestDirOnTestFS("testSplitHFile");
     FileSystem fs = util.getTestFileSystem();
     Path testIn = new Path(dir, "testhfile");
     HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);

TestLoadIncrementalHFilesSplitRecovery.java

@@ -126,7 +126,7 @@ public class TestLoadIncrementalHFilesSplitRecovery {
   }

   private Path buildBulkFiles(String table, int value) throws Exception {
-    Path dir = util.getDataTestDir(table);
+    Path dir = util.getDataTestDirOnTestFS(table);
     Path bulk1 = new Path(dir, table+value);
     FileSystem fs = util.getTestFileSystem();
     buildHFiles(fs, bulk1, value);

TestHFileCleaner.java

@@ -63,7 +63,7 @@ public class TestHFileCleaner {
   @Test
   public void testTTLCleaner() throws IOException, InterruptedException {
     FileSystem fs = UTIL.getDFSCluster().getFileSystem();
-    Path root = UTIL.getDataTestDir();
+    Path root = UTIL.getDataTestDirOnTestFS();
     Path file = new Path(root, "file");
     fs.createNewFile(file);
     long createTime = System.currentTimeMillis();
@@ -99,7 +99,7 @@ public class TestHFileCleaner {
         "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
     conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
     Server server = new DummyServer();
-    Path archivedHfileDir = new Path(UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
+    Path archivedHfileDir = new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
     FileSystem fs = FileSystem.get(conf);
     HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
@@ -165,7 +165,7 @@ public class TestHFileCleaner {
     // no cleaner policies = delete all files
     conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
     Server server = new DummyServer();
-    Path archivedHfileDir = new Path(UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
+    Path archivedHfileDir = new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);

     // setup the cleaner
     FileSystem fs = UTIL.getDFSCluster().getFileSystem();
@@ -236,4 +236,4 @@ public class TestHFileCleaner {
       return false;
     }
   }
-}
+}

TestHRegionServerBulkLoad.java

@@ -106,7 +106,7 @@ public class TestHRegionServerBulkLoad {
   /**
    * Thread that does full scans of the table looking for any partially
    * completed rows.
-   * 
+   *
    * Each iteration of this loads 10 hdfs files, which occupies 5 file open file
    * handles. So every 10 iterations (500 file handles) it does a region
    * compaction to reduce the number of open file handles.
@@ -124,7 +124,7 @@ public class TestHRegionServerBulkLoad {
     public void doAnAction() throws Exception {
       long iteration = numBulkLoads.getAndIncrement();
-      Path dir = UTIL.getDataTestDir(String.format("bulkLoad_%08d",
+      Path dir = UTIL.getDataTestDirOnTestFS(String.format("bulkLoad_%08d",
           iteration));

       // create HFiles for different column families

TestHLog.java

@@ -403,7 +403,7 @@ public class TestHLog {
     try {
       DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
       dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
-      cluster.shutdown();
+      TEST_UTIL.shutdownMiniDFSCluster();
       try {
         // wal.writer.close() will throw an exception,
         // but still call this since it closes the LogSyncer thread first
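A closing hedged note on this last hunk: shutting down through the utility rather than cluster.shutdown() matters because, per the HBaseTestingUtility diff above, shutdownMiniDFSCluster() also resets fs.defaultFS to file:/// and clears the cached dataTestDirOnTestFS. A sketch (wrapper class assumed):

import org.apache.hadoop.hbase.HBaseTestingUtility;

class ShutdownSketch {
  static void stopDfs(HBaseTestingUtility testUtil) throws Exception {
    // Also resets the conf and the cached test dir, unlike cluster.shutdown().
    testUtil.shutdownMiniDFSCluster();
  }
}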