HBASE-4634 'test.build.data' property overused, leading to data being written in the wrong place

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1189429 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2011-10-26 20:29:41 +00:00
parent 9efd27b733
commit 055d8e3b8c
74 changed files with 471 additions and 510 deletions


@ -405,6 +405,8 @@ Release 0.92.0 - Unreleased
HBASE-4367 Deadlock in MemStore flusher due to JDK internally synchronizing
on current thread
HBASE-4645 Edits Log recovery losing data across column families
HBASE-4634 "test.build.data" property overused leading to write data at the
wrong place (nkeywal)
TESTS
HBASE-4450 test for number of blocks read: to serve as baseline for expected


@ -54,8 +54,10 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
public abstract class HBaseTestCase extends TestCase {
private static final Log LOG = LogFactory.getLog(HBaseTestCase.class);
/** configuration parameter name for test directory */
public static final String TEST_DIRECTORY_KEY = "test.build.data";
/** configuration parameter name for test directory
* @deprecated see HBaseTestingUtility#TEST_DIRECTORY_KEY
**/
private static final String TEST_DIRECTORY_KEY = "test.build.data";
/*
protected final static byte [] fam1 = Bytes.toBytes("colfamily1");
@ -153,20 +155,27 @@ public abstract class HBaseTestCase extends TestCase {
super.tearDown();
}
protected Path getUnitTestdir(String testName) {
return new Path(
conf.get(TEST_DIRECTORY_KEY, "target/test/data"), testName);
}
/**
* @see HBaseTestingUtility#getBaseTestDir
* @param testName
* @return directory to use for this test
*/
protected Path getUnitTestdir(String testName) {
return new Path(
System.getProperty(
HBaseTestingUtility.BASE_TEST_DIRECTORY_KEY,
HBaseTestingUtility.DEFAULT_BASE_TEST_DIRECTORY
),
testName
);
}
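For illustration, the lookup that getUnitTestdir now performs can be sketched as standalone code; this is a minimal, hypothetical usage (the test name "TestMyFeature" and the local variables are illustrative, not part of the patch, and org.apache.hadoop.fs.Path is assumed imported):
// Sketch: resolve the per-test directory the way getUnitTestdir does
// after this patch.
String base = System.getProperty(
    HBaseTestingUtility.BASE_TEST_DIRECTORY_KEY,      // "test.build.data.basedirectory"
    HBaseTestingUtility.DEFAULT_BASE_TEST_DIRECTORY); // "target/test-data"
Path unitTestDir = new Path(base, "TestMyFeature");   // hypothetical test name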
protected HRegion createNewHRegion(HTableDescriptor desc, byte [] startKey,
byte [] endKey)
throws IOException {
FileSystem filesystem = FileSystem.get(conf);
Path rootdir = filesystem.makeQualified(
new Path(conf.get(HConstants.HBASE_DIR)));
filesystem.mkdirs(rootdir);
HRegionInfo hri = new HRegionInfo(desc.getName(), startKey, endKey);
return HRegion.createHRegion(hri, rootdir, conf, desc);
return HRegion.createHRegion(hri, testDir, conf, desc);
}
protected HRegion openClosedRegion(final HRegion closedRegion)


@ -72,9 +72,6 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
@ -82,15 +79,16 @@ import org.apache.zookeeper.KeeperException.NodeExistsException;
/**
* Facility for testing HBase. Replacement for
* old HBaseTestCase and HBaseCluserTestCase functionality.
* old HBaseTestCase and HBaseClusterTestCase functionality.
* Create an instance and keep it around testing HBase. This class is
* meant to be your one-stop shop for anything you might need testing. Manages
* one cluster at a time only. Depends on log4j being on classpath and
* one cluster at a time only.
* Depends on log4j being on classpath and
* hbase-site.xml for logging and test-run configuration. It does not set
* logging levels nor make changes to configuration parameters.
*/
public class HBaseTestingUtility {
private final static Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
private static final Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
private Configuration conf;
private MiniZooKeeperCluster zkCluster = null;
/**
@ -102,19 +100,33 @@ public class HBaseTestingUtility {
private MiniHBaseCluster hbaseCluster = null;
private MiniMRCluster mrCluster = null;
// If non-null, then already a cluster running.
private File clusterTestBuildDir = null;
// Directory where we put the data for this instance of HBaseTestingUtility
private File dataTestDir = null;
// Directory (usually a subdirectory of dataTestDir) used by the dfs cluster
// if any
private File clusterTestDir = null;
/**
* System property key to get test directory value.
* The name is what it is because mini dfs hard-codes this property
* as the location of its test data.
* It should NOT be used directly in HBase, as it's a property used in
* mini dfs.
* @deprecated can be used only with mini dfs
*/
public static final String TEST_DIRECTORY_KEY = "test.build.data";
private static final String TEST_DIRECTORY_KEY = "test.build.data";
/**
* Default parent directory for test output.
* System property key to get base test directory value.
*/
public static final String DEFAULT_TEST_DIRECTORY = "target/test-data";
public static final String BASE_TEST_DIRECTORY_KEY =
"test.build.data.basedirectory";
/**
* Default base directory for test output.
*/
public static final String DEFAULT_BASE_TEST_DIRECTORY = "target/test-data";
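A build that wants all test output somewhere else can set this key before any directory is resolved; a minimal sketch, assuming it runs before the first HBaseTestingUtility use (the /tmp path is illustrative):
// Hypothetical override: must run before the default is first resolved.
System.setProperty(HBaseTestingUtility.BASE_TEST_DIRECTORY_KEY,
    "/tmp/hbase-tests"); // illustrative location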
/** Compression algorithms to use in parameterized JUnit 4 tests */
public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
@ -124,10 +136,9 @@ public class HBaseTestingUtility {
});
/** Compression algorithms to use in testing */
public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS =
new Compression.Algorithm[] {
Compression.Algorithm.NONE, Compression.Algorithm.GZ
};
public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS ={
Compression.Algorithm.NONE, Compression.Algorithm.GZ
};
/**
* Create all combinations of Bloom filters and compression algorithms for
@ -155,10 +166,6 @@ public class HBaseTestingUtility {
this.conf = conf;
}
public MiniHBaseCluster getHbaseCluster() {
return hbaseCluster;
}
/**
* Returns this classes's instance of {@link Configuration}. Be careful how
* you use the returned Configuration since {@link HConnection} instances
@ -175,69 +182,109 @@ public class HBaseTestingUtility {
}
/**
* Makes sure the test directory is set up so that {@link #getTestDir()}
* returns a valid directory. Useful in unit tests that do not run a
* mini-cluster.
* @return Where to write test data on local filesystem; usually
* {@link #DEFAULT_BASE_TEST_DIRECTORY}
* Should not be used by the unit tests, hence it's private.
* Unit tests will use a subdirectory of this directory.
* @see #setupDataTestDir()
* @see #getTestFileSystem()
*/
public void initTestDir() {
if (System.getProperty(TEST_DIRECTORY_KEY) == null) {
clusterTestBuildDir = setupClusterTestBuildDir();
System.setProperty(TEST_DIRECTORY_KEY, clusterTestBuildDir.getPath());
}
private Path getBaseTestDir() {
String pathName = System.getProperty(
BASE_TEST_DIRECTORY_KEY, DEFAULT_BASE_TEST_DIRECTORY);
return new Path(pathName);
}
/**
* @return Where to write test data on local filesystem; usually
* {@link #DEFAULT_TEST_DIRECTORY}
* @see #setupClusterTestBuildDir()
* @see #clusterTestBuildDir()
* @return Where to write test data on local filesystem, specific to
* the test. Useful for tests that do not use a cluster.
* Creates it if it does not exist already.
* @see #getTestFileSystem()
*/
public static Path getTestDir() {
return new Path(System.getProperty(TEST_DIRECTORY_KEY,
DEFAULT_TEST_DIRECTORY));
public Path getDataTestDir() {
if (dataTestDir == null){
setupDataTestDir();
}
return new Path(dataTestDir.getAbsolutePath());
}
/**
* @return Where the DFS cluster will write data on the local subsystem.
* Creates it if it does not exist already.
* @see #getTestFileSystem()
*/
public Path getClusterTestDir() {
if (clusterTestDir == null){
setupClusterTestDir();
}
return new Path(clusterTestDir.getAbsolutePath());
}
/**
* @param subdirName
* @return Path to a subdirectory named <code>subdirName</code> under
* {@link #getTestDir()}.
* @see #setupClusterTestBuildDir()
* @see #clusterTestBuildDir(String)
* @see #getTestFileSystem()
* {@link #getDataTestDir()}.
* Does *NOT* create it if it does not exist.
*/
public static Path getTestDir(final String subdirName) {
return new Path(getTestDir(), subdirName);
public Path getDataTestDir(final String subdirName) {
return new Path(getDataTestDir(), subdirName);
}
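In use, the instance-scoped accessors behave roughly as in this sketch (the subdirectory name is illustrative):
HBaseTestingUtility util = new HBaseTestingUtility();
Path dataDir = util.getDataTestDir();          // lazily initialized, UUID-named dir
Path subDir = util.getDataTestDir("mySubdir"); // a path under it; not created on disk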
/**
* Home our cluster in a dir under {@link #DEFAULT_TEST_DIRECTORY}. Give it a
* random name
* so can have many concurrent clusters running if we need to. Need to
* amend the {@link #TEST_DIRECTORY_KEY} System property. Its what
* minidfscluster bases
* Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
* Give it a random name so we can have many concurrent tests running if
* we need to. It needs to amend the {@link #TEST_DIRECTORY_KEY}
* System property, as it's what minidfscluster bases
* its data dir on. Modifying a System property is not the way to run
* concurrent instances -- another instance could grab the temporary
* value unintentionally -- but nothing can be done about it at the moment;
* the minidfscluster only works as a single instance.
* @return The calculated cluster test build directory.
* @return The calculated data test build directory.
*/
public File setupClusterTestBuildDir() {
private void setupDataTestDir() {
if (dataTestDir != null) {
LOG.warn("Data test dir already setup in " +
dataTestDir.getAbsolutePath());
return;
}
String randomStr = UUID.randomUUID().toString();
String dirStr = getTestDir(randomStr).toString();
File dir = new File(dirStr).getAbsoluteFile();
Path testDir = new Path(getBaseTestDir(), randomStr);
dataTestDir = new File(testDir.toString()).getAbsoluteFile();
// Have it cleaned up on exit
dir.deleteOnExit();
return dir;
dataTestDir.deleteOnExit();
}
/**
* Creates a directory for the DFS cluster, under the test data
*/
private void setupClusterTestDir() {
if (clusterTestDir != null) {
LOG.warn("Cluster test dir already setup in " +
clusterTestDir.getAbsolutePath());
return;
}
// Using randomUUID ensures that multiple clusters can be launched by
// the same test, if it stops and starts them
Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
// Have it cleaned up on exit
clusterTestDir.deleteOnExit();
}
/**
* @throws IOException If a cluster -- zk, dfs, or hbase -- is already running.
*/
void isRunningCluster(String passedBuildPath) throws IOException {
if (this.clusterTestBuildDir == null || passedBuildPath != null) return;
public void isRunningCluster() throws IOException {
if (dfsCluster == null) return;
throw new IOException("Cluster already running at " +
this.clusterTestBuildDir);
this.clusterTestDir);
}
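The guard's intended use can be sketched like this; the try/catch is only there to show the failure mode, and the whole snippet is illustrative rather than part of the patch:
HBaseTestingUtility util = new HBaseTestingUtility();
util.isRunningCluster();      // returns normally: nothing started yet
util.startMiniDFSCluster(1);
try {
  util.isRunningCluster();    // a dfs cluster is up, so this throws
} catch (IOException expected) {
  // message: "Cluster already running at <clusterTestDir>"
}
util.shutdownMiniDFSCluster();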
/**
@ -248,7 +295,7 @@ public class HBaseTestingUtility {
* @return The mini dfs cluster created.
*/
public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
return startMiniDFSCluster(servers, null, null);
return startMiniDFSCluster(servers, null);
}
/**
@ -265,62 +312,58 @@ public class HBaseTestingUtility {
public MiniDFSCluster startMiniDFSCluster(final String hosts[])
throws Exception {
if (hosts != null && hosts.length != 0) {
return startMiniDFSCluster(hosts.length, null, hosts);
return startMiniDFSCluster(hosts.length, hosts);
} else {
return startMiniDFSCluster(1, null, null);
return startMiniDFSCluster(1, null);
}
}
/**
* Start a minidfscluster.
* Can only create one.
* @param dir Where to home your dfs cluster.
* @param servers How many DNs to start.
* @throws Exception
* @see {@link #shutdownMiniDFSCluster()}
* @return The mini dfs cluster created.
*/
public MiniDFSCluster startMiniDFSCluster(int servers, final File dir)
throws Exception {
return startMiniDFSCluster(servers, dir, null);
}
/**
* Start a minidfscluster.
* Can only create one.
* @param servers How many DNs to start.
* @param dir Where to home your dfs cluster.
* @param hosts hostnames DNs to run on.
* @throws Exception
* @see {@link #shutdownMiniDFSCluster()}
* @return The mini dfs cluster created.
*/
public MiniDFSCluster startMiniDFSCluster(int servers, final File dir, final String hosts[])
public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
throws Exception {
// This does the following to home the minidfscluster
// base_dir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/");
// Check that there is not already a cluster running
isRunningCluster();
// Initialize the local directory used by the MiniDFS
if (clusterTestDir == null) {
setupClusterTestDir();
}
// We have to set this property as it is used by MiniCluster
System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.toString());
// Some tests also do this:
// System.getProperty("test.cache.data", "build/test/cache");
if (dir == null) {
this.clusterTestBuildDir = setupClusterTestBuildDir();
} else {
this.clusterTestBuildDir = dir;
}
System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.toString());
System.setProperty("test.cache.data", this.clusterTestBuildDir.toString());
// The 'test.cache.data' property is also deprecated
System.setProperty("test.cache.data", this.clusterTestDir.toString());
// Ok, now we can start
this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
true, null, null, hosts, null);
// Set this just-started cluser as our filesystem.
// Set this just-started cluster as our filesystem.
FileSystem fs = this.dfsCluster.getFileSystem();
this.conf.set("fs.defaultFS", fs.getUri().toString());
// Do old style too just to be safe.
this.conf.set("fs.default.name", fs.getUri().toString());
// Wait for the cluster to be totally up
this.dfsCluster.waitClusterUp();
return this.dfsCluster;
}
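With the directory parameter gone, a caller's lifecycle looks roughly like this sketch (the two datanodes and the probe path are illustrative):
HBaseTestingUtility util = new HBaseTestingUtility();
MiniDFSCluster dfs = util.startMiniDFSCluster(2); // homed under getClusterTestDir()
try {
  FileSystem fs = dfs.getFileSystem();
  fs.mkdirs(new Path("/probe"));                  // illustrative use of the cluster
} finally {
  util.shutdownMiniDFSCluster();
}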
/**
* Shuts down instance created by call to {@link #startMiniDFSCluster(int, File)}
* Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
* or does nothing.
* @throws Exception
*/
@ -328,7 +371,9 @@ public class HBaseTestingUtility {
if (this.dfsCluster != null) {
// The below throws an exception per dn, AsynchronousCloseException.
this.dfsCluster.shutdown();
dfsCluster = null;
}
}
/**
@ -339,8 +384,7 @@ public class HBaseTestingUtility {
* @return zk cluster started.
*/
public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
return startMiniZKCluster(setupClusterTestBuildDir(),1);
return startMiniZKCluster(1);
}
/**
@ -353,7 +397,8 @@ public class HBaseTestingUtility {
*/
public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
throws Exception {
return startMiniZKCluster(setupClusterTestBuildDir(), zooKeeperServerNum);
File zkClusterFile = new File(getClusterTestDir().toString());
return startMiniZKCluster(zkClusterFile, zooKeeperServerNum);
}
@ -365,12 +410,12 @@ public class HBaseTestingUtility {
private MiniZooKeeperCluster startMiniZKCluster(final File dir,
int zooKeeperServerNum)
throws Exception {
this.passedZkCluster = false;
if (this.zkCluster != null) {
throw new IOException("Cluster already running at " + dir);
}
this.passedZkCluster = false;
this.zkCluster = new MiniZooKeeperCluster();
int clientPort = this.zkCluster.startup(dir,zooKeeperServerNum);
int clientPort = this.zkCluster.startup(dir,zooKeeperServerNum);
this.conf.set("hbase.zookeeper.property.clientPort",
Integer.toString(clientPort));
return this.zkCluster;
@ -465,26 +510,20 @@ public class HBaseTestingUtility {
LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");
// If we already put up a cluster, fail.
String testBuildPath = conf.get(TEST_DIRECTORY_KEY, null);
isRunningCluster(testBuildPath);
if (testBuildPath != null) {
LOG.info("Using passed path: " + testBuildPath);
}
// Make a new random dir to home everything in. Set it as system property.
// minidfs reads home from system property.
this.clusterTestBuildDir = testBuildPath == null?
setupClusterTestBuildDir() : new File(testBuildPath);
System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.getPath());
isRunningCluster();
// Bring up mini dfs cluster. This spews a bunch of warnings about missing
// scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
startMiniDFSCluster(numDataNodes, this.clusterTestBuildDir, dataNodeHosts);
this.dfsCluster.waitClusterUp();
startMiniDFSCluster(numDataNodes, dataNodeHosts);
// Start up a zk cluster.
if (this.zkCluster == null) {
startMiniZKCluster(this.clusterTestBuildDir);
startMiniZKCluster(clusterTestDir);
}
// Start the MiniHBaseCluster
return startMiniHBaseCluster(numMasters, numSlaves);
}
@ -528,7 +567,7 @@ public class HBaseTestingUtility {
HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
ResultScanner s = t.getScanner(new Scan());
while (s.next() != null) {
continue;
// do nothing
}
LOG.info("HBase has been restarted");
}
@ -547,22 +586,22 @@ public class HBaseTestingUtility {
* @throws IOException
* @see {@link #startMiniCluster(int)}
*/
public void shutdownMiniCluster() throws IOException {
public void shutdownMiniCluster() throws Exception {
LOG.info("Shutting down minicluster");
shutdownMiniHBaseCluster();
if (!this.passedZkCluster) shutdownMiniZKCluster();
if (this.dfsCluster != null) {
// The below throws an exception per dn, AsynchronousCloseException.
this.dfsCluster.shutdown();
if (!this.passedZkCluster){
shutdownMiniZKCluster();
}
shutdownMiniDFSCluster();
// Clean up our directory.
if (this.clusterTestBuildDir != null && this.clusterTestBuildDir.exists()) {
if (this.clusterTestDir != null && this.clusterTestDir.exists()) {
// Need to use deleteDirectory because File.delete required dir is empty.
if (!FSUtils.deleteDirectory(FileSystem.getLocal(this.conf),
new Path(this.clusterTestBuildDir.toString()))) {
LOG.warn("Failed delete of " + this.clusterTestBuildDir.toString());
new Path(this.clusterTestDir.toString()))) {
LOG.warn("Failed delete of " + this.clusterTestDir.toString());
}
this.clusterTestBuildDir = null;
this.clusterTestDir = null;
}
LOG.info("Minicluster is down");
}
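End to end, the start/stop cycle this patch expects can be sketched as (the body of the try block is illustrative):
HBaseTestingUtility util = new HBaseTestingUtility();
util.startMiniCluster();      // dfs + zk + hbase, homed under the random data test dir
try {
  // ... run assertions against util.getHBaseCluster() ...
} finally {
  util.shutdownMiniCluster(); // also deletes clusterTestDir
}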
@ -576,8 +615,8 @@ public class HBaseTestingUtility {
this.hbaseCluster.shutdown();
// Wait till hbase is down before going on to shutdown zk.
this.hbaseCluster.join();
this.hbaseCluster = null;
}
this.hbaseCluster = null;
}
/**
@ -931,8 +970,8 @@ public class HBaseTestingUtility {
* Creates the specified number of regions in the specified table.
* @param c
* @param table
* @param columnFamily
* @param startKeys
* @param family
* @param numRegions
* @return
* @throws IOException
*/
@ -1107,7 +1146,6 @@ public class HBaseTestingUtility {
Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
// add custom ones
int count = 0;
for (int i = 0; i < startKeys.length; i++) {
int j = (i + 1) % startKeys.length;
HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i],
@ -1118,7 +1156,6 @@ public class HBaseTestingUtility {
meta.put(put);
LOG.info("createMultiRegionsInMeta: inserted " + hri.toString());
newRegions.add(hri);
count++;
}
return newRegions;
}
@ -1221,6 +1258,7 @@ public class HBaseTestingUtility {
LOG.info("Stopping mini mapreduce cluster...");
if (mrCluster != null) {
mrCluster.shutdown();
mrCluster = null;
}
// Restore configuration to point to local jobtracker
conf.set("mapred.job.tracker", "local");
@ -1382,7 +1420,13 @@ public class HBaseTestingUtility {
* @throws IOException
*/
public boolean cleanupTestDir() throws IOException {
return deleteDir(getTestDir());
if (dataTestDir == null ){
return false;
} else {
boolean ret = deleteDir(getDataTestDir());
dataTestDir = null;
return ret;
}
}
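The return value now distinguishes "nothing was ever created" from a real delete; a small sketch mirroring the testTestDir test later in this commit:
HBaseTestingUtility util = new HBaseTestingUtility();
boolean removed = util.cleanupTestDir();   // false: data test dir never initialized
FileSystem fs = util.getTestFileSystem();
fs.mkdirs(util.getDataTestDir());          // materialize the lazily-named dir
removed = util.cleanupTestDir();           // true: directory existed and was deleted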
/**
@ -1391,7 +1435,10 @@ public class HBaseTestingUtility {
* @throws IOException
*/
public boolean cleanupTestDir(final String subdir) throws IOException {
return deleteDir(getTestDir(subdir));
if (dataTestDir == null){
return false;
}
return deleteDir(getDataTestDir(subdir));
}
/**
@ -1402,7 +1449,7 @@ public class HBaseTestingUtility {
public boolean deleteDir(final Path dir) throws IOException {
FileSystem fs = getTestFileSystem();
if (fs.exists(dir)) {
return fs.delete(getTestDir(), true);
return fs.delete(dir, true);
}
return false;
}
@ -1436,6 +1483,9 @@ public class HBaseTestingUtility {
return false;
}
/**
* This method clones the passed <code>c</code> configuration setting a new
* user into the clone. Use it when getting new instances of FileSystem. Only
@ -1500,7 +1550,6 @@ public class HBaseTestingUtility {
* Wait until <code>countOfRegion</code> in .META. have a non-empty
* info:server. This means all regions have been deployed, master has been
* informed and updated .META. with the regions deployed server.
* @param conf Configuration
* @param countOfRegions How many regions in .META.
* @throws IOException
*/
@ -1572,7 +1621,7 @@ public class HBaseTestingUtility {
* Creates an znode with OPENED state.
* @param TEST_UTIL
* @param region
* @param regionServer
* @param serverName
* @return
* @throws IOException
* @throws ZooKeeperConnectionException
@ -1639,5 +1688,4 @@ public class HBaseTestingUtility {
return "<out_of_range>";
}
}
}


@ -28,32 +28,21 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.*;
public class TestFSTableDescriptorForceCreation {
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
@BeforeClass
public static void setUpCluster() throws Exception {
UTIL.startMiniDFSCluster(1);
}
@AfterClass
public static void shutDownCluster() throws Exception {
UTIL.shutdownMiniDFSCluster();
}
@Test
public void testShouldCreateNewTableDescriptorIfForcefulCreationIsFalse()
throws IOException {
final String name = "newTable2";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path rootdir = new Path(fs.getWorkingDirectory(), name);
Path rootdir = new Path(UTIL.getDataTestDir(), name);
HTableDescriptor htd = new HTableDescriptor(name);
assertTrue("Should create new table descriptor", FSUtils
.createTableDescriptor(fs, rootdir, htd, false));
assertTrue("Should create new table descriptor",
FSUtils.createTableDescriptor(fs, rootdir, htd, false));
}
@Test
@ -62,12 +51,12 @@ public class TestFSTableDescriptorForceCreation {
final String name = "testAlreadyExists";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
// Cleanup old tests if any detrius laying around.
Path rootdir = new Path(fs.getWorkingDirectory(), name);
Path rootdir = new Path(UTIL.getDataTestDir(), name);
TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
HTableDescriptor htd = new HTableDescriptor(name);
htds.add(htd);
assertFalse("Should not create new table descriptor", FSUtils
.createTableDescriptor(fs, rootdir, htd, false));
.createTableDescriptor(fs, rootdir, htd, false));
}
@Test
@ -75,10 +64,10 @@ public class TestFSTableDescriptorForceCreation {
throws Exception {
final String name = "createNewTableNew2";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
Path rootdir = new Path(fs.getWorkingDirectory(), name);
Path rootdir = new Path(UTIL.getDataTestDir(), name);
HTableDescriptor htd = new HTableDescriptor(name);
FSUtils.createTableDescriptor(fs, rootdir, htd, false);
assertTrue("Should create new table descriptor", FSUtils
.createTableDescriptor(fs, rootdir, htd, true));
.createTableDescriptor(fs, rootdir, htd, true));
}
}


@ -50,26 +50,6 @@ import org.junit.Test;
public class TestHBaseTestingUtility {
private final Log LOG = LogFactory.getLog(this.getClass());
private HBaseTestingUtility hbt;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
}
@Before
public void setUp() throws Exception {
this.hbt = new HBaseTestingUtility();
this.hbt.cleanupTestDir();
}
@After
public void tearDown() throws Exception {
}
/**
* Basic sanity test that spins up multiple HDFS and HBase clusters that share
* the same ZK ensemble. We then create the same table in both and make sure
@ -136,57 +116,80 @@ public class TestHBaseTestingUtility {
}
@Test public void testMiniCluster() throws Exception {
MiniHBaseCluster cluster = this.hbt.startMiniCluster();
HBaseTestingUtility hbt = new HBaseTestingUtility();
MiniHBaseCluster cluster = hbt.startMiniCluster();
try {
assertEquals(1, cluster.getLiveRegionServerThreads().size());
} finally {
cluster.shutdown();
hbt.shutdownMiniCluster();
}
}
/**
* Test that we can start and stop a cluster multiple times
* with the same HBaseTestingUtility.
*/
@Test public void testMultipleStartStop() throws Exception{
HBaseTestingUtility htu1 = new HBaseTestingUtility();
Path foo = new Path("foo");
htu1.startMiniCluster();
htu1.getDFSCluster().getFileSystem().create(foo);
assertTrue( htu1.getDFSCluster().getFileSystem().exists(foo));
htu1.shutdownMiniCluster();
htu1.startMiniCluster();
assertFalse( htu1.getDFSCluster().getFileSystem().exists(foo));
htu1.getDFSCluster().getFileSystem().create(foo);
assertTrue( htu1.getDFSCluster().getFileSystem().exists(foo));
htu1.shutdownMiniCluster();
}
@Test public void testMiniZooKeeper() throws Exception {
MiniZooKeeperCluster cluster1 = this.hbt.startMiniZKCluster();
HBaseTestingUtility hbt = new HBaseTestingUtility();
MiniZooKeeperCluster cluster1 = hbt.startMiniZKCluster();
try {
assertEquals(0, cluster1.getBackupZooKeeperServerNum());
assertEquals(0, cluster1.getBackupZooKeeperServerNum());
assertTrue((cluster1.killCurrentActiveZooKeeperServer() == -1));
} finally {
cluster1.shutdown();
hbt.shutdownMiniZKCluster();
}
this.hbt.shutdownMiniZKCluster();
// set up zookeeper cluster with 5 zk servers
MiniZooKeeperCluster cluster2 = this.hbt.startMiniZKCluster(5);
MiniZooKeeperCluster cluster2 = hbt.startMiniZKCluster(5);
int defaultClientPort = 21818;
cluster2.setDefaultClientPort(defaultClientPort);
try {
assertEquals(4, cluster2.getBackupZooKeeperServerNum());
// killing the current active zk server
assertTrue((cluster2.killCurrentActiveZooKeeperServer() >= defaultClientPort));
assertTrue((cluster2.killCurrentActiveZooKeeperServer() >= defaultClientPort));
assertTrue((cluster2.killCurrentActiveZooKeeperServer() >= defaultClientPort));
assertEquals(2, cluster2.getBackupZooKeeperServerNum());
assertEquals(3, cluster2.getZooKeeperServerNum());
// killing the backup zk servers
cluster2.killOneBackupZooKeeperServer();
cluster2.killOneBackupZooKeeperServer();
assertEquals(0, cluster2.getBackupZooKeeperServerNum());
assertEquals(1, cluster2.getZooKeeperServerNum());
// killing the last zk server
assertTrue((cluster2.killCurrentActiveZooKeeperServer() == -1));
// this should do nothing.
cluster2.killOneBackupZooKeeperServer();
assertEquals(-1, cluster2.getBackupZooKeeperServerNum());
assertEquals(0, cluster2.getZooKeeperServerNum());
assertEquals(0, cluster2.getZooKeeperServerNum());
} finally {
cluster2.shutdown();
hbt.shutdownMiniZKCluster();
}
}
@Test public void testMiniDFSCluster() throws Exception {
MiniDFSCluster cluster = this.hbt.startMiniDFSCluster(1);
HBaseTestingUtility hbt = new HBaseTestingUtility();
MiniDFSCluster cluster = hbt.startMiniDFSCluster(1);
FileSystem dfs = cluster.getFileSystem();
Path dir = new Path("dir");
Path qualifiedDir = dfs.makeQualified(dir);
@ -194,26 +197,32 @@ public class TestHBaseTestingUtility {
assertFalse(dfs.exists(qualifiedDir));
assertTrue(dfs.mkdirs(qualifiedDir));
assertTrue(dfs.delete(qualifiedDir, true));
try {
} finally {
cluster.shutdown();
}
hbt.shutdownMiniCluster();
}
@Test public void testSetupClusterTestBuildDir() {
File testdir = this.hbt.setupClusterTestBuildDir();
@Test public void testSetupClusterTestBuildDir() throws Exception {
HBaseTestingUtility hbt = new HBaseTestingUtility();
Path testdir = hbt.getClusterTestDir();
LOG.info("uuid-subdir=" + testdir);
assertFalse(testdir.exists());
assertTrue(testdir.mkdirs());
assertTrue(testdir.exists());
FileSystem fs = hbt.getTestFileSystem();
assertFalse(fs.exists(testdir));
hbt.startMiniDFSCluster(1);
assertTrue(fs.exists(testdir));
hbt.shutdownMiniCluster();
assertFalse(fs.exists(testdir));
}
@Test public void testTestDir() throws IOException {
Path testdir = HBaseTestingUtility.getTestDir();
@Test public void testTestDir() throws Exception {
HBaseTestingUtility hbt = new HBaseTestingUtility();
Path testdir = hbt.getDataTestDir();
LOG.info("testdir=" + testdir);
FileSystem fs = this.hbt.getTestFileSystem();
FileSystem fs = hbt.getTestFileSystem();
assertTrue(!fs.exists(testdir));
assertTrue(fs.mkdirs(testdir));
assertTrue(this.hbt.cleanupTestDir());
assertTrue(hbt.cleanupTestDir());
}
}


@ -49,7 +49,7 @@ public class TestInfoServers {
}
@AfterClass
public static void afterClass() throws IOException {
public static void afterClass() throws Exception {
UTIL.shutdownMiniCluster();
}
@ -60,10 +60,10 @@ public class TestInfoServers {
public void testInfoServersRedirect() throws Exception {
// give the cluster time to start up
new HTable(UTIL.getConfiguration(), ".META.");
int port = UTIL.getHbaseCluster().getMaster().getInfoServer().getPort();
int port = UTIL.getHBaseCluster().getMaster().getInfoServer().getPort();
assertHasExpectedContent(new URL("http://localhost:" + port +
"/index.html"), "master-status");
port = UTIL.getHbaseCluster().getRegionServerThreads().get(0).getRegionServer().
port = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer().
getInfoServer().getPort();
assertHasExpectedContent(new URL("http://localhost:" + port +
"/index.html"), "rs-status");
@ -80,10 +80,10 @@ public class TestInfoServers {
public void testInfoServersStatusPages() throws Exception {
// give the cluster time to start up
new HTable(UTIL.getConfiguration(), ".META.");
int port = UTIL.getHbaseCluster().getMaster().getInfoServer().getPort();
int port = UTIL.getHBaseCluster().getMaster().getInfoServer().getPort();
assertHasExpectedContent(new URL("http://localhost:" + port +
"/master-status"), "META");
port = UTIL.getHbaseCluster().getRegionServerThreads().get(0).getRegionServer().
port = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer().
getInfoServer().getPort();
assertHasExpectedContent(new URL("http://localhost:" + port +
"/rs-status"), "META");


@ -97,7 +97,7 @@ public class TestMultiVersions {
Incommon incommon = new HTableIncommon(table);
TimestampTestBase.doTestDelete(incommon, new FlushCache() {
public void flushcache() throws IOException {
UTIL.getHbaseCluster().flushcache();
UTIL.getHBaseCluster().flushcache();
}
});


@ -56,7 +56,7 @@ public class TestRegionRebalancing {
}
@AfterClass
public static void afterClass() throws IOException {
public static void afterClass() throws Exception {
UTIL.shutdownMiniCluster();
}
@ -94,35 +94,35 @@ public class TestRegionRebalancing {
// add a region server - total of 2
LOG.info("Started second server=" +
UTIL.getHbaseCluster().startRegionServer().getRegionServer().getServerName());
UTIL.getHbaseCluster().getMaster().balance();
UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName());
UTIL.getHBaseCluster().getMaster().balance();
assertRegionsAreBalanced();
// add a region server - total of 3
LOG.info("Started third server=" +
UTIL.getHbaseCluster().startRegionServer().getRegionServer().getServerName());
UTIL.getHbaseCluster().getMaster().balance();
UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName());
UTIL.getHBaseCluster().getMaster().balance();
assertRegionsAreBalanced();
// kill a region server - total of 2
LOG.info("Stopped third server=" + UTIL.getHbaseCluster().stopRegionServer(2, false));
UTIL.getHbaseCluster().waitOnRegionServer(2);
UTIL.getHbaseCluster().getMaster().balance();
LOG.info("Stopped third server=" + UTIL.getHBaseCluster().stopRegionServer(2, false));
UTIL.getHBaseCluster().waitOnRegionServer(2);
UTIL.getHBaseCluster().getMaster().balance();
assertRegionsAreBalanced();
// start two more region servers - total of 4
LOG.info("Readding third server=" +
UTIL.getHbaseCluster().startRegionServer().getRegionServer().getServerName());
UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName());
LOG.info("Added fourth server=" +
UTIL.getHbaseCluster().startRegionServer().getRegionServer().getServerName());
UTIL.getHbaseCluster().getMaster().balance();
UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName());
UTIL.getHBaseCluster().getMaster().balance();
assertRegionsAreBalanced();
for (int i = 0; i < 6; i++){
LOG.info("Adding " + (i + 5) + "th region server");
UTIL.getHbaseCluster().startRegionServer();
UTIL.getHBaseCluster().startRegionServer();
}
UTIL.getHbaseCluster().getMaster().balance();
UTIL.getHBaseCluster().getMaster().balance();
assertRegionsAreBalanced();
}
@ -154,7 +154,7 @@ public class TestRegionRebalancing {
int regionCount = getRegionCount();
List<HRegionServer> servers = getOnlineRegionServers();
double avg = UTIL.getHbaseCluster().getMaster().getAverageLoad();
double avg = UTIL.getHBaseCluster().getMaster().getAverageLoad();
int avgLoadPlusSlop = (int)Math.ceil(avg * (1 + slop));
int avgLoadMinusSlop = (int)Math.floor(avg * (1 - slop)) - 1;
LOG.debug("There are " + servers.size() + " servers and " + regionCount
@ -179,7 +179,7 @@ public class TestRegionRebalancing {
Thread.sleep(10000);
} catch (InterruptedException e) {}
UTIL.getHbaseCluster().getMaster().balance();
UTIL.getHBaseCluster().getMaster().balance();
continue;
}
@ -194,7 +194,7 @@ public class TestRegionRebalancing {
private List<HRegionServer> getOnlineRegionServers() {
List<HRegionServer> list = new ArrayList<HRegionServer>();
for (JVMClusterUtil.RegionServerThread rst :
UTIL.getHbaseCluster().getRegionServerThreads()) {
UTIL.getHBaseCluster().getRegionServerThreads()) {
if (rst.getRegionServer().isOnline()) {
list.add(rst.getRegionServer());
}


@ -85,7 +85,7 @@ public class TestMetaReaderEditor {
ct.start();
}
@AfterClass public static void afterClass() throws IOException {
@AfterClass public static void afterClass() throws Exception {
UTIL.shutdownMiniCluster();
}


@ -1241,7 +1241,7 @@ public class TestAdmin {
throws IOException {
// When the META table can be opened, the region servers are running
new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);
HRegionServer regionServer = TEST_UTIL.getHbaseCluster()
HRegionServer regionServer = TEST_UTIL.getHBaseCluster()
.getRegionServerThreads().get(0).getRegionServer();
// Create the test table and open it


@ -44,7 +44,6 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -225,7 +224,7 @@ public class TestFromClientSide {
ResultScanner scanner = table.getScanner(s);
while (scanner.next() != null) continue;
Path tempPath = new Path(HBaseTestingUtility.getTestDir(), "regions.dat");
Path tempPath = new Path(TEST_UTIL.getDataTestDir(), "regions.dat");
final String tempFileName = tempPath.toString();


@ -51,7 +51,7 @@ public class TestHTablePool {
}
@AfterClass
public static void tearDownAfterClass() throws IOException {
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}


@ -62,9 +62,9 @@ public class TestReplicationAdmin {
Configuration conf = TEST_UTIL.getConfiguration();
conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
admin = new ReplicationAdmin(conf);
Path oldLogDir = new Path(TEST_UTIL.getTestDir(),
Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
HConstants.HREGION_OLDLOGDIR_NAME);
Path logDir = new Path(TEST_UTIL.getTestDir(),
Path logDir = new Path(TEST_UTIL.getDataTestDir(),
HConstants.HREGION_LOGDIR_NAME);
manager = new ReplicationSourceManager(admin.getReplicationZk(), conf,
// The following stopper never stops so that we can respond


@ -157,8 +157,8 @@ public class TestClassLoading {
// compose a java source file.
String javaCode = "import org.apache.hadoop.hbase.coprocessor.*;" +
"public class " + className + " extends BaseRegionObserver {}";
Path baseDir = HBaseTestingUtility.getTestDir();
Path srcDir = new Path(HBaseTestingUtility.getTestDir(), "src");
Path baseDir = TEST_UTIL.getDataTestDir();
Path srcDir = new Path(TEST_UTIL.getDataTestDir(), "src");
File srcDirPath = new File(srcDir.toString());
srcDirPath.mkdirs();
File sourceCodeFile = new File(srcDir.toString(), className + ".java");


@ -479,7 +479,7 @@ public class TestMasterObserver {
}
@AfterClass
public static void teardownAfterClass() throws Exception {
public static void tearDownAfterClass() throws Exception {
UTIL.shutdownMiniCluster();
}


@ -51,8 +51,8 @@ public class TestColumnPrefixFilter {
HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter");
htd.addFamily(new HColumnDescriptor(family));
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.
getTestDir(), TEST_UTIL.getConfiguration(), htd);
HRegion region = HRegion.createHRegion(info, TEST_UTIL.
getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
List<String> rows = generateRandomWords(100, "row");
List<String> columns = generateRandomWords(10000, "column");
@ -107,8 +107,8 @@ public class TestColumnPrefixFilter {
HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter");
htd.addFamily(new HColumnDescriptor(family));
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.
getTestDir(), TEST_UTIL.getConfiguration(), htd);
HRegion region = HRegion.createHRegion(info, TEST_UTIL.
getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
List<String> rows = generateRandomWords(100, "row");
List<String> columns = generateRandomWords(10000, "column");


@ -76,7 +76,7 @@ public class TestDependentColumnFilter extends TestCase {
htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
htd.addFamily(new HColumnDescriptor(FAMILIES[1]));
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
this.region = HRegion.createHRegion(info, testUtil.getTestDir(),
this.region = HRegion.createHRegion(info, testUtil.getDataTestDir(),
testUtil.getConfiguration(), htd);
addData();
}


@ -52,8 +52,8 @@ public class TestMultipleColumnPrefixFilter {
htd.addFamily(new HColumnDescriptor(family));
// HRegionInfo info = new HRegionInfo(htd, null, null, false);
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.
getTestDir(), TEST_UTIL.getConfiguration(), htd);
HRegion region = HRegion.createHRegion(info, TEST_UTIL.
getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
List<String> rows = generateRandomWords(100, "row");
List<String> columns = generateRandomWords(10000, "column");
@ -111,8 +111,8 @@ public class TestMultipleColumnPrefixFilter {
htd.addFamily(new HColumnDescriptor(family1));
htd.addFamily(new HColumnDescriptor(family2));
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.
getTestDir(), TEST_UTIL.getConfiguration(), htd);
HRegion region = HRegion.createHRegion(info, TEST_UTIL.
getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
List<String> rows = generateRandomWords(100, "row");
List<String> columns = generateRandomWords(10000, "column");
@ -174,8 +174,8 @@ public class TestMultipleColumnPrefixFilter {
HTableDescriptor htd = new HTableDescriptor("TestMultipleColumnPrefixFilter");
htd.addFamily(new HColumnDescriptor(family));
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.
getTestDir(), TEST_UTIL.getConfiguration(),htd);
HRegion region = HRegion.createHRegion(info, TEST_UTIL.
getDataTestDir(), TEST_UTIL.getConfiguration(),htd);
List<String> rows = generateRandomWords(100, "row");
List<String> columns = generateRandomWords(10000, "column");


@ -59,7 +59,7 @@ public class TestHalfStoreFileReader {
@Test
public void testHalfScanAndReseek() throws IOException {
HBaseTestingUtility test_util = new HBaseTestingUtility();
String root_dir = HBaseTestingUtility.getTestDir("TestHalfStoreFile").toString();
String root_dir = test_util.getDataTestDir("TestHalfStoreFile").toString();
Path p = new Path(root_dir, "test");
Configuration conf = test_util.getConfiguration();


@ -34,14 +34,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.Compression;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex;
import org.apache.hadoop.hbase.io.hfile.HFileReaderV2;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.junit.After;
@ -216,7 +208,7 @@ public class TestCacheOnWrite {
}
public void writeStoreFile() throws IOException {
Path storeFileParentDir = new Path(HBaseTestingUtility.getTestDir(),
Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(),
"test_cache_on_write");
StoreFile.Writer sfw = StoreFile.createWriter(fs, storeFileParentDir,
DATA_BLOCK_SIZE, compress, KeyValue.COMPARATOR, conf,


@ -127,7 +127,7 @@ public class TestFixedFileTrailer {
}
// Now check what happens if the trailer is corrupted.
Path trailerPath = new Path(HBaseTestingUtility.getTestDir(), "trailer_"
Path trailerPath = new Path(util.getDataTestDir(), "trailer_"
+ version);
{


@ -49,12 +49,16 @@ import org.apache.hadoop.io.Writable;
public class TestHFile extends HBaseTestCase {
static final Log LOG = LogFactory.getLog(TestHFile.class);
private static String ROOT_DIR =
HBaseTestingUtility.getTestDir("TestHFile").toString();
private String ROOT_DIR;
private final int minBlockSize = 512;
private static String localFormatter = "%010d";
private static CacheConfig cacheConf = null;
public void setUp() throws Exception {
super.setUp();
ROOT_DIR = this.getUnitTestdir("TestHFile").toString();
}
/**
* Test empty HFile.
* Test all features work reasonably when hfile is empty of entries.
@ -123,7 +127,7 @@ public class TestHFile extends HBaseTestCase {
}
private FSDataOutputStream createFSOutput(Path name) throws IOException {
if (fs.exists(name)) fs.delete(name, true);
//if (fs.exists(name)) fs.delete(name, true);
FSDataOutputStream fout = fs.create(name);
return fout;
}


@ -69,14 +69,13 @@ public class TestHFileBlock {
private static final int NUM_READER_THREADS = 26;
private static final HBaseTestingUtility TEST_UTIL =
new HBaseTestingUtility();
new HBaseTestingUtility();
private FileSystem fs;
private int uncompressedSizeV1;
@Before
public void setUp() throws IOException {
fs = FileSystem.get(TEST_UTIL.getConfiguration());
TEST_UTIL.initTestDir();
}
public void writeTestBlockContents(DataOutputStream dos) throws IOException {
@ -154,8 +153,8 @@ public class TestHFileBlock {
for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
for (boolean pread : new boolean[] { false, true }) {
byte[] block = createTestV1Block(algo);
Path path = new Path(HBaseTestingUtility.getTestDir(), "blocks_v1_"
+ algo);
Path path = new Path(TEST_UTIL.getDataTestDir(),
"blocks_v1_" + algo);
LOG.info("Creating temporary file at " + path);
FSDataOutputStream os = fs.create(path);
int totalSize = 0;
@ -188,7 +187,7 @@ public class TestHFileBlock {
public void testReaderV2() throws IOException {
for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
for (boolean pread : new boolean[] { false, true }) {
Path path = new Path(HBaseTestingUtility.getTestDir(), "blocks_v2_"
Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
+ algo);
FSDataOutputStream os = fs.create(path);
HFileBlock.Writer hbw = new HFileBlock.Writer(algo);
@ -244,7 +243,7 @@ public class TestHFileBlock {
for (boolean cacheOnWrite : BOOLEAN_VALUES) {
Random rand = defaultRandom();
LOG.info("Compression algorithm: " + algo + ", pread=" + pread);
Path path = new Path(HBaseTestingUtility.getTestDir(), "prev_offset");
Path path = new Path(TEST_UTIL.getDataTestDir(), "prev_offset");
List<Long> expectedOffsets = new ArrayList<Long>();
List<Long> expectedPrevOffsets = new ArrayList<Long>();
List<BlockType> expectedTypes = new ArrayList<BlockType>();
@ -400,7 +399,7 @@ public class TestHFileBlock {
public void testConcurrentReading() throws Exception {
for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) {
Path path =
new Path(HBaseTestingUtility.getTestDir(), "concurrent_reading");
new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
Random rand = defaultRandom();
List<Long> offsets = new ArrayList<Long>();
List<BlockType> types = new ArrayList<BlockType>();


@ -43,8 +43,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader;
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexChunk;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
@ -113,7 +111,7 @@ public class TestHFileBlockIndex {
@Test
public void testBlockIndex() throws IOException {
path = new Path(HBaseTestingUtility.getTestDir(), "block_index_" + compr);
path = new Path(TEST_UTIL.getDataTestDir(), "block_index_" + compr);
writeWholeIndex();
readIndex();
}
@ -458,7 +456,7 @@ public class TestHFileBlockIndex {
*/
@Test
public void testHFileWriterAndReader() throws IOException {
Path hfilePath = new Path(HBaseTestingUtility.getTestDir(),
Path hfilePath = new Path(TEST_UTIL.getDataTestDir(),
"hfile_for_block_index");
CacheConfig cacheConf = new CacheConfig(conf);
BlockCache blockCache = cacheConf.getBlockCache();


@ -45,8 +45,9 @@ import org.apache.hadoop.io.compress.GzipCodec;
* instead.</p>
*/
public class TestHFilePerformance extends TestCase {
private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static String ROOT_DIR =
HBaseTestingUtility.getTestDir("TestHFilePerformance").toString();
TEST_UTIL.getDataTestDir("TestHFilePerformance").toString();
private FileSystem fs;
private Configuration conf;
private long startTimeEpoch;


@ -244,8 +244,9 @@ public class TestHFileSeek extends TestCase {
int minWordLen = 5;
int maxWordLen = 20;
private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
String rootDir =
HBaseTestingUtility.getTestDir("TestTFileSeek").toString();
TEST_UTIL.getDataTestDir("TestTFileSeek").toString();
String file = "TestTFileSeek";
// String compress = "lzo"; DISABLED
String compress = "none";


@ -63,7 +63,7 @@ public class TestHFileWriterV2 {
@Test
public void testHFileFormatV2() throws IOException {
Path hfilePath = new Path(HBaseTestingUtility.getTestDir(),
Path hfilePath = new Path(TEST_UTIL.getDataTestDir(),
"testHFileFormatV2");
final Compression.Algorithm COMPRESS_ALGO = Compression.Algorithm.GZ;


@ -40,7 +40,7 @@ public class TestReseekTo {
@Test
public void testReseekTo() throws Exception {
Path ncTFile = new Path(HBaseTestingUtility.getTestDir(), "basic.hfile");
Path ncTFile = new Path(TEST_UTIL.getDataTestDir(), "basic.hfile");
FSDataOutputStream fout = TEST_UTIL.getTestFileSystem().create(ncTFile);
CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
HFile.Writer writer = HFile.getWriterFactory(


@ -80,7 +80,7 @@ public class TestTableMapReduce {
}
@AfterClass
public static void afterClass() throws IOException {
public static void afterClass() throws Exception {
UTIL.shutdownMiniMapReduceCluster();
UTIL.shutdownMiniCluster();
}


@ -175,7 +175,7 @@ public class TestHFileOutputFormat {
RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
TaskAttemptContext context = null;
Path dir =
HBaseTestingUtility.getTestDir("test_LATEST_TIMESTAMP_isReplaced");
util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced");
try {
Job job = new Job(conf);
FileOutputFormat.setOutputPath(job, dir);
@ -243,7 +243,7 @@ public class TestHFileOutputFormat {
RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
TaskAttemptContext context = null;
Path dir =
HBaseTestingUtility.getTestDir("test_TIMERANGE_present");
util.getDataTestDir("test_TIMERANGE_present");
LOG.info("Timerange dir writing to dir: "+ dir);
try {
// build a record writer using HFileOutputFormat
@ -307,7 +307,7 @@ public class TestHFileOutputFormat {
@Test
public void testWritingPEData() throws Exception {
Configuration conf = util.getConfiguration();
Path testDir = HBaseTestingUtility.getTestDir("testWritingPEData");
Path testDir = util.getDataTestDir("testWritingPEData");
FileSystem fs = testDir.getFileSystem(conf);
// Set down this value or we OOME in eclipse.
@ -372,7 +372,7 @@ public class TestHFileOutputFormat {
private void doIncrementalLoadTest(
boolean shouldChangeRegions) throws Exception {
Configuration conf = util.getConfiguration();
Path testDir = HBaseTestingUtility.getTestDir("testLocalMRIncrementalLoad");
Path testDir = util.getDataTestDir("testLocalMRIncrementalLoad");
byte[][] startKeys = generateRandomStartKeys(5);
try {
@ -557,7 +557,7 @@ public class TestHFileOutputFormat {
RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
TaskAttemptContext context = null;
Path dir =
HBaseTestingUtility.getTestDir("testColumnFamilyCompression");
util.getDataTestDir("testColumnFamilyCompression");
HTable table = Mockito.mock(HTable.class);


@ -252,7 +252,7 @@ public class TestImportTsv {
}
assertTrue(verified);
} finally {
cluster.shutdown();
htu1.shutdownMiniCluster();
}
}


@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
import org.junit.*;
/**
* Test cases for the "load" half of the HFileOutputFormat bulk load
@ -49,8 +49,6 @@ import org.junit.Test;
* tests in TestHFileOutputFormat
*/
public class TestLoadIncrementalHFiles {
private static final byte[] TABLE = Bytes.toBytes("mytable");
private static final byte[] QUALIFIER = Bytes.toBytes("myqual");
private static final byte[] FAMILY = Bytes.toBytes("myfam");
@ -63,7 +61,17 @@ public class TestLoadIncrementalHFiles {
public static String COMPRESSION =
Compression.Algorithm.NONE.getName();
private HBaseTestingUtility util = new HBaseTestingUtility();
private static HBaseTestingUtility util = new HBaseTestingUtility();
@BeforeClass
public static void setUpBeforeClass() throws Exception {
util.startMiniCluster();
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
util.shutdownMiniCluster();
}
/**
* Test case that creates some regions and loads
@ -117,7 +125,7 @@ public class TestLoadIncrementalHFiles {
private void runTest(String testName, BloomType bloomType,
byte[][][] hfileRanges) throws Exception {
Path dir = HBaseTestingUtility.getTestDir(testName);
Path dir = util.getDataTestDir(testName);
FileSystem fs = util.getTestFileSystem();
dir = dir.makeQualified(fs);
Path familyDir = new Path(dir, Bytes.toString(FAMILY));
@ -131,31 +139,27 @@ public class TestLoadIncrementalHFiles {
}
int expectedRows = hfileIdx * 1000;
final byte[] TABLE = Bytes.toBytes("mytable_"+testName);
util.startMiniCluster();
try {
HBaseAdmin admin = new HBaseAdmin(util.getConfiguration());
HTableDescriptor htd = new HTableDescriptor(TABLE);
HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
familyDesc.setBloomFilterType(bloomType);
htd.addFamily(familyDesc);
admin.createTable(htd, SPLIT_KEYS);
HBaseAdmin admin = new HBaseAdmin(util.getConfiguration());
HTableDescriptor htd = new HTableDescriptor(TABLE);
HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
familyDesc.setBloomFilterType(bloomType);
htd.addFamily(familyDesc);
admin.createTable(htd, SPLIT_KEYS);
HTable table = new HTable(util.getConfiguration(), TABLE);
util.waitTableAvailable(TABLE, 30000);
LoadIncrementalHFiles loader = new LoadIncrementalHFiles(
util.getConfiguration());
loader.doBulkLoad(dir, table);
HTable table = new HTable(util.getConfiguration(), TABLE);
util.waitTableAvailable(TABLE, 30000);
LoadIncrementalHFiles loader = new LoadIncrementalHFiles(
util.getConfiguration());
loader.doBulkLoad(dir, table);
assertEquals(expectedRows, util.countRows(table));
} finally {
util.shutdownMiniCluster();
}
assertEquals(expectedRows, util.countRows(table));
}
@Test
public void testSplitStoreFile() throws IOException {
Path dir = HBaseTestingUtility.getTestDir("testSplitHFile");
Path dir = util.getDataTestDir("testSplitHFile");
FileSystem fs = util.getTestFileSystem();
Path testIn = new Path(dir, "testhfile");
HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);


@ -76,7 +76,7 @@ public class TestTableMapReduce {
}
@AfterClass
public static void afterClass() throws IOException {
public static void afterClass() throws Exception {
UTIL.shutdownMiniMapReduceCluster();
UTIL.shutdownMiniCluster();
}


@ -433,7 +433,7 @@ public class TestCatalogJanitor {
private String setRootDirAndCleanIt(final HBaseTestingUtility htu,
final String subdir)
throws IOException {
Path testdir = HBaseTestingUtility.getTestDir(subdir);
Path testdir = htu.getDataTestDir(subdir);
FileSystem fs = FileSystem.get(htu.getConfiguration());
if (fs.exists(testdir)) assertTrue(fs.delete(testdir, true));
htu.getConfiguration().set(HConstants.HBASE_DIR, testdir.toString());


@ -83,11 +83,6 @@ public class TestDistributedLogSplitting {
Configuration conf;
HBaseTestingUtility TEST_UTIL;
@Before
public void before() throws Exception {
}
private void startCluster(int num_rs) throws Exception{
ZKSplitLog.Counters.resetCounters();
LOG.info("Starting cluster");
@ -106,7 +101,7 @@ public class TestDistributedLogSplitting {
@After
public void after() throws Exception {
cluster.shutdown();
TEST_UTIL.shutdownMiniCluster();
}
@Test (timeout=300000)


@ -73,7 +73,7 @@ public class TestLogsCleaner {
ReplicationZookeeper zkHelper =
new ReplicationZookeeper(server, new AtomicBoolean(true));
Path oldLogDir = new Path(HBaseTestingUtility.getTestDir(),
Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
HConstants.HREGION_OLDLOGDIR_NAME);
String fakeMachineName =
URLEncoder.encode(server.getServerName().toString(), "UTF8");


@ -61,7 +61,7 @@ public class TestMaster {
}
@AfterClass
public static void afterAllTests() throws IOException {
public static void afterAllTests() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}


@ -105,6 +105,7 @@ public class TestMasterRestartAfterDisablingTable {
assertEquals(
"The assigned regions were not onlined after master switch except for the catalog tables.",
6, regions.size());
TEST_UTIL.shutdownMiniCluster();
}
private void log(String msg) {


@ -66,7 +66,7 @@ public class TestMasterTransitions {
addToEachStartKey(countOfRegions);
}
@AfterClass public static void afterAllTests() throws IOException {
@AfterClass public static void afterAllTests() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}


@ -124,8 +124,8 @@ public class TestOpenedRegionHandler {
"testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches");
HRegionInfo hri = new HRegionInfo(htd.getName(),
Bytes.toBytes(testIndex), Bytes.toBytes(testIndex + 1));
HRegion region = HRegion.createHRegion(hri, HBaseTestingUtility
.getTestDir(), TEST_UTIL.getConfiguration(), htd);
HRegion region = HRegion.createHRegion(hri, TEST_UTIL
.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
assertNotNull(region);
AssignmentManager am = Mockito.mock(AssignmentManager.class);
when(am.isRegionInTransition(hri)).thenReturn(


@ -44,30 +44,26 @@ import org.junit.Test;
public class TestRestartCluster {
private static final Log LOG = LogFactory.getLog(TestRestartCluster.class);
private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static ZooKeeperWatcher zooKeeper;
private HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static final byte[] TABLENAME = Bytes.toBytes("master_transitions");
private static final byte [][] FAMILIES = new byte [][] {Bytes.toBytes("a")};
private static final byte [][] TABLES = new byte[][] {
private static final byte [][] FAMILIES = {Bytes.toBytes("a")};
private static final byte [][] TABLES = {
Bytes.toBytes("restartTableOne"),
Bytes.toBytes("restartTableTwo"),
Bytes.toBytes("restartTableThree")
};
private static final byte [] FAMILY = Bytes.toBytes("family");
@Before public void setup() throws Exception {
}
@After public void teardown() throws IOException {
@After public void tearDown() throws Exception {
UTIL.shutdownMiniCluster();
}
@Test (timeout=300000) public void testRestartClusterAfterKill()
throws Exception {
UTIL.startMiniZKCluster();
zooKeeper = new ZooKeeperWatcher(UTIL.getConfiguration(), "cluster1", null, true);
ZooKeeperWatcher zooKeeper =
new ZooKeeperWatcher(UTIL.getConfiguration(), "cluster1", null, true);
// create the unassigned region, throw up a region opened state for META
String unassignedZNode = zooKeeper.assignmentZNode;
@ -106,8 +102,7 @@ public class TestRestartCluster {
assertEquals(3, allRegions.size());
LOG.info("\n\nShutting down cluster");
UTIL.getHBaseCluster().shutdown();
UTIL.getHBaseCluster().join();
UTIL.shutdownMiniHBaseCluster();
LOG.info("\n\nSleeping a bit");
Thread.sleep(2000);

View File

@ -76,7 +76,7 @@ public class TestZKBasedOpenCloseRegion {
addToEachStartKey(countOfRegions);
}
@AfterClass public static void afterAllTests() throws IOException {
@AfterClass public static void afterAllTests() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}

View File

@ -47,8 +47,9 @@ public class TestAtomicOperation extends HBaseTestCase {
static final Log LOG = LogFactory.getLog(TestAtomicOperation.class);
HRegion region = null;
private final String DIR = HBaseTestingUtility.getTestDir() +
"/TestIncrement/";
private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final String DIR = TEST_UTIL.getDataTestDir("TestIncrement").toString();
private final int MAX_VERSIONS = 2;
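
The same one-line migration repeats in TestBlocksRead, TestHRegion, and TestStore below: the shared static path becomes an instance field derived from the test's own utility. In sketch form (class name illustrative):

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class ScratchDirSketch {          // hypothetical test class
  private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  // Instance-scoped scratch dir; concurrent test runs no longer collide.
  private final String DIR =
      TEST_UTIL.getDataTestDir("ScratchDirSketch").toString();
}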

View File

@ -62,8 +62,8 @@ public class TestBlocksRead extends HBaseTestCase {
}
HRegion region = null;
private final String DIR = HBaseTestingUtility.getTestDir() +
"/TestBlocksRead/";
private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final String DIR = TEST_UTIL.getDataTestDir("TestBlocksRead").toString();
/**
* @see org.apache.hadoop.hbase.HBaseTestCase#setUp()

View File

@ -40,8 +40,6 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueTestUtil;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.Test;
@ -70,7 +68,7 @@ public class TestColumnSeeking {
htd.addFamily(hcd);
HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false);
HRegion region =
HRegion.createHRegion(info, HBaseTestingUtility.getTestDir(), TEST_UTIL
HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL
.getConfiguration(), htd);
List<String> rows = generateRandomWords(10, "row");
@ -175,7 +173,7 @@ public class TestColumnSeeking {
HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false);
HRegion region =
HRegion.createHRegion(info, HBaseTestingUtility.getTestDir(), TEST_UTIL
HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL
.getConfiguration(), htd);
List<String> rows = generateRandomWords(10, "row");

View File

@ -48,8 +48,8 @@ public class TestCompactSelection extends TestCase {
private Configuration conf;
private Store store;
private static final String DIR
= HBaseTestingUtility.getTestDir() + "/TestCompactSelection/";
private static final String DIR =
TEST_UTIL.getDataTestDir("TestCompactSelection").toString();
private static Path TEST_FILE;
private static final int minFiles = 3;

View File

@ -33,10 +33,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
@ -48,6 +45,8 @@ import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@ -57,6 +56,8 @@ import org.mockito.stubbing.Answer;
*/
public class TestCompaction extends HBaseTestCase {
static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private HRegion r = null;
private Path compactionDir = null;
private Path regionCompactionDir = null;
@ -67,7 +68,6 @@ public class TestCompaction extends HBaseTestCase {
private byte[] firstRowBytes, secondRowBytes, thirdRowBytes;
final private byte[] col1, col2;
private MiniDFSCluster cluster;
/** constructor */
public TestCompaction() throws Exception {
@ -76,7 +76,6 @@ public class TestCompaction extends HBaseTestCase {
// Set cache flush size to 1MB
conf.setInt("hbase.hregion.memstore.flush.size", 1024*1024);
conf.setInt("hbase.hregion.memstore.block.multiplier", 100);
this.cluster = null;
compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
firstRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
@ -92,10 +91,6 @@ public class TestCompaction extends HBaseTestCase {
@Override
public void setUp() throws Exception {
this.cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
// Make the hbase rootdir match the minidfs we just span up
this.conf.set(HConstants.HBASE_DIR,
this.cluster.getFileSystem().getHomeDirectory().toString());
super.setUp();
HTableDescriptor htd = createTableDescriptor(getName());
this.r = createNewHRegion(htd, null, null);
@ -106,9 +101,6 @@ public class TestCompaction extends HBaseTestCase {
HLog hlog = r.getLog();
this.r.close();
hlog.closeAndDelete();
if (this.cluster != null) {
shutdownDfs(cluster);
}
super.tearDown();
}
@ -426,7 +418,7 @@ public class TestCompaction extends HBaseTestCase {
assertEquals(compactionThreshold, s.getStorefilesCount());
assertTrue(s.getStorefilesSize() > 15*1000);
// and no new store files persisted past compactStores()
FileStatus[] ls = cluster.getFileSystem().listStatus(r.getTmpDir());
FileStatus[] ls = FileSystem.get(conf).listStatus(r.getTmpDir());
assertEquals(0, ls.length);
} finally {
@ -501,7 +493,7 @@ public class TestCompaction extends HBaseTestCase {
StoreFile.Writer compactedFile = store.compactStore(storeFiles, false, maxId);
// Now lets corrupt the compacted file.
FileSystem fs = cluster.getFileSystem();
FileSystem fs = FileSystem.get(conf);
Path origPath = compactedFile.getPath();
Path homedir = store.getHomedir();
Path dstPath = new Path(homedir, origPath.getName());
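
With the data dir owned by the testing utility, TestCompaction drops its private MiniDFSCluster; filesystem access goes through whatever FileSystem.get(conf) resolves to, the local filesystem by default. A hedged sketch of the simplified check:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpDirSketch {
  // List a region's tmp dir via the configured filesystem rather than a
  // cluster handle the test had to start and stop itself.
  static FileStatus[] listTmp(Configuration conf, Path tmpDir)
      throws java.io.IOException {
    FileSystem fs = FileSystem.get(conf);
    return fs.listStatus(tmpDir);
  }
}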

View File

@ -293,7 +293,7 @@ public class TestCompoundBloomFilter {
cacheConf = new CacheConfig(conf);
StoreFile.Writer w = StoreFile.createWriter(fs,
HBaseTestingUtility.getTestDir(), BLOCK_SIZES[t], null, null, conf,
TEST_UTIL.getDataTestDir(), BLOCK_SIZES[t], null, null, conf,
cacheConf, bt, 0);
assertTrue(w.hasGeneralBloom());

View File

@ -41,11 +41,11 @@ public class TestEndToEndSplitTransaction {
@BeforeClass
public static void beforeAllTests() throws Exception {
TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
TEST_UTIL.startMiniCluster(1);
TEST_UTIL.startMiniCluster();
}
@AfterClass
public static void afterAllTests() throws IOException {
public static void afterAllTests() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}

View File

@ -68,7 +68,7 @@ public class TestFSErrorsExposed {
@Test
public void testHFileScannerThrowsErrors() throws IOException {
Path hfilePath = new Path(new Path(
HBaseTestingUtility.getTestDir("internalScannerExposesErrors"),
util.getDataTestDir("internalScannerExposesErrors"),
"regionname"), "familyname");
FaultyFileSystem fs = new FaultyFileSystem(util.getTestFileSystem());
CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
@ -111,7 +111,7 @@ public class TestFSErrorsExposed {
@Test
public void testStoreFileScannerThrowsErrors() throws IOException {
Path hfilePath = new Path(new Path(
HBaseTestingUtility.getTestDir("internalScannerExposesErrors"),
util.getDataTestDir("internalScannerExposesErrors"),
"regionname"), "familyname");
FaultyFileSystem fs = new FaultyFileSystem(util.getTestFileSystem());
CacheConfig cacheConf = new CacheConfig(util.getConfiguration());

View File

@ -46,7 +46,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
*/
public class TestGetClosestAtOrBefore extends HBaseTestCase {
private static final Log LOG = LogFactory.getLog(TestGetClosestAtOrBefore.class);
private MiniDFSCluster miniHdfs;
private static final byte[] T00 = Bytes.toBytes("000");
private static final byte[] T10 = Bytes.toBytes("010");
@ -58,24 +57,17 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
private static final byte[] T35 = Bytes.toBytes("035");
private static final byte[] T40 = Bytes.toBytes("040");
@Override
protected void setUp() throws Exception {
super.setUp();
this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);
// Set the hbase.rootdir to be the home directory in mini dfs.
this.conf.set(HConstants.HBASE_DIR,
this.miniHdfs.getFileSystem().getHomeDirectory().toString());
}
public void testUsingMetaAndBinary() throws IOException {
FileSystem filesystem = FileSystem.get(conf);
Path rootdir = filesystem.makeQualified(new Path(conf.get(HConstants.HBASE_DIR)));
filesystem.mkdirs(rootdir);
Path rootdir = testDir;
// Up flush size else we bind up when we use default catalog flush of 16k.
HTableDescriptor.META_TABLEDESC.setMemStoreFlushSize(64 * 1024 * 1024);
HRegion mr = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO,
rootdir, this.conf, HTableDescriptor.META_TABLEDESC);
try {
// Write rows for three tables 'A', 'B', and 'C'.
for (char c = 'A'; c < 'D'; c++) {
HTableDescriptor htd = new HTableDescriptor("" + c);
@ -134,6 +126,16 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
findRow(mr, 'C', 45, -1);
findRow(mr, 'C', 46, -1);
findRow(mr, 'C', 43, -1);
} finally {
if (mr != null) {
try {
mr.close();
} catch (Exception e) {
e.printStackTrace();
}
mr.getLog().closeAndDelete();
}
}
}
/*
@ -339,12 +341,4 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
}
}
}
@Override
protected void tearDown() throws Exception {
if (this.miniHdfs != null) {
this.miniHdfs.shutdown();
}
super.tearDown();
}
}
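
Alongside dropping the MiniDFS setup and teardown, the region created by the test is now released in a finally block. A sketch of that cleanup idiom, assuming only the HRegion calls shown above:

import org.apache.hadoop.hbase.regionserver.HRegion;

public class RegionCleanupSketch {
  static void exercise(HRegion mr) throws java.io.IOException {
    try {
      // ... reads and writes against the region ...
    } finally {
      if (mr != null) {
        try {
          mr.close();
        } catch (Exception e) {
          e.printStackTrace();         // don't mask the original test failure
        }
        mr.getLog().closeAndDelete();  // delete the region's WAL files
      }
    }
  }
}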

View File

@ -34,8 +34,6 @@ import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
@ -44,7 +42,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
@ -91,8 +88,8 @@ public class TestHRegion extends HBaseTestCase {
static final Log LOG = LogFactory.getLog(TestHRegion.class);
HRegion region = null;
private final String DIR = HBaseTestingUtility.getTestDir() +
"/TestHRegion/";
private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final String DIR = TEST_UTIL.getDataTestDir("TestHRegion").toString();
private final int MAX_VERSIONS = 2;
@ -3023,7 +3020,7 @@ public class TestHRegion extends HBaseTestCase {
put.add(fam2, col, 1, Bytes.toBytes("test2"));
ht.put(put);
HRegion firstRegion = htu.getHbaseCluster().
HRegion firstRegion = htu.getHBaseCluster().
getRegions(Bytes.toBytes(this.getName())).get(0);
firstRegion.flushcache();
HDFSBlocksDistribution blocksDistribution1 =

View File

@ -64,7 +64,7 @@ public class TestHRegionInfo {
@Test
public void testGetSetOfHTD() throws IOException {
HBaseTestingUtility HTU = new HBaseTestingUtility();
final String tablename = "testGetSetOfHTD";
// Delete the temporary table directory that might still be there from the
// previous test run.

View File

@ -279,7 +279,7 @@ public class TestMultiColumnScanner {
HRegionInfo info =
new HRegionInfo(Bytes.toBytes(tableName), null, null, false);
HRegion region = HRegion.createHRegion(
info, HBaseTestingUtility.getTestDir(), TEST_UTIL.getConfiguration(),
info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(),
htd);
return region;
}

View File

@ -62,7 +62,7 @@ public class TestResettingCounters {
for (byte [] family : families) htd.addFamily(new HColumnDescriptor(family));
HRegionInfo hri = new HRegionInfo(htd.getName(), null, null, false);
String testDir = HBaseTestingUtility.getTestDir() + "/TestResettingCounters/";
String testDir = htu.getDataTestDir() + "/TestResettingCounters/";
Path path = new Path(testDir);
if (fs.exists(path)) {
if (!fs.delete(path, true)) {

View File

@ -79,20 +79,9 @@ public class TestScanner extends HBaseTestCase {
private static final long START_CODE = Long.MAX_VALUE;
private MiniDFSCluster cluster = null;
private HRegion r;
private HRegionIncommon region;
@Override
public void setUp() throws Exception {
cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
// Set the hbase.rootdir to be the home directory in mini dfs.
this.conf.set(HConstants.HBASE_DIR,
this.cluster.getFileSystem().getHomeDirectory().toString());
super.setUp();
}
/**
* Test basic stop row filter works.
* @throws Exception
@ -138,7 +127,6 @@ public class TestScanner extends HBaseTestCase {
} finally {
this.r.close();
this.r.getLog().closeAndDelete();
shutdownDfs(this.cluster);
}
}
@ -192,7 +180,6 @@ public class TestScanner extends HBaseTestCase {
} finally {
this.r.close();
this.r.getLog().closeAndDelete();
shutdownDfs(this.cluster);
}
}
@ -220,7 +207,6 @@ public class TestScanner extends HBaseTestCase {
} finally {
this.r.close();
this.r.getLog().closeAndDelete();
shutdownDfs(this.cluster);
}
}
@ -333,13 +319,10 @@ public class TestScanner extends HBaseTestCase {
scan(true, address.toString());
getRegionInfo();
} finally {
// clean up
r.close();
r.getLog().closeAndDelete();
} finally {
shutdownDfs(cluster);
}
}
@ -461,7 +444,6 @@ public class TestScanner extends HBaseTestCase {
} finally {
this.r.close();
this.r.getLog().closeAndDelete();
shutdownDfs(cluster);
}
}
@ -485,7 +467,6 @@ public class TestScanner extends HBaseTestCase {
} finally {
this.r.close();
this.r.getLog().closeAndDelete();
shutdownDfs(cluster);
}
}

View File

@ -55,7 +55,7 @@ import org.mockito.Mockito;
public class TestSplitTransaction {
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final Path testdir =
HBaseTestingUtility.getTestDir(this.getClass().getName());
TEST_UTIL.getDataTestDir(this.getClass().getName());
private HRegion parent;
private HLog wal;
private FileSystem fs;

View File

@ -92,7 +92,9 @@ public class TestStore extends TestCase {
long id = System.currentTimeMillis();
Get get = new Get(row);
private static final String DIR = HBaseTestingUtility.getTestDir() + "/TestStore/";
private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final String DIR = TEST_UTIL.getDataTestDir("TestStore").toString();
/**
* Setup

View File

@ -58,29 +58,13 @@ import com.google.common.collect.Lists;
*/
public class TestStoreFile extends HBaseTestCase {
static final Log LOG = LogFactory.getLog(TestStoreFile.class);
private MiniDFSCluster cluster;
private CacheConfig cacheConf;
private CacheConfig cacheConf = new CacheConfig(conf);
private String ROOT_DIR;
@Override
public void setUp() throws Exception {
try {
this.cluster = new MiniDFSCluster(this.conf, 2, true, (String[])null);
// Set the hbase.rootdir to be the home directory in mini dfs.
this.conf.set(HConstants.HBASE_DIR,
this.cluster.getFileSystem().getHomeDirectory().toString());
this.cacheConf = new CacheConfig(conf);
} catch (IOException e) {
shutdownDfs(cluster);
}
super.setUp();
}
@Override
public void tearDown() throws Exception {
super.tearDown();
shutdownDfs(cluster);
// ReflectionUtils.printThreadInfo(new PrintWriter(System.out),
// "Temporary end-of-test thread dump debugging HADOOP-2040: " + getName());
ROOT_DIR = new Path(this.testDir, "TestStoreFile").toString();
}
/**
@ -330,9 +314,7 @@ public class TestStoreFile extends HBaseTestCase {
}
}
private static String ROOT_DIR =
HBaseTestingUtility.getTestDir("TestStoreFile").toString();
private static String localFormatter = "%010d";
private static final String localFormatter = "%010d";
private void bloomWriteRead(StoreFile.Writer writer, FileSystem fs)
throws Exception {
@ -700,8 +682,7 @@ public class TestStoreFile extends HBaseTestCase {
Configuration conf = this.conf;
// Find a home for our files
Path baseDir = new Path(new Path(this.testDir, "regionname"),
"twoCOWEOC");
Path baseDir = new Path(new Path(this.testDir, "regionname"), "twoCOWEOC");
// Grab the block cache and get the initial hit/miss counts
BlockCache bc = new CacheConfig(conf).getBlockCache();
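
ROOT_DIR stops being a static path computed from the old global test dir and is instead derived in setUp() from the testDir that HBaseTestCase supplies. A sketch of the pattern; the CacheConfig import path is assumed:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

public class StoreFileDirSketch extends HBaseTestCase {
  private CacheConfig cacheConf;
  private String ROOT_DIR;

  @Override
  public void setUp() throws Exception {
    super.setUp();                     // initializes conf and testDir
    cacheConf = new CacheConfig(conf);
    ROOT_DIR = new Path(this.testDir, "StoreFileDirSketch").toString();
  }
}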

View File

@ -69,18 +69,8 @@ public class TestWideScanner extends HBaseTestCase {
}
/** HRegionInfo for root region */
MiniDFSCluster cluster = null;
HRegion r;
@Override
public void setUp() throws Exception {
cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
// Set the hbase.rootdir to be the home directory in mini dfs.
this.conf.set(HConstants.HBASE_DIR,
this.cluster.getFileSystem().getHomeDirectory().toString());
super.setUp();
}
private int addWideContent(HRegion region) throws IOException {
int count = 0;
for (char c = 'a'; c <= 'c'; c++) {
@ -153,7 +143,6 @@ public class TestWideScanner extends HBaseTestCase {
} finally {
this.r.close();
this.r.getLog().closeAndDelete();
shutdownDfs(this.cluster);
}
}
}

View File

@ -62,7 +62,7 @@ public class TestCloseRegionHandler {
new HRegionInfo(htd.getName(), HConstants.EMPTY_END_ROW,
HConstants.EMPTY_END_ROW);
HRegion region =
HRegion.createHRegion(hri, HBaseTestingUtility.getTestDir(),
HRegion.createHRegion(hri, HTU.getDataTestDir(),
HTU.getConfiguration(), htd);
assertNotNull(region);
// Spy on the region so can throw exception when close is called.

View File

@ -25,14 +25,10 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.executor.RegionTransitionData;
import org.apache.hadoop.hbase.executor.EventHandler.EventType;
import org.apache.hadoop.hbase.regionserver.HRegion;
@ -49,9 +45,6 @@ import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
/**
* Test of the {@link OpenRegionHandler}.
@ -59,17 +52,18 @@ import org.mockito.stubbing.Answer;
public class TestOpenRegionHandler {
static final Log LOG = LogFactory.getLog(TestOpenRegionHandler.class);
private final static HBaseTestingUtility HTU = new HBaseTestingUtility();
private static final HTableDescriptor TEST_HTD =
new HTableDescriptor("TestOpenRegionHandler.java");
private static HTableDescriptor TEST_HTD;
private HRegionInfo TEST_HRI;
private int testIndex = 0;
@BeforeClass public static void before() throws Exception {
HTU.startMiniZKCluster();
TEST_HTD = new HTableDescriptor("TestOpenRegionHandler.java");
}
@AfterClass public static void after() throws IOException {
TEST_HTD = null;
HTU.shutdownMiniZKCluster();
}
@ -102,7 +96,7 @@ public class TestOpenRegionHandler {
HTableDescriptor htd = TEST_HTD;
final HRegionInfo hri = TEST_HRI;
HRegion region =
HRegion.createHRegion(hri, HBaseTestingUtility.getTestDir(), HTU
HRegion.createHRegion(hri, HTU.getDataTestDir(), HTU
.getConfiguration(), htd);
assertNotNull(region);
OpenRegionHandler handler = new OpenRegionHandler(server, rss, hri, htd) {

View File

@ -93,6 +93,7 @@ public class TestHLog {
@After
public void tearDown() throws Exception {
}
@BeforeClass
public static void setUpBeforeClass() throws Exception {
// Make block sizes small.

View File

@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.regionserver.wal;
import java.io.IOException;
import java.util.Random;
import java.text.NumberFormat;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.log4j.Level;
import org.apache.commons.logging.Log;
@ -39,17 +39,10 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.ipc.HBaseRPC;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestHLogBench extends Configured implements Tool {
@ -69,8 +62,10 @@ public class TestHLogBench extends Configured implements Tool {
// the number of threads and the number of iterations per thread
private int numThreads = 300;
private int numIterationsPerThread = 10000;
private Path regionRootDir = new Path(HBaseTestingUtility.getTestDir() +
"/TestHLogBench/");
private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private Path regionRootDir = TEST_UTIL.getDataTestDir("TestHLogBench");
private boolean appendNoSync = false;
public TestHLogBench() {
@ -327,7 +322,7 @@ public class TestHLogBench extends Configured implements Tool {
argv[2] = "-numIterationsPerThread";
argv[3] = Integer.toString(1000);
argv[4] = "-path";
argv[5] = HBaseTestingUtility.getTestDir() + "/HlogPerformance";
argv[5] = TEST_UTIL.getDataTestDir() + "/HlogPerformance";
argv[6] = "-nosync";
try {
res = ToolRunner.run(bench, argv);

View File

@ -31,9 +31,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValueTestUtil;
import org.apache.hadoop.hbase.MultithreadedTestUtil;
import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter.EntryBuffers;
import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter.RegionEntryBuffer;
import org.apache.hadoop.hbase.util.Bytes;
@ -56,7 +53,7 @@ public class TestHLogMethods {
*/
@Test public void testGetSplitEditFilesSorted() throws IOException {
FileSystem fs = FileSystem.get(util.getConfiguration());
Path regiondir = HBaseTestingUtility.getTestDir("regiondir");
Path regiondir = util.getDataTestDir("regiondir");
fs.delete(regiondir, true);
fs.mkdirs(regiondir);
Path recoverededits = HLog.getRegionDirRecoveredEditsDir(regiondir);

View File

@ -112,7 +112,7 @@ public class TestLogRollAbort {
}
@After
public void tearDown() throws IOException {
public void tearDown() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}

View File

@ -77,11 +77,11 @@ public class TestLogRolling {
private HLog log;
private String tableName;
private byte[] value;
private static FileSystem fs;
private static MiniDFSCluster dfsCluster;
private static HBaseAdmin admin;
private static MiniHBaseCluster cluster;
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private FileSystem fs;
private MiniDFSCluster dfsCluster;
private HBaseAdmin admin;
private MiniHBaseCluster cluster;
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
// verbose logging on classes that are touched in these tests
{
@ -100,19 +100,16 @@ public class TestLogRolling {
* @throws Exception
*/
public TestLogRolling() {
// start one regionserver and a minidfs.
super();
this.server = null;
this.log = null;
this.tableName = null;
this.value = null;
this.server = null;
this.log = null;
this.tableName = null;
String className = this.getClass().getName();
StringBuilder v = new StringBuilder(className);
while (v.length() < 1000) {
v.append(className);
}
value = Bytes.toBytes(v.toString());
String className = this.getClass().getName();
StringBuilder v = new StringBuilder(className);
while (v.length() < 1000) {
v.append(className);
}
this.value = Bytes.toBytes(v.toString());
}
// Need to override this setup so we can edit the config before it gets sent
@ -175,8 +172,7 @@ public class TestLogRolling {
}
@After
public void tearDown() throws IOException {
TEST_UTIL.cleanupTestDir();
public void tearDown() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}

View File

@ -55,9 +55,9 @@ public class TestWALActionsListener {
conf = TEST_UTIL.getConfiguration();
conf.setInt("hbase.regionserver.maxlogs", 5);
fs = FileSystem.get(conf);
oldLogDir = new Path(HBaseTestingUtility.getTestDir(),
oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
HConstants.HREGION_OLDLOGDIR_NAME);
logDir = new Path(HBaseTestingUtility.getTestDir(),
logDir = new Path(TEST_UTIL.getDataTestDir(),
HConstants.HREGION_LOGDIR_NAME);
}
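
The WAL directories follow the same migration: both the active and old log dirs are rooted under the utility's data test dir rather than the shared static one. In sketch form (the .logs/.oldlogs names in the comments are assumptions):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;

public class WalDirsSketch {
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  static Path oldLogDir() {
    // e.g. <data-test-dir>/.oldlogs
    return new Path(TEST_UTIL.getDataTestDir(), HConstants.HREGION_OLDLOGDIR_NAME);
  }

  static Path logDir() {
    // e.g. <data-test-dir>/.logs
    return new Path(TEST_UTIL.getDataTestDir(), HConstants.HREGION_LOGDIR_NAME);
  }
}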

View File

@ -26,8 +26,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException.SessionExpiredException;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.*;
public class TestReplicationPeer {

View File

@ -112,9 +112,9 @@ public class TestReplicationSourceManager {
replication = new Replication(new DummyServer(), fs, logDir, oldLogDir);
manager = replication.getReplicationManager();
fs = FileSystem.get(conf);
oldLogDir = new Path(utility.getTestDir(),
oldLogDir = new Path(utility.getDataTestDir(),
HConstants.HREGION_OLDLOGDIR_NAME);
logDir = new Path(utility.getTestDir(),
logDir = new Path(utility.getDataTestDir(),
HConstants.HREGION_LOGDIR_NAME);
manager.addSource(slaveId);

View File

@ -65,7 +65,7 @@ public class TestThriftServer {
}
@AfterClass
public static void afterClass() throws IOException {
public static void afterClass() throws Exception {
UTIL.shutdownMiniCluster();
}

View File

@ -32,8 +32,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableDescriptors;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.Test;
@ -49,7 +47,7 @@ public class TestFSTableDescriptors {
final String name = "testRemoves";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
// Clean up old tests if any detritus is lying around.
Path rootdir = new Path(HBaseTestingUtility.getTestDir(), name);
Path rootdir = new Path(UTIL.getDataTestDir(), name);
TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
HTableDescriptor htd = new HTableDescriptor(name);
htds.add(htd);
@ -61,7 +59,7 @@ public class TestFSTableDescriptors {
final String name = "testReadingHTDFromFS";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
HTableDescriptor htd = new HTableDescriptor(name);
Path rootdir = HBaseTestingUtility.getTestDir(name);
Path rootdir = UTIL.getDataTestDir(name);
createHTDInFS(fs, rootdir, htd);
HTableDescriptor htd2 =
FSUtils.getTableDescriptor(fs, rootdir, htd.getNameAsString());
@ -79,7 +77,7 @@ public class TestFSTableDescriptors {
final String name = "testHTableDescriptors";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
// Cleanup old tests if any debris laying around.
Path rootdir = new Path(HBaseTestingUtility.getTestDir(), name);
Path rootdir = new Path(UTIL.getDataTestDir(), name);
final int count = 10;
// Write out table infos.
for (int i = 0; i < count; i++) {
@ -128,7 +126,7 @@ public class TestFSTableDescriptors {
final String name = "testNoSuchTable";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
// Clean up old tests if any detritus is lying around.
Path rootdir = new Path(HBaseTestingUtility.getTestDir(), name);
Path rootdir = new Path(UTIL.getDataTestDir(), name);
TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
htds.get("NoSuchTable");
}
@ -138,7 +136,7 @@ public class TestFSTableDescriptors {
final String name = "testUpdates";
FileSystem fs = FileSystem.get(UTIL.getConfiguration());
// Clean up old tests if any detritus is lying around.
Path rootdir = new Path(HBaseTestingUtility.getTestDir(), name);
Path rootdir = new Path(UTIL.getDataTestDir(), name);
TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
HTableDescriptor htd = new HTableDescriptor(name);
htds.add(htd);

View File

@ -61,7 +61,7 @@ public class TestRegionSplitter {
}
@AfterClass
public static void teardown() throws IOException {
public static void teardown() throws Exception {
UTIL.shutdownMiniCluster();
}

View File

@ -52,7 +52,7 @@ public class TestHQuorumPeer {
// Set it to a non-standard port.
TEST_UTIL.getConfiguration().setInt("hbase.zookeeper.property.clientPort",
PORT_NO);
this.dataDir = HBaseTestingUtility.getTestDir(this.getClass().getName());
this.dataDir = TEST_UTIL.getDataTestDir(this.getClass().getName());
FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
if (fs.exists(this.dataDir)) {
if (!fs.delete(this.dataDir, true)) {