HBASE-4634 'test.build.data' property overused leading to write data at the wrong place

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1189429 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2011-10-26 20:29:41 +00:00
parent 9efd27b733
commit 055d8e3b8c
74 changed files with 471 additions and 510 deletions
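
The core of the change, as a minimal standalone sketch (illustrative only: the two constant names mirror the patch below, while the class and main method are hypothetical). Instead of every test reading and writing the global "test.build.data" system property, each HBaseTestingUtility instance now derives its own random directory under a configurable base directory:

    // Minimal sketch of the directory scheme this commit introduces.
    import java.io.File;
    import java.util.UUID;

    public class TestDirSketch {
      // These two constants follow the patch; the rest is illustrative.
      static final String BASE_TEST_DIRECTORY_KEY = "test.build.data.basedirectory";
      static final String DEFAULT_BASE_TEST_DIRECTORY = "target/test-data";

      public static void main(String[] args) {
        String base = System.getProperty(BASE_TEST_DIRECTORY_KEY,
            DEFAULT_BASE_TEST_DIRECTORY);
        // A random UUID subdirectory keeps concurrent test runs from colliding.
        File dataTestDir = new File(base, UUID.randomUUID().toString())
            .getAbsoluteFile();
        dataTestDir.deleteOnExit();
        System.out.println("test data lands in " + dataTestDir);
      }
    }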

View File

@@ -405,6 +405,8 @@ Release 0.92.0 - Unreleased
    HBASE-4367  Deadlock in MemStore flusher due to JDK internally synchronizing
                on current thread
    HBASE-4645  Edits Log recovery losing data across column families
+   HBASE-4634  "test.build.data" property overused leading to write data at the
+               wrong place (nkeywal)
 
  TESTS
    HBASE-4450  test for number of blocks read: to serve as baseline for expected

View File

@@ -54,8 +54,10 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 public abstract class HBaseTestCase extends TestCase {
   private static final Log LOG = LogFactory.getLog(HBaseTestCase.class);
 
-  /** configuration parameter name for test directory */
-  public static final String TEST_DIRECTORY_KEY = "test.build.data";
+  /** configuration parameter name for test directory
+   * @deprecated see HBaseTestingUtility#TEST_DIRECTORY_KEY
+   **/
+  private static final String TEST_DIRECTORY_KEY = "test.build.data";
 
   /*
   protected final static byte [] fam1 = Bytes.toBytes("colfamily1");
@@ -153,20 +155,27 @@ public abstract class HBaseTestCase extends TestCase {
     super.tearDown();
   }
 
+  /**
+   * @see HBaseTestingUtility#getBaseTestDir
+   * @param testName
+   * @return directory to use for this test
+   */
   protected Path getUnitTestdir(String testName) {
     return new Path(
-      conf.get(TEST_DIRECTORY_KEY, "target/test/data"), testName);
+      System.getProperty(
+        HBaseTestingUtility.BASE_TEST_DIRECTORY_KEY,
+        HBaseTestingUtility.DEFAULT_BASE_TEST_DIRECTORY
+      ),
+      testName
+    );
   }
 
   protected HRegion createNewHRegion(HTableDescriptor desc, byte [] startKey,
       byte [] endKey)
   throws IOException {
     FileSystem filesystem = FileSystem.get(conf);
-    Path rootdir = filesystem.makeQualified(
-      new Path(conf.get(HConstants.HBASE_DIR)));
-    filesystem.mkdirs(rootdir);
     HRegionInfo hri = new HRegionInfo(desc.getName(), startKey, endKey);
-    return HRegion.createHRegion(hri, rootdir, conf, desc);
+    return HRegion.createHRegion(hri, testDir, conf, desc);
   }
 
   protected HRegion openClosedRegion(final HRegion closedRegion)
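
Note the behavioral shift in getUnitTestdir above: the directory now comes from a JVM system property rather than from the test's Configuration object. A hedged standalone illustration of the lookup order (the property name and default are taken from the patch; the wrapper class is hypothetical):

    import java.io.File;

    public class UnitTestDirLookup {
      public static void main(String[] args) {
        // A -Dtest.build.data.basedirectory JVM flag wins; otherwise the
        // hard-coded Maven-style default is used.
        String base = System.getProperty("test.build.data.basedirectory",
            "target/test-data");
        System.out.println(new File(base, "TestExample").getPath());
      }
    }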

View File

@@ -72,9 +72,6 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.mapred.MiniMRCluster;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.ZooKeeper;
@@ -82,15 +79,16 @@ import org.apache.zookeeper.KeeperException.NodeExistsException;
 /**
  * Facility for testing HBase. Replacement for
- * old HBaseTestCase and HBaseCluserTestCase functionality.
+ * old HBaseTestCase and HBaseClusterTestCase functionality.
  * Create an instance and keep it around testing HBase. This class is
  * meant to be your one-stop shop for anything you might need testing. Manages
- * one cluster at a time only. Depends on log4j being on classpath and
+ * one cluster at a time only.
+ * Depends on log4j being on classpath and
  * hbase-site.xml for logging and test-run configuration. It does not set
  * logging levels nor make changes to configuration parameters.
  */
 public class HBaseTestingUtility {
-  private final static Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
+  private static final Log LOG = LogFactory.getLog(HBaseTestingUtility.class);
   private Configuration conf;
   private MiniZooKeeperCluster zkCluster = null;
 
   /**
@@ -102,19 +100,33 @@ public class HBaseTestingUtility {
   private MiniHBaseCluster hbaseCluster = null;
   private MiniMRCluster mrCluster = null;
 
-  // If non-null, then already a cluster running.
-  private File clusterTestBuildDir = null;
+  // Directory where we put the data for this instance of HBaseTestingUtility
+  private File dataTestDir = null;
+
+  // Directory (usually a subdirectory of dataTestDir) used by the dfs cluster
+  //  if any
+  private File clusterTestDir = null;
 
   /**
    * System property key to get test directory value.
    * Name is as it is because mini dfs has hard-codings to put test data here.
+   * It should NOT be used directly in HBase, as it's a property used in
+   *  mini dfs.
+   * @deprecated can be used only with mini dfs
    */
-  public static final String TEST_DIRECTORY_KEY = "test.build.data";
+  private static final String TEST_DIRECTORY_KEY = "test.build.data";
 
   /**
-   * Default parent directory for test output.
+   * System property key to get base test directory value
    */
-  public static final String DEFAULT_TEST_DIRECTORY = "target/test-data";
+  public static final String BASE_TEST_DIRECTORY_KEY =
+    "test.build.data.basedirectory";
+
+  /**
+   * Default base directory for test output.
+   */
+  public static final String DEFAULT_BASE_TEST_DIRECTORY = "target/test-data";
 
   /** Compression algorithms to use in parameterized JUnit 4 tests */
   public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
@@ -124,8 +136,7 @@ public class HBaseTestingUtility {
     });
 
   /** Compression algorithms to use in testing */
-  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS =
-      new Compression.Algorithm[] {
+  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
       Compression.Algorithm.NONE, Compression.Algorithm.GZ
   };
@@ -155,10 +166,6 @@ public class HBaseTestingUtility {
     this.conf = conf;
   }
 
-  public MiniHBaseCluster getHbaseCluster() {
-    return hbaseCluster;
-  }
-
   /**
    * Returns this classes's instance of {@link Configuration}.  Be careful how
    * you use the returned Configuration since {@link HConnection} instances
@@ -175,69 +182,109 @@ public class HBaseTestingUtility {
   }
 
   /**
-   * Makes sure the test directory is set up so that {@link #getTestDir()}
-   * returns a valid directory. Useful in unit tests that do not run a
-   * mini-cluster.
+   * @return Where to write test data on local filesystem; usually
+   * {@link #DEFAULT_BASE_TEST_DIRECTORY}
+   * Should not be used by the unit tests, hence it's private.
+   * Unit tests will use a subdirectory of this directory.
+   * @see #setupDataTestDir()
+   * @see #getTestFileSystem()
    */
-  public void initTestDir() {
-    if (System.getProperty(TEST_DIRECTORY_KEY) == null) {
-      clusterTestBuildDir = setupClusterTestBuildDir();
-      System.setProperty(TEST_DIRECTORY_KEY, clusterTestBuildDir.getPath());
-    }
+  private Path getBaseTestDir() {
+    String PathName = System.getProperty(
+      BASE_TEST_DIRECTORY_KEY, DEFAULT_BASE_TEST_DIRECTORY);
+
+    return new Path(PathName);
   }
 
   /**
-   * @return Where to write test data on local filesystem; usually
-   * {@link #DEFAULT_TEST_DIRECTORY}
-   * @see #setupClusterTestBuildDir()
-   * @see #clusterTestBuildDir()
+   * @return Where to write test data on local filesystem, specific to
+   *  the test.  Useful for tests that do not use a cluster.
+   * Creates it if it does not exist already.
    * @see #getTestFileSystem()
    */
-  public static Path getTestDir() {
-    return new Path(System.getProperty(TEST_DIRECTORY_KEY,
-      DEFAULT_TEST_DIRECTORY));
+  public Path getDataTestDir() {
+    if (dataTestDir == null){
+      setupDataTestDir();
+    }
+    return new Path(dataTestDir.getAbsolutePath());
+  }
+
+  /**
+   * @return Where the DFS cluster will write data on the local subsystem.
+   * Creates it if it does not exist already.
+   * @see #getTestFileSystem()
+   */
+  public Path getClusterTestDir() {
+    if (clusterTestDir == null){
+      setupClusterTestDir();
+    }
+    return new Path(clusterTestDir.getAbsolutePath());
   }
 
   /**
    * @param subdirName
    * @return Path to a subdirectory named <code>subdirName</code> under
-   * {@link #getTestDir()}.
-   * @see #setupClusterTestBuildDir()
-   * @see #clusterTestBuildDir(String)
-   * @see #getTestFileSystem()
+   * {@link #getDataTestDir()}.
+   * Does *NOT* create it if it does not exist.
    */
-  public static Path getTestDir(final String subdirName) {
-    return new Path(getTestDir(), subdirName);
+  public Path getDataTestDir(final String subdirName) {
+    return new Path(getDataTestDir(), subdirName);
   }
 
   /**
-   * Home our cluster in a dir under {@link #DEFAULT_TEST_DIRECTORY}.  Give it a
-   * random name
-   * so can have many concurrent clusters running if we need to.  Need to
-   * amend the {@link #TEST_DIRECTORY_KEY} System property.  Its what
-   * minidfscluster bases
-   * it data dir on.  Moding a System property is not the way to do concurrent
+   * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
+   * Give it a random name so we can have many concurrent tests running if
+   * we need to.  It needs to amend the {@link #TEST_DIRECTORY_KEY}
+   * System property, as it's what minidfscluster bases
+   * its data dir on.  Modifying a System property is not the way to do concurrent
    * instances -- another instance could grab the temporary
-   * value unintentionally -- but not anything can do about it at moment;
+   * value unintentionally -- but there is nothing we can do about it at the moment;
    * single instance only is how the minidfscluster works.
-   * @return The calculated cluster test build directory.
+   * @return The calculated data test build directory.
    */
-  public File setupClusterTestBuildDir() {
+  private void setupDataTestDir() {
+    if (dataTestDir != null) {
+      LOG.warn("Data test dir already setup in " +
+        dataTestDir.getAbsolutePath());
+      return;
+    }
+
     String randomStr = UUID.randomUUID().toString();
-    String dirStr = getTestDir(randomStr).toString();
-    File dir = new File(dirStr).getAbsoluteFile();
+    Path testDir = new Path(
+      getBaseTestDir(),
+      randomStr
+    );
+
+    dataTestDir = new File(testDir.toString()).getAbsoluteFile();
     // Have it cleaned up on exit
-    dir.deleteOnExit();
-    return dir;
+    dataTestDir.deleteOnExit();
+  }
+
+  /**
+   * Creates a directory for the DFS cluster, under the test data
+   */
+  private void setupClusterTestDir() {
+    if (clusterTestDir != null) {
+      LOG.warn("Cluster test dir already setup in " +
+        clusterTestDir.getAbsolutePath());
+      return;
+    }
+
+    // Using randomUUID ensures that multiple clusters can be launched by
+    //  a same test, if it stops & starts them
+    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
+    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
+    // Have it cleaned up on exit
+    clusterTestDir.deleteOnExit();
   }
 
   /**
    * @throws IOException If a cluster -- zk, dfs, or hbase -- already running.
    */
-  void isRunningCluster(String passedBuildPath) throws IOException {
-    if (this.clusterTestBuildDir == null || passedBuildPath != null) return;
+  public void isRunningCluster() throws IOException {
+    if (dfsCluster == null) return;
     throw new IOException("Cluster already running at " +
-      this.clusterTestBuildDir);
+      this.clusterTestDir);
   }
/** /**
@@ -248,7 +295,7 @@ public class HBaseTestingUtility {
    * @return The mini dfs cluster created.
    */
   public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
-    return startMiniDFSCluster(servers, null, null);
+    return startMiniDFSCluster(servers, null);
   }
/** /**
@@ -265,62 +312,58 @@ public class HBaseTestingUtility {
   public MiniDFSCluster startMiniDFSCluster(final String hosts[])
   throws Exception {
     if ( hosts != null && hosts.length != 0) {
-      return startMiniDFSCluster(hosts.length, null, hosts);
+      return startMiniDFSCluster(hosts.length, hosts);
     } else {
-      return startMiniDFSCluster(1, null, null);
+      return startMiniDFSCluster(1, null);
     }
   }
 
   /**
    * Start a minidfscluster.
    * Can only create one.
-   * @param dir Where to home your dfs cluster.
    * @param servers How many DNs to start.
-   * @throws Exception
-   * @see {@link #shutdownMiniDFSCluster()}
-   * @return The mini dfs cluster created.
-   */
-  public MiniDFSCluster startMiniDFSCluster(int servers, final File dir)
-  throws Exception {
-    return startMiniDFSCluster(servers, dir, null);
-  }
-
-  /**
-   * Start a minidfscluster.
-   * Can only create one.
-   * @param servers How many DNs to start.
-   * @param dir Where to home your dfs cluster.
    * @param hosts hostnames DNs to run on.
    * @throws Exception
    * @see {@link #shutdownMiniDFSCluster()}
    * @return The mini dfs cluster created.
    */
-  public MiniDFSCluster startMiniDFSCluster(int servers, final File dir, final String hosts[])
+  public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
   throws Exception {
-    // This does the following to home the minidfscluster
-    // base_dir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/");
+
+    // Check that there is not already a cluster running
+    isRunningCluster();
+
+    // Initialize the local directory used by the MiniDFS
+    if (clusterTestDir == null) {
+      setupClusterTestDir();
+    }
+
+    // We have to set this property as it is used by MiniCluster
+    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.toString());
+
     // Some tests also do this:
     //  System.getProperty("test.cache.data", "build/test/cache");
-    if (dir == null) {
-      this.clusterTestBuildDir = setupClusterTestBuildDir();
-    } else {
-      this.clusterTestBuildDir = dir;
-    }
-    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.toString());
-    System.setProperty("test.cache.data", this.clusterTestBuildDir.toString());
+    // It's also deprecated
+    System.setProperty("test.cache.data", this.clusterTestDir.toString());
+
+    // Ok, now we can start
     this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
       true, null, null, hosts, null);
-    // Set this just-started cluser as our filesystem.
+
+    // Set this just-started cluster as our filesystem.
     FileSystem fs = this.dfsCluster.getFileSystem();
     this.conf.set("fs.defaultFS", fs.getUri().toString());
     // Do old style too just to be safe.
     this.conf.set("fs.default.name", fs.getUri().toString());
+
+    // Wait for the cluster to be totally up
+    this.dfsCluster.waitClusterUp();
+
     return this.dfsCluster;
   }
 
   /**
-   * Shuts down instance created by call to {@link #startMiniDFSCluster(int, File)}
+   * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
    * or does nothing.
    * @throws Exception
    */
@@ -328,7 +371,9 @@ public class HBaseTestingUtility {
     if (this.dfsCluster != null) {
       // The below throws an exception per dn, AsynchronousCloseException.
       this.dfsCluster.shutdown();
+      dfsCluster = null;
     }
   }
/** /**
@@ -339,8 +384,7 @@ public class HBaseTestingUtility {
    * @return zk cluster started.
    */
   public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
-    return startMiniZKCluster(setupClusterTestBuildDir(),1);
+    return startMiniZKCluster(1);
   }
/** /**
@@ -353,7 +397,8 @@ public class HBaseTestingUtility {
    */
   public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
   throws Exception {
-    return startMiniZKCluster(setupClusterTestBuildDir(), zooKeeperServerNum);
+    File zkClusterFile = new File(getClusterTestDir().toString());
+    return startMiniZKCluster(zkClusterFile, zooKeeperServerNum);
   }
 
@@ -365,10 +410,10 @@ public class HBaseTestingUtility {
   private MiniZooKeeperCluster startMiniZKCluster(final File dir,
     int zooKeeperServerNum)
   throws Exception {
+    this.passedZkCluster = false;
     if (this.zkCluster != null) {
       throw new IOException("Cluster already running at " + dir);
     }
-    this.passedZkCluster = false;
     this.zkCluster = new MiniZooKeeperCluster();
     int clientPort = this.zkCluster.startup(dir,zooKeeperServerNum);
     this.conf.set("hbase.zookeeper.property.clientPort",
@@ -465,26 +510,20 @@ public class HBaseTestingUtility {
     LOG.info("Starting up minicluster with " + numMasters + " master(s) and " +
       numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");
     // If we already put up a cluster, fail.
-    String testBuildPath = conf.get(TEST_DIRECTORY_KEY, null);
-    isRunningCluster(testBuildPath);
-    if (testBuildPath != null) {
-      LOG.info("Using passed path: " + testBuildPath);
-    }
-    // Make a new random dir to home everything in.  Set it as system property.
-    // minidfs reads home from system property.
-    this.clusterTestBuildDir = testBuildPath == null?
-      setupClusterTestBuildDir() : new File(testBuildPath);
-    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.getPath());
+    isRunningCluster();
+
     // Bring up mini dfs cluster. This spews a bunch of warnings about missing
     // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
-    startMiniDFSCluster(numDataNodes, this.clusterTestBuildDir, dataNodeHosts);
-    this.dfsCluster.waitClusterUp();
+    startMiniDFSCluster(numDataNodes, dataNodeHosts);
 
     // Start up a zk cluster.
     if (this.zkCluster == null) {
-      startMiniZKCluster(this.clusterTestBuildDir);
+      startMiniZKCluster(clusterTestDir);
     }
+
+    // Start the MiniHBaseCluster
     return startMiniHBaseCluster(numMasters, numSlaves);
   }
@@ -528,7 +567,7 @@ public class HBaseTestingUtility {
     HTable t = new HTable(new Configuration(this.conf), HConstants.META_TABLE_NAME);
     ResultScanner s = t.getScanner(new Scan());
     while (s.next() != null) {
-      continue;
+      // do nothing
     }
     LOG.info("HBase has been restarted");
   }
@@ -547,22 +586,22 @@ public class HBaseTestingUtility {
    * @throws IOException
    * @see {@link #startMiniCluster(int)}
    */
-  public void shutdownMiniCluster() throws IOException {
+  public void shutdownMiniCluster() throws Exception {
     LOG.info("Shutting down minicluster");
     shutdownMiniHBaseCluster();
-    if (!this.passedZkCluster) shutdownMiniZKCluster();
-    if (this.dfsCluster != null) {
-      // The below throws an exception per dn, AsynchronousCloseException.
-      this.dfsCluster.shutdown();
+    if (!this.passedZkCluster){
+      shutdownMiniZKCluster();
     }
+    shutdownMiniDFSCluster();
+
     // Clean up our directory.
-    if (this.clusterTestBuildDir != null && this.clusterTestBuildDir.exists()) {
+    if (this.clusterTestDir != null && this.clusterTestDir.exists()) {
       // Need to use deleteDirectory because File.delete required dir is empty.
       if (!FSUtils.deleteDirectory(FileSystem.getLocal(this.conf),
-          new Path(this.clusterTestBuildDir.toString()))) {
-        LOG.warn("Failed delete of " + this.clusterTestBuildDir.toString());
+          new Path(this.clusterTestDir.toString()))) {
+        LOG.warn("Failed delete of " + this.clusterTestDir.toString());
       }
-      this.clusterTestBuildDir = null;
+      this.clusterTestDir = null;
     }
     LOG.info("Minicluster is down");
   }
@@ -576,9 +615,9 @@ public class HBaseTestingUtility {
       this.hbaseCluster.shutdown();
       // Wait till hbase is down before going on to shutdown zk.
       this.hbaseCluster.join();
+    }
     this.hbaseCluster = null;
-    }
   }
 
   /**
    * Creates an hbase rootdir in user home directory.  Also creates hbase
@@ -931,8 +970,8 @@ public class HBaseTestingUtility {
    * Creates the specified number of regions in the specified table.
    * @param c
    * @param table
-   * @param columnFamily
-   * @param startKeys
+   * @param family
+   * @param numRegions
    * @return
    * @throws IOException
    */
@@ -1107,7 +1146,6 @@ public class HBaseTestingUtility {
     Arrays.sort(startKeys, Bytes.BYTES_COMPARATOR);
     List<HRegionInfo> newRegions = new ArrayList<HRegionInfo>(startKeys.length);
     // add custom ones
-    int count = 0;
     for (int i = 0; i < startKeys.length; i++) {
       int j = (i + 1) % startKeys.length;
       HRegionInfo hri = new HRegionInfo(htd.getName(), startKeys[i],
@@ -1118,7 +1156,6 @@ public class HBaseTestingUtility {
       meta.put(put);
       LOG.info("createMultiRegionsInMeta: inserted " + hri.toString());
       newRegions.add(hri);
-      count++;
     }
     return newRegions;
   }
@@ -1221,6 +1258,7 @@ public class HBaseTestingUtility {
     LOG.info("Stopping mini mapreduce cluster...");
     if (mrCluster != null) {
       mrCluster.shutdown();
+      mrCluster = null;
     }
     // Restore configuration to point to local jobtracker
     conf.set("mapred.job.tracker", "local");
@@ -1382,7 +1420,13 @@ public class HBaseTestingUtility {
    * @throws IOException
    */
   public boolean cleanupTestDir() throws IOException {
-    return deleteDir(getTestDir());
+    if (dataTestDir == null ){
+      return false;
+    } else {
+      boolean ret = deleteDir(getDataTestDir());
+      dataTestDir = null;
+      return ret;
+    }
   }
 
@@ -1391,7 +1435,10 @@ public class HBaseTestingUtility {
    * @throws IOException
    */
   public boolean cleanupTestDir(final String subdir) throws IOException {
-    return deleteDir(getTestDir(subdir));
+    if (dataTestDir == null){
+      return false;
+    }
+    return deleteDir(getDataTestDir(subdir));
   }
 
@@ -1402,7 +1449,7 @@ public class HBaseTestingUtility {
   public boolean deleteDir(final Path dir) throws IOException {
     FileSystem fs = getTestFileSystem();
     if (fs.exists(dir)) {
-      return fs.delete(getTestDir(), true);
+      return fs.delete(getDataTestDir(), true);
     }
     return false;
   }
@@ -1436,6 +1483,9 @@ public class HBaseTestingUtility {
     return false;
   }
 
   /**
    * This method clones the passed <code>c</code> configuration setting a new
    * user into the clone.  Use it getting new instances of FileSystem.  Only
@@ -1500,7 +1550,6 @@ public class HBaseTestingUtility {
    * Wait until <code>countOfRegion</code> in .META. have a non-empty
    * info:server.  This means all regions have been deployed, master has been
    * informed and updated .META. with the regions deployed server.
-   * @param conf Configuration
    * @param countOfRegions How many regions in .META.
    * @throws IOException
    */
@@ -1572,7 +1621,7 @@ public class HBaseTestingUtility {
    * Creates an znode with OPENED state.
    * @param TEST_UTIL
    * @param region
-   * @param regionServer
+   * @param serverName
    * @return
    * @throws IOException
    * @throws ZooKeeperConnectionException
@@ -1639,5 +1688,4 @@ public class HBaseTestingUtility {
       return "<out_of_range>";
     }
   }
-
 }
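
Taken together, the HBaseTestingUtility changes above give each instance its own lifecycle and its own directories. An illustrative (not committed) usage sketch, assuming the HBase test classes are on the classpath; method names are the ones introduced by this patch:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterLifecycle {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // Lazily creates the per-instance random dir; no cluster required.
        Path perTestDir = util.getDataTestDir("myTest");
        System.out.println("per-test dir: " + perTestDir);
        util.startMiniCluster();  // dfs homed under a fresh dfscluster_<uuid> subdir
        try {
          // ... exercise util.getHBaseCluster() ...
        } finally {
          util.shutdownMiniCluster();  // stops hbase, zk, dfs; deletes cluster dir
        }
      }
    }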

View File

@@ -28,32 +28,21 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.*;
 
 public class TestFSTableDescriptorForceCreation {
   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
 
-  @BeforeClass
-  public static void setUpCluster() throws Exception {
-    UTIL.startMiniDFSCluster(1);
-  }
-
-  @AfterClass
-  public static void shutDownCluster() throws Exception {
-    UTIL.shutdownMiniDFSCluster();
-  }
-
   @Test
   public void testShouldCreateNewTableDescriptorIfForcefulCreationIsFalse()
       throws IOException {
     final String name = "newTable2";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    Path rootdir = new Path(fs.getWorkingDirectory(), name);
+    Path rootdir = new Path(UTIL.getDataTestDir(), name);
     HTableDescriptor htd = new HTableDescriptor(name);
-    assertTrue("Should create new table descriptor", FSUtils
-        .createTableDescriptor(fs, rootdir, htd, false));
+
+    assertTrue("Should create new table descriptor",
+      FSUtils.createTableDescriptor(fs, rootdir, htd, false));
   }
 
   @Test
@@ -62,7 +51,7 @@ public class TestFSTableDescriptorForceCreation {
     final String name = "testAlreadyExists";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detritus lying around.
-    Path rootdir = new Path(fs.getWorkingDirectory(), name);
+    Path rootdir = new Path(UTIL.getDataTestDir(), name);
     TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(name);
     htds.add(htd);
@@ -75,7 +64,7 @@ public class TestFSTableDescriptorForceCreation {
       throws Exception {
     final String name = "createNewTableNew2";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    Path rootdir = new Path(fs.getWorkingDirectory(), name);
+    Path rootdir = new Path(UTIL.getDataTestDir(), name);
     HTableDescriptor htd = new HTableDescriptor(name);
     FSUtils.createTableDescriptor(fs, rootdir, htd, false);
     assertTrue("Should create new table descriptor", FSUtils
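
The effect of this file's hunks: the descriptor tests no longer need the mini DFS that was started in @BeforeClass; they target the local filesystem under the utility's data test dir. A hedged sketch of the resulting pattern (the wrapper class and table name are illustrative):

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class LocalFsDescriptorDir {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        // With no mini cluster running, this resolves to the local filesystem.
        FileSystem fs = FileSystem.get(util.getConfiguration());
        Path rootdir = new Path(util.getDataTestDir(), "newTable2");
        System.out.println("descriptor root: " + fs.makeQualified(rootdir));
      }
    }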

View File

@@ -50,26 +50,6 @@ import org.junit.Test;
 public class TestHBaseTestingUtility {
   private final Log LOG = LogFactory.getLog(this.getClass());
 
-  private HBaseTestingUtility hbt;
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-  }
-
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-  }
-
-  @Before
-  public void setUp() throws Exception {
-    this.hbt = new HBaseTestingUtility();
-    this.hbt.cleanupTestDir();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-  }
-
   /**
    * Basic sanity test that spins up multiple HDFS and HBase clusters that share
    * the same ZK ensemble. We then create the same table in both and make sure
@@ -136,27 +116,49 @@ public class TestHBaseTestingUtility {
   }
 
   @Test public void testMiniCluster() throws Exception {
-    MiniHBaseCluster cluster = this.hbt.startMiniCluster();
+    HBaseTestingUtility hbt = new HBaseTestingUtility();
+    MiniHBaseCluster cluster = hbt.startMiniCluster();
     try {
       assertEquals(1, cluster.getLiveRegionServerThreads().size());
     } finally {
-      cluster.shutdown();
+      hbt.shutdownMiniCluster();
     }
   }
 
+  /**
+   * Test that we can start and stop a cluster multiple times
+   *  with the same HBaseTestingUtility.
+   */
+  @Test public void testMultipleStartStop() throws Exception {
+    HBaseTestingUtility htu1 = new HBaseTestingUtility();
+    Path foo = new Path("foo");
+
+    htu1.startMiniCluster();
+    htu1.getDFSCluster().getFileSystem().create(foo);
+    assertTrue(htu1.getDFSCluster().getFileSystem().exists(foo));
+    htu1.shutdownMiniCluster();
+
+    htu1.startMiniCluster();
+    assertFalse(htu1.getDFSCluster().getFileSystem().exists(foo));
+    htu1.getDFSCluster().getFileSystem().create(foo);
+    assertTrue(htu1.getDFSCluster().getFileSystem().exists(foo));
+    htu1.shutdownMiniCluster();
+  }
+
   @Test public void testMiniZooKeeper() throws Exception {
-    MiniZooKeeperCluster cluster1 = this.hbt.startMiniZKCluster();
+    HBaseTestingUtility hbt = new HBaseTestingUtility();
+    MiniZooKeeperCluster cluster1 = hbt.startMiniZKCluster();
     try {
       assertEquals(0, cluster1.getBackupZooKeeperServerNum());
       assertTrue((cluster1.killCurrentActiveZooKeeperServer() == -1));
     } finally {
-      cluster1.shutdown();
+      hbt.shutdownMiniZKCluster();
     }
-    this.hbt.shutdownMiniZKCluster();
 
     // set up zookeeper cluster with 5 zk servers
-    MiniZooKeeperCluster cluster2 = this.hbt.startMiniZKCluster(5);
+    MiniZooKeeperCluster cluster2 = hbt.startMiniZKCluster(5);
     int defaultClientPort = 21818;
     cluster2.setDefaultClientPort(defaultClientPort);
     try {
@@ -181,12 +183,13 @@ public class TestHBaseTestingUtility {
       assertEquals(-1, cluster2.getBackupZooKeeperServerNum());
       assertEquals(0, cluster2.getZooKeeperServerNum());
     } finally {
-      cluster2.shutdown();
+      hbt.shutdownMiniZKCluster();
     }
   }
 
   @Test public void testMiniDFSCluster() throws Exception {
-    MiniDFSCluster cluster = this.hbt.startMiniDFSCluster(1);
+    HBaseTestingUtility hbt = new HBaseTestingUtility();
+    MiniDFSCluster cluster = hbt.startMiniDFSCluster(1);
     FileSystem dfs = cluster.getFileSystem();
     Path dir = new Path("dir");
     Path qualifiedDir = dfs.makeQualified(dir);
@@ -194,26 +197,32 @@ public class TestHBaseTestingUtility {
     assertFalse(dfs.exists(qualifiedDir));
     assertTrue(dfs.mkdirs(qualifiedDir));
     assertTrue(dfs.delete(qualifiedDir, true));
-    try {
-    } finally {
-      cluster.shutdown();
-    }
+    hbt.shutdownMiniCluster();
   }
 
-  @Test public void testSetupClusterTestBuildDir() {
-    File testdir = this.hbt.setupClusterTestBuildDir();
+  @Test public void testSetupClusterTestBuildDir() throws Exception {
+    HBaseTestingUtility hbt = new HBaseTestingUtility();
+    Path testdir = hbt.getClusterTestDir();
     LOG.info("uuid-subdir=" + testdir);
-    assertFalse(testdir.exists());
-    assertTrue(testdir.mkdirs());
-    assertTrue(testdir.exists());
+    FileSystem fs = hbt.getTestFileSystem();
+
+    assertFalse(fs.exists(testdir));
+
+    hbt.startMiniDFSCluster(1);
+    assertTrue(fs.exists(testdir));
+
+    hbt.shutdownMiniCluster();
+    assertFalse(fs.exists(testdir));
   }
 
-  @Test public void testTestDir() throws IOException {
-    Path testdir = HBaseTestingUtility.getTestDir();
+  @Test public void testTestDir() throws Exception {
+    HBaseTestingUtility hbt = new HBaseTestingUtility();
+    Path testdir = hbt.getDataTestDir();
     LOG.info("testdir=" + testdir);
-    FileSystem fs = this.hbt.getTestFileSystem();
+    FileSystem fs = hbt.getTestFileSystem();
     assertTrue(!fs.exists(testdir));
     assertTrue(fs.mkdirs(testdir));
-    assertTrue(this.hbt.cleanupTestDir());
+    assertTrue(hbt.cleanupTestDir());
   }
 }
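
The new testMultipleStartStop above pins down why restarts now work: after a full shutdown, the next startMiniDFSCluster() homes the DFS in a fresh dfscluster_<uuid> subdirectory, so state from a previous run cannot leak into the next one. A trivial illustrative sketch of that naming scheme (class name hypothetical):

    import java.util.UUID;

    public class FreshDfsDirPerStart {
      public static void main(String[] args) {
        // Two starts yield two distinct directories, so files created before
        // a restart are not visible after it.
        String first = "dfscluster_" + UUID.randomUUID();
        String second = "dfscluster_" + UUID.randomUUID();
        System.out.println(first + "\n" + second);
      }
    }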

View File

@@ -49,7 +49,7 @@ public class TestInfoServers {
   }
 
   @AfterClass
-  public static void afterClass() throws IOException {
+  public static void afterClass() throws Exception {
     UTIL.shutdownMiniCluster();
   }
 
@@ -60,10 +60,10 @@ public class TestInfoServers {
   public void testInfoServersRedirect() throws Exception {
     // give the cluster time to start up
     new HTable(UTIL.getConfiguration(), ".META.");
-    int port = UTIL.getHbaseCluster().getMaster().getInfoServer().getPort();
+    int port = UTIL.getHBaseCluster().getMaster().getInfoServer().getPort();
     assertHasExpectedContent(new URL("http://localhost:" + port +
       "/index.html"), "master-status");
-    port = UTIL.getHbaseCluster().getRegionServerThreads().get(0).getRegionServer().
+    port = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer().
       getInfoServer().getPort();
     assertHasExpectedContent(new URL("http://localhost:" + port +
       "/index.html"), "rs-status");
@@ -80,10 +80,10 @@ public class TestInfoServers {
   public void testInfoServersStatusPages() throws Exception {
     // give the cluster time to start up
     new HTable(UTIL.getConfiguration(), ".META.");
-    int port = UTIL.getHbaseCluster().getMaster().getInfoServer().getPort();
+    int port = UTIL.getHBaseCluster().getMaster().getInfoServer().getPort();
     assertHasExpectedContent(new URL("http://localhost:" + port +
       "/master-status"), "META");
-    port = UTIL.getHbaseCluster().getRegionServerThreads().get(0).getRegionServer().
+    port = UTIL.getHBaseCluster().getRegionServerThreads().get(0).getRegionServer().
       getInfoServer().getPort();
     assertHasExpectedContent(new URL("http://localhost:" + port +
       "/rs-status"), "META");

View File

@@ -97,7 +97,7 @@ public class TestMultiVersions {
     Incommon incommon = new HTableIncommon(table);
     TimestampTestBase.doTestDelete(incommon, new FlushCache() {
       public void flushcache() throws IOException {
-        UTIL.getHbaseCluster().flushcache();
+        UTIL.getHBaseCluster().flushcache();
       }
      });

View File

@@ -56,7 +56,7 @@ public class TestRegionRebalancing {
   }
 
   @AfterClass
-  public static void afterClass() throws IOException {
+  public static void afterClass() throws Exception {
     UTIL.shutdownMiniCluster();
   }
 
@@ -94,35 +94,35 @@ public class TestRegionRebalancing {
     // add a region server - total of 2
     LOG.info("Started second server=" +
-      UTIL.getHbaseCluster().startRegionServer().getRegionServer().getServerName());
-    UTIL.getHbaseCluster().getMaster().balance();
+      UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName());
+    UTIL.getHBaseCluster().getMaster().balance();
     assertRegionsAreBalanced();
 
     // add a region server - total of 3
     LOG.info("Started third server=" +
-      UTIL.getHbaseCluster().startRegionServer().getRegionServer().getServerName());
-    UTIL.getHbaseCluster().getMaster().balance();
+      UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName());
+    UTIL.getHBaseCluster().getMaster().balance();
     assertRegionsAreBalanced();
 
     // kill a region server - total of 2
-    LOG.info("Stopped third server=" + UTIL.getHbaseCluster().stopRegionServer(2, false));
-    UTIL.getHbaseCluster().waitOnRegionServer(2);
-    UTIL.getHbaseCluster().getMaster().balance();
+    LOG.info("Stopped third server=" + UTIL.getHBaseCluster().stopRegionServer(2, false));
+    UTIL.getHBaseCluster().waitOnRegionServer(2);
+    UTIL.getHBaseCluster().getMaster().balance();
     assertRegionsAreBalanced();
 
     // start two more region servers - total of 4
     LOG.info("Readding third server=" +
-      UTIL.getHbaseCluster().startRegionServer().getRegionServer().getServerName());
+      UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName());
     LOG.info("Added fourth server=" +
-      UTIL.getHbaseCluster().startRegionServer().getRegionServer().getServerName());
-    UTIL.getHbaseCluster().getMaster().balance();
+      UTIL.getHBaseCluster().startRegionServer().getRegionServer().getServerName());
+    UTIL.getHBaseCluster().getMaster().balance();
     assertRegionsAreBalanced();
 
     for (int i = 0; i < 6; i++){
       LOG.info("Adding " + (i + 5) + "th region server");
-      UTIL.getHbaseCluster().startRegionServer();
+      UTIL.getHBaseCluster().startRegionServer();
     }
-    UTIL.getHbaseCluster().getMaster().balance();
+    UTIL.getHBaseCluster().getMaster().balance();
     assertRegionsAreBalanced();
   }
 
@@ -154,7 +154,7 @@ public class TestRegionRebalancing {
     int regionCount = getRegionCount();
     List<HRegionServer> servers = getOnlineRegionServers();
-    double avg = UTIL.getHbaseCluster().getMaster().getAverageLoad();
+    double avg = UTIL.getHBaseCluster().getMaster().getAverageLoad();
     int avgLoadPlusSlop = (int)Math.ceil(avg * (1 + slop));
     int avgLoadMinusSlop = (int)Math.floor(avg * (1 - slop)) - 1;
     LOG.debug("There are " + servers.size() + " servers and " + regionCount
@@ -179,7 +179,7 @@ public class TestRegionRebalancing {
         Thread.sleep(10000);
       } catch (InterruptedException e) {}
-      UTIL.getHbaseCluster().getMaster().balance();
+      UTIL.getHBaseCluster().getMaster().balance();
       continue;
     }
 
@@ -194,7 +194,7 @@ public class TestRegionRebalancing {
   private List<HRegionServer> getOnlineRegionServers() {
     List<HRegionServer> list = new ArrayList<HRegionServer>();
     for (JVMClusterUtil.RegionServerThread rst :
-        UTIL.getHbaseCluster().getRegionServerThreads()) {
+        UTIL.getHBaseCluster().getRegionServerThreads()) {
       if (rst.getRegionServer().isOnline()) {
         list.add(rst.getRegionServer());
       }

View File

@@ -85,7 +85,7 @@ public class TestMetaReaderEditor {
     ct.start();
   }
 
-  @AfterClass public static void afterClass() throws IOException {
+  @AfterClass public static void afterClass() throws Exception {
     UTIL.shutdownMiniCluster();
   }

View File

@@ -1241,7 +1241,7 @@ public class TestAdmin {
       throws IOException {
     // When the META table can be opened, the region servers are running
     new HTable(TEST_UTIL.getConfiguration(), HConstants.META_TABLE_NAME);
-    HRegionServer regionServer = TEST_UTIL.getHbaseCluster()
+    HRegionServer regionServer = TEST_UTIL.getHBaseCluster()
         .getRegionServerThreads().get(0).getRegionServer();
 
     // Create the test table and open it

View File

@@ -44,7 +44,6 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadPoolExecutor;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -225,7 +224,7 @@ public class TestFromClientSide {
     ResultScanner scanner = table.getScanner(s);
     while (scanner.next() != null) continue;
 
-    Path tempPath = new Path(HBaseTestingUtility.getTestDir(), "regions.dat");
+    Path tempPath = new Path(TEST_UTIL.getDataTestDir(), "regions.dat");
 
     final String tempFileName = tempPath.toString();

View File

@@ -51,7 +51,7 @@ public class TestHTablePool {
   }
 
   @AfterClass
-  public static void tearDownAfterClass() throws IOException {
+  public static void tearDownAfterClass() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
   }

View File

@@ -62,9 +62,9 @@ public class TestReplicationAdmin {
     Configuration conf = TEST_UTIL.getConfiguration();
     conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
     admin = new ReplicationAdmin(conf);
-    Path oldLogDir = new Path(TEST_UTIL.getTestDir(),
+    Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
       HConstants.HREGION_OLDLOGDIR_NAME);
-    Path logDir = new Path(TEST_UTIL.getTestDir(),
+    Path logDir = new Path(TEST_UTIL.getDataTestDir(),
       HConstants.HREGION_LOGDIR_NAME);
     manager = new ReplicationSourceManager(admin.getReplicationZk(), conf,
       // The following stopper never stops so that we can respond

View File

@@ -157,8 +157,8 @@ public class TestClassLoading {
     // compose a java source file.
     String javaCode = "import org.apache.hadoop.hbase.coprocessor.*;" +
       "public class " + className + " extends BaseRegionObserver {}";
-    Path baseDir = HBaseTestingUtility.getTestDir();
-    Path srcDir = new Path(HBaseTestingUtility.getTestDir(), "src");
+    Path baseDir = TEST_UTIL.getDataTestDir();
+    Path srcDir = new Path(TEST_UTIL.getDataTestDir(), "src");
     File srcDirPath = new File(srcDir.toString());
     srcDirPath.mkdirs();
     File sourceCodeFile = new File(srcDir.toString(), className + ".java");

View File

@@ -479,7 +479,7 @@ public class TestMasterObserver {
   }
 
   @AfterClass
-  public static void teardownAfterClass() throws Exception {
+  public static void tearDownAfterClass() throws Exception {
     UTIL.shutdownMiniCluster();
   }

View File

@@ -51,8 +51,8 @@ public class TestColumnPrefixFilter {
     HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter");
     htd.addFamily(new HColumnDescriptor(family));
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
-    HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.
-        getTestDir(), TEST_UTIL.getConfiguration(), htd);
+    HRegion region = HRegion.createHRegion(info, TEST_UTIL.
+        getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
 
     List<String> rows = generateRandomWords(100, "row");
     List<String> columns = generateRandomWords(10000, "column");
@@ -107,8 +107,8 @@ public class TestColumnPrefixFilter {
     HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter");
     htd.addFamily(new HColumnDescriptor(family));
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
-    HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.
-        getTestDir(), TEST_UTIL.getConfiguration(), htd);
+    HRegion region = HRegion.createHRegion(info, TEST_UTIL.
+        getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
 
     List<String> rows = generateRandomWords(100, "row");
     List<String> columns = generateRandomWords(10000, "column");

View File

@@ -76,7 +76,7 @@ public class TestDependentColumnFilter extends TestCase {
     htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
     htd.addFamily(new HColumnDescriptor(FAMILIES[1]));
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
-    this.region = HRegion.createHRegion(info, testUtil.getTestDir(),
+    this.region = HRegion.createHRegion(info, testUtil.getDataTestDir(),
         testUtil.getConfiguration(), htd);
     addData();
   }

View File

@@ -52,8 +52,8 @@ public class TestMultipleColumnPrefixFilter {
     htd.addFamily(new HColumnDescriptor(family));
     // HRegionInfo info = new HRegionInfo(htd, null, null, false);
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
-    HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.
-        getTestDir(), TEST_UTIL.getConfiguration(), htd);
+    HRegion region = HRegion.createHRegion(info, TEST_UTIL.
+        getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
 
     List<String> rows = generateRandomWords(100, "row");
     List<String> columns = generateRandomWords(10000, "column");
@@ -111,8 +111,8 @@ public class TestMultipleColumnPrefixFilter {
     htd.addFamily(new HColumnDescriptor(family1));
     htd.addFamily(new HColumnDescriptor(family2));
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
-    HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.
-        getTestDir(), TEST_UTIL.getConfiguration(), htd);
+    HRegion region = HRegion.createHRegion(info, TEST_UTIL.
+        getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
 
     List<String> rows = generateRandomWords(100, "row");
     List<String> columns = generateRandomWords(10000, "column");
@@ -174,8 +174,8 @@ public class TestMultipleColumnPrefixFilter {
     HTableDescriptor htd = new HTableDescriptor("TestMultipleColumnPrefixFilter");
     htd.addFamily(new HColumnDescriptor(family));
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
-    HRegion region = HRegion.createHRegion(info, HBaseTestingUtility.
-        getTestDir(), TEST_UTIL.getConfiguration(),htd);
+    HRegion region = HRegion.createHRegion(info, TEST_UTIL.
+        getDataTestDir(), TEST_UTIL.getConfiguration(),htd);
 
     List<String> rows = generateRandomWords(100, "row");
     List<String> columns = generateRandomWords(10000, "column");

View File

@@ -59,7 +59,7 @@ public class TestHalfStoreFileReader {
   @Test
   public void testHalfScanAndReseek() throws IOException {
     HBaseTestingUtility test_util = new HBaseTestingUtility();
-    String root_dir = HBaseTestingUtility.getTestDir("TestHalfStoreFile").toString();
+    String root_dir = test_util.getDataTestDir("TestHalfStoreFile").toString();
     Path p = new Path(root_dir, "test");
 
     Configuration conf = test_util.getConfiguration();

View File

@@ -34,14 +34,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.Compression;
-import org.apache.hadoop.hbase.io.hfile.HFile;
-import org.apache.hadoop.hbase.io.hfile.HFileBlock;
-import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex;
-import org.apache.hadoop.hbase.io.hfile.HFileReaderV2;
-import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.junit.After;
@@ -216,7 +208,7 @@ public class TestCacheOnWrite {
   }
 
   public void writeStoreFile() throws IOException {
-    Path storeFileParentDir = new Path(HBaseTestingUtility.getTestDir(),
+    Path storeFileParentDir = new Path(TEST_UTIL.getDataTestDir(),
         "test_cache_on_write");
     StoreFile.Writer sfw = StoreFile.createWriter(fs, storeFileParentDir,
         DATA_BLOCK_SIZE, compress, KeyValue.COMPARATOR, conf,

View File

@@ -127,7 +127,7 @@ public class TestFixedFileTrailer {
     }
 
     // Now check what happens if the trailer is corrupted.
-    Path trailerPath = new Path(HBaseTestingUtility.getTestDir(), "trailer_"
+    Path trailerPath = new Path(util.getDataTestDir(), "trailer_"
         + version);
     {

View File

@@ -49,12 +49,16 @@ import org.apache.hadoop.io.Writable;
 public class TestHFile extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestHFile.class);
-  private static String ROOT_DIR =
-    HBaseTestingUtility.getTestDir("TestHFile").toString();
+  private String ROOT_DIR;
   private final int minBlockSize = 512;
   private static String localFormatter = "%010d";
   private static CacheConfig cacheConf = null;
+  public void setUp() throws Exception {
+    super.setUp();
+    ROOT_DIR = this.getUnitTestdir("TestHFile").toString();
+  }
   /**
    * Test empty HFile.
    * Test all features work reasonably when hfile is empty of entries.
@@ -123,7 +127,7 @@ public class TestHFile extends HBaseTestCase {
   }
   private FSDataOutputStream createFSOutput(Path name) throws IOException {
-    if (fs.exists(name)) fs.delete(name, true);
+    //if (fs.exists(name)) fs.delete(name, true);
     FSDataOutputStream fout = fs.create(name);
     return fout;
   }
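(Why ROOT_DIR moves into setUp() above, as a small illustrative sketch: the directory now depends on per-test state, so it can no longer be computed in a static initializer. Field and method names below are illustrative; getUnitTestdir is the HBaseTestCase helper shown in the diff.)

    private String ROOT_DIR;              // was static, resolved at class-load time
    @Override
    public void setUp() throws Exception {
      super.setUp();                      // establishes the per-test base directory
      ROOT_DIR = getUnitTestdir(getName()).toString();
    }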


@@ -76,7 +76,6 @@ public class TestHFileBlock {
   @Before
   public void setUp() throws IOException {
     fs = FileSystem.get(TEST_UTIL.getConfiguration());
-    TEST_UTIL.initTestDir();
   }
   public void writeTestBlockContents(DataOutputStream dos) throws IOException {
@@ -154,8 +153,8 @@ public class TestHFileBlock {
     for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
       for (boolean pread : new boolean[] { false, true }) {
         byte[] block = createTestV1Block(algo);
-        Path path = new Path(HBaseTestingUtility.getTestDir(), "blocks_v1_"
-            + algo);
+        Path path = new Path(TEST_UTIL.getDataTestDir(),
+            "blocks_v1_"+ algo);
         LOG.info("Creating temporary file at " + path);
         FSDataOutputStream os = fs.create(path);
         int totalSize = 0;
@@ -188,7 +187,7 @@
   public void testReaderV2() throws IOException {
     for (Compression.Algorithm algo : COMPRESSION_ALGORITHMS) {
       for (boolean pread : new boolean[] { false, true }) {
-        Path path = new Path(HBaseTestingUtility.getTestDir(), "blocks_v2_"
+        Path path = new Path(TEST_UTIL.getDataTestDir(), "blocks_v2_"
            + algo);
         FSDataOutputStream os = fs.create(path);
         HFileBlock.Writer hbw = new HFileBlock.Writer(algo);
@@ -244,7 +243,7 @@
     for (boolean cacheOnWrite : BOOLEAN_VALUES) {
       Random rand = defaultRandom();
       LOG.info("Compression algorithm: " + algo + ", pread=" + pread);
-      Path path = new Path(HBaseTestingUtility.getTestDir(), "prev_offset");
+      Path path = new Path(TEST_UTIL.getDataTestDir(), "prev_offset");
       List<Long> expectedOffsets = new ArrayList<Long>();
       List<Long> expectedPrevOffsets = new ArrayList<Long>();
       List<BlockType> expectedTypes = new ArrayList<BlockType>();
@@ -400,7 +399,7 @@
   public void testConcurrentReading() throws Exception {
     for (Compression.Algorithm compressAlgo : COMPRESSION_ALGORITHMS) {
       Path path =
-          new Path(HBaseTestingUtility.getTestDir(), "concurrent_reading");
+          new Path(TEST_UTIL.getDataTestDir(), "concurrent_reading");
       Random rand = defaultRandom();
       List<Long> offsets = new ArrayList<Long>();
       List<BlockType> types = new ArrayList<BlockType>();


@@ -43,8 +43,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader;
 import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexChunk;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -113,7 +111,7 @@ public class TestHFileBlockIndex {
   @Test
   public void testBlockIndex() throws IOException {
-    path = new Path(HBaseTestingUtility.getTestDir(), "block_index_" + compr);
+    path = new Path(TEST_UTIL.getDataTestDir(), "block_index_" + compr);
     writeWholeIndex();
     readIndex();
   }
@@ -458,7 +456,7 @@
    */
   @Test
   public void testHFileWriterAndReader() throws IOException {
-    Path hfilePath = new Path(HBaseTestingUtility.getTestDir(),
+    Path hfilePath = new Path(TEST_UTIL.getDataTestDir(),
         "hfile_for_block_index");
     CacheConfig cacheConf = new CacheConfig(conf);
     BlockCache blockCache = cacheConf.getBlockCache();


@@ -45,8 +45,9 @@ import org.apache.hadoop.io.compress.GzipCodec;
  * instead.</p>
  */
 public class TestHFilePerformance extends TestCase {
+  private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private static String ROOT_DIR =
-    HBaseTestingUtility.getTestDir("TestHFilePerformance").toString();
+    TEST_UTIL.getDataTestDir("TestHFilePerformance").toString();
   private FileSystem fs;
   private Configuration conf;
   private long startTimeEpoch;


@@ -244,8 +244,9 @@ public class TestHFileSeek extends TestCase {
     int minWordLen = 5;
     int maxWordLen = 20;
+    private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
     String rootDir =
-      HBaseTestingUtility.getTestDir("TestTFileSeek").toString();
+      TEST_UTIL.getDataTestDir("TestTFileSeek").toString();
     String file = "TestTFileSeek";
     // String compress = "lzo"; DISABLED
     String compress = "none";


@@ -63,7 +63,7 @@ public class TestHFileWriterV2 {
   @Test
   public void testHFileFormatV2() throws IOException {
-    Path hfilePath = new Path(HBaseTestingUtility.getTestDir(),
+    Path hfilePath = new Path(TEST_UTIL.getDataTestDir(),
         "testHFileFormatV2");
     final Compression.Algorithm COMPRESS_ALGO = Compression.Algorithm.GZ;


@@ -40,7 +40,7 @@ public class TestReseekTo {
   @Test
   public void testReseekTo() throws Exception {
-    Path ncTFile = new Path(HBaseTestingUtility.getTestDir(), "basic.hfile");
+    Path ncTFile = new Path(TEST_UTIL.getDataTestDir(), "basic.hfile");
     FSDataOutputStream fout = TEST_UTIL.getTestFileSystem().create(ncTFile);
     CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
     HFile.Writer writer = HFile.getWriterFactory(


@@ -80,7 +80,7 @@ public class TestTableMapReduce {
   }
   @AfterClass
-  public static void afterClass() throws IOException {
+  public static void afterClass() throws Exception {
     UTIL.shutdownMiniMapReduceCluster();
     UTIL.shutdownMiniCluster();
   }


@@ -175,7 +175,7 @@ public class TestHFileOutputFormat {
     RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
     TaskAttemptContext context = null;
     Path dir =
-        HBaseTestingUtility.getTestDir("test_LATEST_TIMESTAMP_isReplaced");
+        util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced");
     try {
       Job job = new Job(conf);
       FileOutputFormat.setOutputPath(job, dir);
@@ -243,7 +243,7 @@
     RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
     TaskAttemptContext context = null;
     Path dir =
-        HBaseTestingUtility.getTestDir("test_TIMERANGE_present");
+        util.getDataTestDir("test_TIMERANGE_present");
     LOG.info("Timerange dir writing to dir: "+ dir);
     try {
       // build a record writer using HFileOutputFormat
@@ -307,7 +307,7 @@
   @Test
   public void testWritingPEData() throws Exception {
     Configuration conf = util.getConfiguration();
-    Path testDir = HBaseTestingUtility.getTestDir("testWritingPEData");
+    Path testDir = util.getDataTestDir("testWritingPEData");
     FileSystem fs = testDir.getFileSystem(conf);
     // Set down this value or we OOME in eclipse.
@@ -372,7 +372,7 @@
   private void doIncrementalLoadTest(
       boolean shouldChangeRegions) throws Exception {
     Configuration conf = util.getConfiguration();
-    Path testDir = HBaseTestingUtility.getTestDir("testLocalMRIncrementalLoad");
+    Path testDir = util.getDataTestDir("testLocalMRIncrementalLoad");
     byte[][] startKeys = generateRandomStartKeys(5);
     try {
@@ -557,7 +557,7 @@
     RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
     TaskAttemptContext context = null;
     Path dir =
-        HBaseTestingUtility.getTestDir("testColumnFamilyCompression");
+        util.getDataTestDir("testColumnFamilyCompression");
     HTable table = Mockito.mock(HTable.class);


@@ -252,7 +252,7 @@ public class TestImportTsv {
       }
       assertTrue(verified);
     } finally {
-      cluster.shutdown();
+      htu1.shutdownMiniCluster();
     }
   }


@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
 import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Test;
+import org.junit.*;
 /**
  * Test cases for the "load" half of the HFileOutputFormat bulk load
@@ -49,8 +49,6 @@ import org.junit.Test;
  * tests in TestHFileOutputFormat
  */
 public class TestLoadIncrementalHFiles {
-  private static final byte[] TABLE = Bytes.toBytes("mytable");
   private static final byte[] QUALIFIER = Bytes.toBytes("myqual");
   private static final byte[] FAMILY = Bytes.toBytes("myfam");
@@ -63,7 +61,17 @@ public class TestLoadIncrementalHFiles {
   public static String COMPRESSION =
     Compression.Algorithm.NONE.getName();
-  private HBaseTestingUtility util = new HBaseTestingUtility();
+  private static HBaseTestingUtility util = new HBaseTestingUtility();
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    util.startMiniCluster();
+  }
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    util.shutdownMiniCluster();
+  }
   /**
    * Test case that creates some regions and loads
@@ -117,7 +125,7 @@ public class TestLoadIncrementalHFiles {
   private void runTest(String testName, BloomType bloomType,
       byte[][][] hfileRanges) throws Exception {
-    Path dir = HBaseTestingUtility.getTestDir(testName);
+    Path dir = util.getDataTestDir(testName);
     FileSystem fs = util.getTestFileSystem();
     dir = dir.makeQualified(fs);
     Path familyDir = new Path(dir, Bytes.toString(FAMILY));
@@ -131,9 +139,8 @@
     }
     int expectedRows = hfileIdx * 1000;
-    util.startMiniCluster();
-    try {
+    final byte[] TABLE = Bytes.toBytes("mytable_"+testName);
     HBaseAdmin admin = new HBaseAdmin(util.getConfiguration());
     HTableDescriptor htd = new HTableDescriptor(TABLE);
     HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
@@ -148,14 +155,11 @@
     loader.doBulkLoad(dir, table);
     assertEquals(expectedRows, util.countRows(table));
-    } finally {
-      util.shutdownMiniCluster();
-    }
   }
   @Test
   public void testSplitStoreFile() throws IOException {
-    Path dir = HBaseTestingUtility.getTestDir("testSplitHFile");
+    Path dir = util.getDataTestDir("testSplitHFile");
     FileSystem fs = util.getTestFileSystem();
     Path testIn = new Path(dir, "testhfile");
     HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
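(A hedged reading of the table-name change above: with the mini cluster now started once per class, tables created by earlier test methods persist, so each run needs a distinct table. An illustrative fragment only; FAMILY and testName are the field and parameter from the class above.)

    // unique table per test method, since the shared cluster outlives each test
    final byte[] TABLE = Bytes.toBytes("mytable_" + testName);
    HBaseAdmin admin = new HBaseAdmin(util.getConfiguration());
    HTableDescriptor htd = new HTableDescriptor(TABLE);
    htd.addFamily(new HColumnDescriptor(FAMILY));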


@@ -76,7 +76,7 @@ public class TestTableMapReduce {
   }
   @AfterClass
-  public static void afterClass() throws IOException {
+  public static void afterClass() throws Exception {
     UTIL.shutdownMiniMapReduceCluster();
     UTIL.shutdownMiniCluster();
   }


@@ -433,7 +433,7 @@ public class TestCatalogJanitor {
   private String setRootDirAndCleanIt(final HBaseTestingUtility htu,
       final String subdir)
   throws IOException {
-    Path testdir = HBaseTestingUtility.getTestDir(subdir);
+    Path testdir = htu.getDataTestDir(subdir);
     FileSystem fs = FileSystem.get(htu.getConfiguration());
     if (fs.exists(testdir)) assertTrue(fs.delete(testdir, true));
     htu.getConfiguration().set(HConstants.HBASE_DIR, testdir.toString());


@@ -83,11 +83,6 @@ public class TestDistributedLogSplitting {
   Configuration conf;
   HBaseTestingUtility TEST_UTIL;
-  @Before
-  public void before() throws Exception {
-  }
   private void startCluster(int num_rs) throws Exception{
     ZKSplitLog.Counters.resetCounters();
     LOG.info("Starting cluster");
@@ -106,7 +101,7 @@
   @After
   public void after() throws Exception {
-    cluster.shutdown();
+    TEST_UTIL.shutdownMiniCluster();
   }
   @Test (timeout=300000)


@@ -73,7 +73,7 @@ public class TestLogsCleaner {
     ReplicationZookeeper zkHelper =
         new ReplicationZookeeper(server, new AtomicBoolean(true));
-    Path oldLogDir = new Path(HBaseTestingUtility.getTestDir(),
+    Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
         HConstants.HREGION_OLDLOGDIR_NAME);
     String fakeMachineName =
       URLEncoder.encode(server.getServerName().toString(), "UTF8");


@@ -61,7 +61,7 @@ public class TestMaster {
   }
   @AfterClass
-  public static void afterAllTests() throws IOException {
+  public static void afterAllTests() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
   }


@@ -105,6 +105,7 @@ public class TestMasterRestartAfterDisablingTable {
     assertEquals(
         "The assigned regions were not onlined after master switch except for the catalog tables.",
         6, regions.size());
+    TEST_UTIL.shutdownMiniCluster();
   }
   private void log(String msg) {


@@ -66,7 +66,7 @@ public class TestMasterTransitions {
     addToEachStartKey(countOfRegions);
   }
-  @AfterClass public static void afterAllTests() throws IOException {
+  @AfterClass public static void afterAllTests() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
   }


@@ -124,8 +124,8 @@ public class TestOpenedRegionHandler {
         "testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches");
     HRegionInfo hri = new HRegionInfo(htd.getName(),
         Bytes.toBytes(testIndex), Bytes.toBytes(testIndex + 1));
-    HRegion region = HRegion.createHRegion(hri, HBaseTestingUtility
-        .getTestDir(), TEST_UTIL.getConfiguration(), htd);
+    HRegion region = HRegion.createHRegion(hri, TEST_UTIL
+        .getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
     assertNotNull(region);
     AssignmentManager am = Mockito.mock(AssignmentManager.class);
     when(am.isRegionInTransition(hri)).thenReturn(


@@ -44,30 +44,26 @@ import org.junit.Test;
 public class TestRestartCluster {
   private static final Log LOG = LogFactory.getLog(TestRestartCluster.class);
-  private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private static ZooKeeperWatcher zooKeeper;
+  private HBaseTestingUtility UTIL = new HBaseTestingUtility();
   private static final byte[] TABLENAME = Bytes.toBytes("master_transitions");
-  private static final byte [][] FAMILIES = new byte [][] {Bytes.toBytes("a")};
-  private static final byte [][] TABLES = new byte[][] {
+  private static final byte [][] FAMILIES = {Bytes.toBytes("a")};
+  private static final byte [][] TABLES = {
     Bytes.toBytes("restartTableOne"),
     Bytes.toBytes("restartTableTwo"),
     Bytes.toBytes("restartTableThree")
   };
   private static final byte [] FAMILY = Bytes.toBytes("family");
-  @Before public void setup() throws Exception {
-  }
-  @After public void teardown() throws IOException {
+  @After public void tearDown() throws Exception {
     UTIL.shutdownMiniCluster();
   }
   @Test (timeout=300000) public void testRestartClusterAfterKill()
   throws Exception {
     UTIL.startMiniZKCluster();
-    zooKeeper = new ZooKeeperWatcher(UTIL.getConfiguration(), "cluster1", null, true);
+    ZooKeeperWatcher zooKeeper =
+        new ZooKeeperWatcher(UTIL.getConfiguration(), "cluster1", null, true);
     // create the unassigned region, throw up a region opened state for META
     String unassignedZNode = zooKeeper.assignmentZNode;
@@ -106,8 +102,7 @@
     assertEquals(3, allRegions.size());
     LOG.info("\n\nShutting down cluster");
-    UTIL.getHBaseCluster().shutdown();
-    UTIL.getHBaseCluster().join();
+    UTIL.shutdownMiniHBaseCluster();
     LOG.info("\n\nSleeping a bit");
     Thread.sleep(2000);
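(A hedged reading of the teardown change above: the two-step shutdown of the embedded cluster is assumed to be folded into a single utility call.)

    // old sequence, two calls on the cluster handle:
    UTIL.getHBaseCluster().shutdown();
    UTIL.getHBaseCluster().join();
    // new, assumed-equivalent teardown through the utility:
    UTIL.shutdownMiniHBaseCluster();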


@@ -76,7 +76,7 @@ public class TestZKBasedOpenCloseRegion {
     addToEachStartKey(countOfRegions);
   }
-  @AfterClass public static void afterAllTests() throws IOException {
+  @AfterClass public static void afterAllTests() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
   }


@@ -47,8 +47,9 @@ public class TestAtomicOperation extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestAtomicOperation.class);
   HRegion region = null;
-  private final String DIR = HBaseTestingUtility.getTestDir() +
-    "/TestIncrement/";
+  private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private final String DIR = TEST_UTIL.getDataTestDir("TestIncrement").toString();
   private final int MAX_VERSIONS = 2;


@@ -62,8 +62,8 @@ public class TestBlocksRead extends HBaseTestCase {
   }
   HRegion region = null;
-  private final String DIR = HBaseTestingUtility.getTestDir() +
-    "/TestBlocksRead/";
+  private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private final String DIR = TEST_UTIL.getDataTestDir("TestBlocksRead").toString();
   /**
    * @see org.apache.hadoop.hbase.HBaseTestCase#setUp()


@@ -40,8 +40,6 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueTestUtil;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Test;
@@ -70,7 +68,7 @@ public class TestColumnSeeking {
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false);
     HRegion region =
-        HRegion.createHRegion(info, HBaseTestingUtility.getTestDir(), TEST_UTIL
+        HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL
             .getConfiguration(), htd);
     List<String> rows = generateRandomWords(10, "row");
@@ -175,7 +173,7 @@
     HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false);
     HRegion region =
-        HRegion.createHRegion(info, HBaseTestingUtility.getTestDir(), TEST_UTIL
+        HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL
            .getConfiguration(), htd);
     List<String> rows = generateRandomWords(10, "row");


@@ -48,8 +48,8 @@ public class TestCompactSelection extends TestCase {
   private Configuration conf;
   private Store store;
-  private static final String DIR
-    = HBaseTestingUtility.getTestDir() + "/TestCompactSelection/";
+  private static final String DIR=
+    TEST_UTIL.getDataTestDir("TestCompactSelection").toString();
   private static Path TEST_FILE;
   private static final int minFiles = 3;


@@ -33,10 +33,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestCase;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
@@ -48,6 +45,8 @@ import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -57,6 +56,8 @@ import org.mockito.stubbing.Answer;
  */
 public class TestCompaction extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
   private HRegion r = null;
   private Path compactionDir = null;
   private Path regionCompactionDir = null;
@@ -67,7 +68,6 @@ public class TestCompaction extends HBaseTestCase {
   private byte[] firstRowBytes, secondRowBytes, thirdRowBytes;
   final private byte[] col1, col2;
-  private MiniDFSCluster cluster;
   /** constructor */
   public TestCompaction() throws Exception {
@@ -76,7 +76,6 @@ public class TestCompaction extends HBaseTestCase {
     // Set cache flush size to 1MB
     conf.setInt("hbase.hregion.memstore.flush.size", 1024*1024);
     conf.setInt("hbase.hregion.memstore.block.multiplier", 100);
-    this.cluster = null;
     compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
     firstRowBytes = START_KEY.getBytes(HConstants.UTF8_ENCODING);
@@ -92,10 +91,6 @@
   @Override
   public void setUp() throws Exception {
-    this.cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
-    // Make the hbase rootdir match the minidfs we just span up
-    this.conf.set(HConstants.HBASE_DIR,
-      this.cluster.getFileSystem().getHomeDirectory().toString());
     super.setUp();
     HTableDescriptor htd = createTableDescriptor(getName());
     this.r = createNewHRegion(htd, null, null);
@@ -106,9 +101,6 @@
     HLog hlog = r.getLog();
     this.r.close();
     hlog.closeAndDelete();
-    if (this.cluster != null) {
-      shutdownDfs(cluster);
-    }
     super.tearDown();
   }
@@ -426,7 +418,7 @@
     assertEquals(compactionThreshold, s.getStorefilesCount());
     assertTrue(s.getStorefilesSize() > 15*1000);
     // and no new store files persisted past compactStores()
-    FileStatus[] ls = cluster.getFileSystem().listStatus(r.getTmpDir());
+    FileStatus[] ls = FileSystem.get(conf).listStatus(r.getTmpDir());
     assertEquals(0, ls.length);
   } finally {
@@ -501,7 +493,7 @@
     StoreFile.Writer compactedFile = store.compactStore(storeFiles, false, maxId);
     // Now lets corrupt the compacted file.
-    FileSystem fs = cluster.getFileSystem();
+    FileSystem fs = FileSystem.get(conf);
     Path origPath = compactedFile.getPath();
     Path homedir = store.getHomedir();
     Path dstPath = new Path(homedir, origPath.getName());
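(Sketch of the filesystem-access change in this file, under the assumption that the tests now run against whatever filesystem the configuration points at rather than a dedicated MiniDFSCluster:)

    // was: FileSystem fs = cluster.getFileSystem();  // required a live MiniDFSCluster
    FileSystem fs = FileSystem.get(conf);             // local or DFS, per the test conf
    FileStatus[] ls = fs.listStatus(r.getTmpDir());   // same assertions still apply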


@@ -293,7 +293,7 @@ public class TestCompoundBloomFilter {
     cacheConf = new CacheConfig(conf);
     StoreFile.Writer w = StoreFile.createWriter(fs,
-        HBaseTestingUtility.getTestDir(), BLOCK_SIZES[t], null, null, conf,
+        TEST_UTIL.getDataTestDir(), BLOCK_SIZES[t], null, null, conf,
         cacheConf, bt, 0);
     assertTrue(w.hasGeneralBloom());


@@ -41,11 +41,11 @@ public class TestEndToEndSplitTransaction {
   @BeforeClass
   public static void beforeAllTests() throws Exception {
     TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5);
-    TEST_UTIL.startMiniCluster(1);
+    TEST_UTIL.startMiniCluster();
   }
   @AfterClass
-  public static void afterAllTests() throws IOException {
+  public static void afterAllTests() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
   }


@@ -68,7 +68,7 @@ public class TestFSErrorsExposed {
   @Test
   public void testHFileScannerThrowsErrors() throws IOException {
     Path hfilePath = new Path(new Path(
-        HBaseTestingUtility.getTestDir("internalScannerExposesErrors"),
+        util.getDataTestDir("internalScannerExposesErrors"),
         "regionname"), "familyname");
     FaultyFileSystem fs = new FaultyFileSystem(util.getTestFileSystem());
     CacheConfig cacheConf = new CacheConfig(util.getConfiguration());
@@ -111,7 +111,7 @@
   @Test
   public void testStoreFileScannerThrowsErrors() throws IOException {
     Path hfilePath = new Path(new Path(
-        HBaseTestingUtility.getTestDir("internalScannerExposesErrors"),
+        util.getDataTestDir("internalScannerExposesErrors"),
         "regionname"), "familyname");
     FaultyFileSystem fs = new FaultyFileSystem(util.getTestFileSystem());
     CacheConfig cacheConf = new CacheConfig(util.getConfiguration());


@@ -46,7 +46,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
  */
 public class TestGetClosestAtOrBefore extends HBaseTestCase {
   private static final Log LOG = LogFactory.getLog(TestGetClosestAtOrBefore.class);
-  private MiniDFSCluster miniHdfs;
   private static final byte[] T00 = Bytes.toBytes("000");
   private static final byte[] T10 = Bytes.toBytes("010");
@@ -58,24 +57,17 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
   private static final byte[] T35 = Bytes.toBytes("035");
   private static final byte[] T40 = Bytes.toBytes("040");
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-    this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);
-    // Set the hbase.rootdir to be the home directory in mini dfs.
-    this.conf.set(HConstants.HBASE_DIR,
-      this.miniHdfs.getFileSystem().getHomeDirectory().toString());
-  }
   public void testUsingMetaAndBinary() throws IOException {
     FileSystem filesystem = FileSystem.get(conf);
-    Path rootdir = filesystem.makeQualified(new Path(conf.get(HConstants.HBASE_DIR)));
-    filesystem.mkdirs(rootdir);
+    Path rootdir = testDir;
     // Up flush size else we bind up when we use default catalog flush of 16k.
     HTableDescriptor.META_TABLEDESC.setMemStoreFlushSize(64 * 1024 * 1024);
     HRegion mr = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO,
         rootdir, this.conf, HTableDescriptor.META_TABLEDESC);
+    try {
     // Write rows for three tables 'A', 'B', and 'C'.
     for (char c = 'A'; c < 'D'; c++) {
       HTableDescriptor htd = new HTableDescriptor("" + c);
@@ -134,6 +126,16 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
     findRow(mr, 'C', 45, -1);
     findRow(mr, 'C', 46, -1);
     findRow(mr, 'C', 43, -1);
+    } finally {
+      if (mr != null) {
+        try {
+          mr.close();
+        } catch (Exception e) {
+          e.printStackTrace();
+        }
+        mr.getLog().closeAndDelete();
+      }
+    }
   }
   /*
@@ -339,12 +341,4 @@
     }
   }
   }
-  @Override
-  protected void tearDown() throws Exception {
-    if (this.miniHdfs != null) {
-      this.miniHdfs.shutdown();
-    }
-    super.tearDown();
-  }
 }
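(The shape of the cleanup introduced above, restated as a standalone sketch; the finally block releases the region and its write-ahead log even when an assertion fails mid-test:)

    HRegion mr = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO,
        rootdir, conf, HTableDescriptor.META_TABLEDESC);
    try {
      // ... write rows and run findRow assertions against mr ...
    } finally {
      if (mr != null) {
        mr.close();                     // release the region
        mr.getLog().closeAndDelete();   // and its WAL
      }
    }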


@@ -34,8 +34,6 @@ import java.util.concurrent.atomic.AtomicReference;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -44,7 +42,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -91,8 +88,8 @@ public class TestHRegion extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestHRegion.class);
   HRegion region = null;
-  private final String DIR = HBaseTestingUtility.getTestDir() +
-    "/TestHRegion/";
+  private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private final String DIR = TEST_UTIL.getDataTestDir("TestHRegion").toString();
   private final int MAX_VERSIONS = 2;
@@ -3023,7 +3020,7 @@
     put.add(fam2, col, 1, Bytes.toBytes("test2"));
     ht.put(put);
-    HRegion firstRegion = htu.getHbaseCluster().
+    HRegion firstRegion = htu.getHBaseCluster().
         getRegions(Bytes.toBytes(this.getName())).get(0);
     firstRegion.flushcache();
     HDFSBlocksDistribution blocksDistribution1 =


@@ -279,7 +279,7 @@ public class TestMultiColumnScanner {
     HRegionInfo info =
         new HRegionInfo(Bytes.toBytes(tableName), null, null, false);
     HRegion region = HRegion.createHRegion(
-        info, HBaseTestingUtility.getTestDir(), TEST_UTIL.getConfiguration(),
+        info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(),
         htd);
     return region;
   }


@@ -62,7 +62,7 @@ public class TestResettingCounters {
     for (byte [] family : families) htd.addFamily(new HColumnDescriptor(family));
     HRegionInfo hri = new HRegionInfo(htd.getName(), null, null, false);
-    String testDir = HBaseTestingUtility.getTestDir() + "/TestResettingCounters/";
+    String testDir = htu.getDataTestDir() + "/TestResettingCounters/";
     Path path = new Path(testDir);
     if (fs.exists(path)) {
       if (!fs.delete(path, true)) {


@@ -79,20 +79,9 @@ public class TestScanner extends HBaseTestCase {
   private static final long START_CODE = Long.MAX_VALUE;
-  private MiniDFSCluster cluster = null;
   private HRegion r;
   private HRegionIncommon region;
-  @Override
-  public void setUp() throws Exception {
-    cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
-    // Set the hbase.rootdir to be the home directory in mini dfs.
-    this.conf.set(HConstants.HBASE_DIR,
-      this.cluster.getFileSystem().getHomeDirectory().toString());
-    super.setUp();
-  }
   /**
    * Test basic stop row filter works.
    * @throws Exception
@@ -138,7 +127,6 @@ public class TestScanner extends HBaseTestCase {
     } finally {
       this.r.close();
       this.r.getLog().closeAndDelete();
-      shutdownDfs(this.cluster);
     }
   }
@@ -192,7 +180,6 @@
     } finally {
       this.r.close();
       this.r.getLog().closeAndDelete();
-      shutdownDfs(this.cluster);
     }
   }
@@ -220,7 +207,6 @@
     } finally {
       this.r.close();
       this.r.getLog().closeAndDelete();
-      shutdownDfs(this.cluster);
     }
   }
@@ -333,13 +319,10 @@
       scan(true, address.toString());
       getRegionInfo();
+    } finally {
       // clean up
       r.close();
       r.getLog().closeAndDelete();
-    } finally {
-      shutdownDfs(cluster);
     }
   }
@@ -461,7 +444,6 @@
     } finally {
       this.r.close();
       this.r.getLog().closeAndDelete();
-      shutdownDfs(cluster);
     }
   }
@@ -485,7 +467,6 @@
     } finally {
       this.r.close();
       this.r.getLog().closeAndDelete();
-      shutdownDfs(cluster);
     }
   }


@@ -55,7 +55,7 @@ import org.mockito.Mockito;
 public class TestSplitTransaction {
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private final Path testdir =
-      HBaseTestingUtility.getTestDir(this.getClass().getName());
+      TEST_UTIL.getDataTestDir(this.getClass().getName());
   private HRegion parent;
   private HLog wal;
   private FileSystem fs;


@@ -92,7 +92,9 @@ public class TestStore extends TestCase {
   long id = System.currentTimeMillis();
   Get get = new Get(row);
-  private static final String DIR = HBaseTestingUtility.getTestDir() + "/TestStore/";
+  private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private final String DIR = TEST_UTIL.getDataTestDir("TestStore").toString();
   /**
    * Setup


@@ -58,29 +58,13 @@ import com.google.common.collect.Lists;
  */
 public class TestStoreFile extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestStoreFile.class);
-  private MiniDFSCluster cluster;
-  private CacheConfig cacheConf;
+  private CacheConfig cacheConf = new CacheConfig(conf);
+  private String ROOT_DIR;
   @Override
   public void setUp() throws Exception {
-    try {
-      this.cluster = new MiniDFSCluster(this.conf, 2, true, (String[])null);
-      // Set the hbase.rootdir to be the home directory in mini dfs.
-      this.conf.set(HConstants.HBASE_DIR,
-        this.cluster.getFileSystem().getHomeDirectory().toString());
-      this.cacheConf = new CacheConfig(conf);
-    } catch (IOException e) {
-      shutdownDfs(cluster);
-    }
     super.setUp();
-  }
-  @Override
-  public void tearDown() throws Exception {
-    super.tearDown();
-    shutdownDfs(cluster);
-    // ReflectionUtils.printThreadInfo(new PrintWriter(System.out),
-    //  "Temporary end-of-test thread dump debugging HADOOP-2040: " + getName());
+    ROOT_DIR = new Path(this.testDir, "TestStoreFile").toString();
   }
   /**
@@ -330,9 +314,7 @@
   }
   }
-  private static String ROOT_DIR =
-    HBaseTestingUtility.getTestDir("TestStoreFile").toString();
-  private static String localFormatter = "%010d";
+  private static final String localFormatter = "%010d";
   private void bloomWriteRead(StoreFile.Writer writer, FileSystem fs)
   throws Exception {
@@ -700,8 +682,7 @@
     Configuration conf = this.conf;
     // Find a home for our files
-    Path baseDir = new Path(new Path(this.testDir, "regionname"),
-        "twoCOWEOC");
+    Path baseDir = new Path(new Path(this.testDir, "regionname"),"twoCOWEOC");
     // Grab the block cache and get the initial hit/miss counts
     BlockCache bc = new CacheConfig(conf).getBlockCache();


@@ -69,18 +69,8 @@ public class TestWideScanner extends HBaseTestCase {
   }
   /** HRegionInfo for root region */
-  MiniDFSCluster cluster = null;
   HRegion r;
-  @Override
-  public void setUp() throws Exception {
-    cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
-    // Set the hbase.rootdir to be the home directory in mini dfs.
-    this.conf.set(HConstants.HBASE_DIR,
-      this.cluster.getFileSystem().getHomeDirectory().toString());
-    super.setUp();
-  }
   private int addWideContent(HRegion region) throws IOException {
     int count = 0;
     for (char c = 'a'; c <= 'c'; c++) {
@@ -153,7 +143,6 @@
     } finally {
       this.r.close();
       this.r.getLog().closeAndDelete();
-      shutdownDfs(this.cluster);
     }
   }
 }


@@ -62,7 +62,7 @@ public class TestCloseRegionHandler {
         new HRegionInfo(htd.getName(), HConstants.EMPTY_END_ROW,
             HConstants.EMPTY_END_ROW);
     HRegion region =
-        HRegion.createHRegion(hri, HBaseTestingUtility.getTestDir(),
+        HRegion.createHRegion(hri, HTU.getDataTestDir(),
            HTU.getConfiguration(), htd);
     assertNotNull(region);
     // Spy on the region so can throw exception when close is called.


@@ -25,14 +25,10 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.executor.RegionTransitionData;
 import org.apache.hadoop.hbase.executor.EventHandler.EventType;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -49,9 +45,6 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.mockito.Mockito;
-import org.mockito.invocation.InvocationOnMock;
-import org.mockito.stubbing.Answer;
 /**
  * Test of the {@link OpenRegionHandler}.
@@ -59,17 +52,18 @@ import org.mockito.stubbing.Answer;
 public class TestOpenRegionHandler {
   static final Log LOG = LogFactory.getLog(TestOpenRegionHandler.class);
   private final static HBaseTestingUtility HTU = new HBaseTestingUtility();
-  private static final HTableDescriptor TEST_HTD =
-    new HTableDescriptor("TestOpenRegionHandler.java");
+  private static HTableDescriptor TEST_HTD;
   private HRegionInfo TEST_HRI;
   private int testIndex = 0;
   @BeforeClass public static void before() throws Exception {
     HTU.startMiniZKCluster();
+    TEST_HTD = new HTableDescriptor("TestOpenRegionHandler.java");
   }
   @AfterClass public static void after() throws IOException {
+    TEST_HTD = null;
     HTU.shutdownMiniZKCluster();
   }
@@ -102,7 +96,7 @@ public class TestOpenRegionHandler {
     HTableDescriptor htd = TEST_HTD;
     final HRegionInfo hri = TEST_HRI;
     HRegion region =
-        HRegion.createHRegion(hri, HBaseTestingUtility.getTestDir(), HTU
+        HRegion.createHRegion(hri, HTU.getDataTestDir(), HTU
            .getConfiguration(), htd);
     assertNotNull(region);
     OpenRegionHandler handler = new OpenRegionHandler(server, rss, hri, htd) {


@@ -93,6 +93,7 @@ public class TestHLog {
   @After
   public void tearDown() throws Exception {
   }
+
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     // Make block sizes small.


@@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.regionserver.wal;
 import java.io.IOException;
 import java.util.Random;
-import java.text.NumberFormat;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.log4j.Level;
 import org.apache.commons.logging.Log;
@@ -39,17 +39,10 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.ipc.HBaseRPC;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
 public class TestHLogBench extends Configured implements Tool {
@@ -69,8 +62,10 @@ public class TestHLogBench extends Configured implements Tool {
   // the number of threads and the number of iterations per thread
   private int numThreads = 300;
   private int numIterationsPerThread = 10000;
-  private Path regionRootDir = new Path(HBaseTestingUtility.getTestDir() +
-    "/TestHLogBench/");
+  private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private Path regionRootDir =TEST_UTIL.getDataTestDir("TestHLogBench") ;
   private boolean appendNoSync = false;
   public TestHLogBench() {
@@ -327,7 +322,7 @@
     argv[2] = "-numIterationsPerThread";
     argv[3] = Integer.toString(1000);
     argv[4] = "-path";
-    argv[5] = HBaseTestingUtility.getTestDir() + "/HlogPerformance";
+    argv[5] = TEST_UTIL.getDataTestDir() + "/HlogPerformance";
     argv[6] = "-nosync";
     try {
       res = ToolRunner.run(bench, argv);


@@ -31,9 +31,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValueTestUtil;
-import org.apache.hadoop.hbase.MultithreadedTestUtil;
-import org.apache.hadoop.hbase.MultithreadedTestUtil.TestContext;
-import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
 import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter.EntryBuffers;
 import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter.RegionEntryBuffer;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -56,7 +53,7 @@ public class TestHLogMethods {
    */
   @Test public void testGetSplitEditFilesSorted() throws IOException {
     FileSystem fs = FileSystem.get(util.getConfiguration());
-    Path regiondir = HBaseTestingUtility.getTestDir("regiondir");
+    Path regiondir = util.getDataTestDir("regiondir");
     fs.delete(regiondir, true);
     fs.mkdirs(regiondir);
     Path recoverededits = HLog.getRegionDirRecoveredEditsDir(regiondir);


@@ -112,7 +112,7 @@ public class TestLogRollAbort {
   }
   @After
-  public void tearDown() throws IOException {
+  public void tearDown() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
   }


@ -77,11 +77,11 @@ public class TestLogRolling {
private HLog log; private HLog log;
private String tableName; private String tableName;
private byte[] value; private byte[] value;
private static FileSystem fs; private FileSystem fs;
private static MiniDFSCluster dfsCluster; private MiniDFSCluster dfsCluster;
private static HBaseAdmin admin; private HBaseAdmin admin;
private static MiniHBaseCluster cluster; private MiniHBaseCluster cluster;
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
// verbose logging on classes that are touched in these tests // verbose logging on classes that are touched in these tests
{ {
@ -100,19 +100,16 @@ public class TestLogRolling {
* @throws Exception * @throws Exception
*/ */
public TestLogRolling() { public TestLogRolling() {
// start one regionserver and a minidfs.
super();
this.server = null; this.server = null;
this.log = null; this.log = null;
this.tableName = null; this.tableName = null;
this.value = null;
String className = this.getClass().getName(); String className = this.getClass().getName();
StringBuilder v = new StringBuilder(className); StringBuilder v = new StringBuilder(className);
while (v.length() < 1000) { while (v.length() < 1000) {
v.append(className); v.append(className);
} }
value = Bytes.toBytes(v.toString()); this.value = Bytes.toBytes(v.toString());
} }
// Need to override this setup so we can edit the config before it gets sent // Need to override this setup so we can edit the config before it gets sent
@@ -175,8 +172,7 @@ public class TestLogRolling {
   }
   @After
-  public void tearDown() throws IOException {
-    TEST_UTIL.cleanupTestDir();
+  public void tearDown() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
   }
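
Note what vanished alongside the signature change: the explicit TEST_UTIL.cleanupTestDir() call. With each HBaseTestingUtility owning a private data directory, the natural reading (an inference, not something this hunk states) is that shutdownMiniCluster() now takes care of removing that directory itself, leaving a separate cleanup step necessary only for tests that never start the mini cluster.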

View File

@@ -55,9 +55,9 @@ public class TestWALActionsListener {
     conf = TEST_UTIL.getConfiguration();
     conf.setInt("hbase.regionserver.maxlogs", 5);
     fs = FileSystem.get(conf);
-    oldLogDir = new Path(HBaseTestingUtility.getTestDir(),
+    oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
         HConstants.HREGION_OLDLOGDIR_NAME);
-    logDir = new Path(HBaseTestingUtility.getTestDir(),
+    logDir = new Path(TEST_UTIL.getDataTestDir(),
         HConstants.HREGION_LOGDIR_NAME);
   }
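
The WAL directories move with the rest of the test data: both the active log dir and the old-log archive dir are now children of the utility's per-instance directory, so two utilities running side by side get disjoint WAL trees. A sketch of the resulting layout (the two HConstants names are the real ones used above; the surrounding scaffolding is illustrative):

    HBaseTestingUtility util = new HBaseTestingUtility();
    // Active WALs and archived WALs both live under the per-instance data dir.
    Path logDir = new Path(util.getDataTestDir(), HConstants.HREGION_LOGDIR_NAME);
    Path oldLogDir = new Path(util.getDataTestDir(), HConstants.HREGION_OLDLOGDIR_NAME);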

View File

@@ -26,8 +26,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException.SessionExpiredException;
-import org.junit.BeforeClass;
-import org.junit.Test;
+import org.junit.*;

 public class TestReplicationPeer {

View File

@@ -112,9 +112,9 @@ public class TestReplicationSourceManager {
     replication = new Replication(new DummyServer(), fs, logDir, oldLogDir);
     manager = replication.getReplicationManager();
     fs = FileSystem.get(conf);
-    oldLogDir = new Path(utility.getTestDir(),
+    oldLogDir = new Path(utility.getDataTestDir(),
         HConstants.HREGION_OLDLOGDIR_NAME);
-    logDir = new Path(utility.getTestDir(),
+    logDir = new Path(utility.getDataTestDir(),
         HConstants.HREGION_LOGDIR_NAME);
     manager.addSource(slaveId);

View File

@@ -65,7 +65,7 @@ public class TestThriftServer {
   }
   @AfterClass
-  public static void afterClass() throws IOException {
+  public static void afterClass() throws Exception {
     UTIL.shutdownMiniCluster();
   }

View File

@@ -32,8 +32,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableExistsException;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.Test;
@@ -49,7 +47,7 @@ public class TestFSTableDescriptors {
     final String name = "testRemoves";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detrius laying around.
-    Path rootdir = new Path(HBaseTestingUtility.getTestDir(), name);
+    Path rootdir = new Path(UTIL.getDataTestDir(), name);
     TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(name);
     htds.add(htd);
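
Each TestFSTableDescriptors case now derives a private rootdir from the utility's data directory before exercising the descriptor store. A round-trip sketch assembled from the calls visible in these hunks, with a made-up table name (the return type of get() is assumed):

    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
    Path rootdir = new Path(UTIL.getDataTestDir(), "exampleTable");
    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
    HTableDescriptor htd = new HTableDescriptor("exampleTable");
    htds.add(htd);                                             // write the descriptor out
    HTableDescriptor roundTripped = htds.get("exampleTable");  // and read it back
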
@@ -61,7 +59,7 @@ public class TestFSTableDescriptors {
     final String name = "testReadingHTDFromFS";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     HTableDescriptor htd = new HTableDescriptor(name);
-    Path rootdir = HBaseTestingUtility.getTestDir(name);
+    Path rootdir = UTIL.getDataTestDir(name);
     createHTDInFS(fs, rootdir, htd);
     HTableDescriptor htd2 =
       FSUtils.getTableDescriptor(fs, rootdir, htd.getNameAsString());
@@ -79,7 +77,7 @@ public class TestFSTableDescriptors {
     final String name = "testHTableDescriptors";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any debris laying around.
-    Path rootdir = new Path(HBaseTestingUtility.getTestDir(), name);
+    Path rootdir = new Path(UTIL.getDataTestDir(), name);
     final int count = 10;
     // Write out table infos.
     for (int i = 0; i < count; i++) {
@@ -128,7 +126,7 @@ public class TestFSTableDescriptors {
     final String name = "testNoSuchTable";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detrius laying around.
-    Path rootdir = new Path(HBaseTestingUtility.getTestDir(), name);
+    Path rootdir = new Path(UTIL.getDataTestDir(), name);
     TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
     htds.get("NoSuchTable");
   }
@@ -138,7 +136,7 @@ public class TestFSTableDescriptors {
     final String name = "testUpdates";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detrius laying around.
-    Path rootdir = new Path(HBaseTestingUtility.getTestDir(), name);
+    Path rootdir = new Path(UTIL.getDataTestDir(), name);
     TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(name);
     htds.add(htd);

View File

@@ -61,7 +61,7 @@ public class TestRegionSplitter {
   }
   @AfterClass
-  public static void teardown() throws IOException {
+  public static void teardown() throws Exception {
     UTIL.shutdownMiniCluster();
   }

View File

@@ -52,7 +52,7 @@ public class TestHQuorumPeer {
     // Set it to a non-standard port.
     TEST_UTIL.getConfiguration().setInt("hbase.zookeeper.property.clientPort",
         PORT_NO);
-    this.dataDir = HBaseTestingUtility.getTestDir(this.getClass().getName());
+    this.dataDir = TEST_UTIL.getDataTestDir(this.getClass().getName());
     FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
     if (fs.exists(this.dataDir)) {
       if (!fs.delete(this.dataDir, true)) {
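
Here the ZooKeeper data dir is keyed to the utility instance and named after the test class, and setup defensively clears anything left behind by an earlier run. A compressed sketch of the same guard (the thrown exception is illustrative; the test's actual reaction to a failed delete is truncated above):

    Path dataDir = TEST_UTIL.getDataTestDir(getClass().getName());
    FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
    if (fs.exists(dataDir) && !fs.delete(dataDir, true)) {
      // Illustrative failure mode only.
      throw new IOException("Failed to remove stale test dir " + dataDir);
    }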