HBASE-2587 Corral where tests write data when running and make sure clean target removes all written

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@946874 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2010-05-21 05:17:20 +00:00
parent 897e903d90
commit cb03dcca73
8 changed files with 153 additions and 105 deletions

CHANGES.txt

@@ -623,6 +623,8 @@ Release 0.21.0 - Unreleased
pulled from http://people.apache.org/~rawson/repo/....
HBASE-2534 Recursive deletes and misc improvements to ZKW
HBASE-2577 Remove 'core' maven module; move core up a level
HBASE-2587 Corral where tests write data when running and make sure clean
target removes all written
NEW FEATURES
HBASE-1961 HBase EC2 scripts

pom.xml

@@ -227,6 +227,15 @@
<plugin>
<artifactId>maven-clean-plugin</artifactId>
<version>2.4</version>
<configuration>
<filesets>
<fileset>
<!--dfs tests have build dir hardcoded. Clean it as part of
clean target-->
<directory>build</directory>
</fileset>
</filesets>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.rat</groupId>
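The new fileset is needed because, as the inline comment says, the dfs test code hard-codes its output under build/. A minimal sketch of the lookup that produces that directory (the default value is quoted verbatim in a comment inside startMiniDFSCluster further down; the class name here is hypothetical):

import java.io.File;

public class WhereDfsTestsWrite {
  public static void main(String[] args) {
    // Mirrors the lookup MiniDFSCluster does internally: the
    // test.build.data property, defaulting to "build/test/data".
    File baseDir = new File(
        System.getProperty("test.build.data", "build/test/data"), "dfs/");
    System.out.println("DFS test data lands in: " + baseDir.getAbsolutePath());
  }
}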

HBaseTestingUtility.java

@@ -48,20 +48,21 @@ import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWrapper;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.zookeeper.ZooKeeper;
/**
* Facility for testing HBase. Added as tool to abet junit4 testing. Replaces
* old HBaseTestCase and HBaseCluserTestCase functionality.
* Create an instance and keep it around doing HBase testing. This class is
* meant to be your one-stop shop for anything you mind need testing. Manages
* one cluster at a time only. Depends on log4j on classpath and hbase-site.xml
* for logging and test-run configuration. It does not set logging levels nor
* make changes to configuration parameters.
* meant to be your one-stop shop for anything you might need testing. Manages
* one cluster at a time only. Depends on log4j being on classpath and
* hbase-site.xml for logging and test-run configuration. It does not set
* logging levels nor make changes to configuration parameters.
*/
public class HBaseTestingUtility {
private final Log LOG = LogFactory.getLog(getClass());
private final Configuration conf;
private MiniZooKeeperCluster zkCluster = null;
private MiniDFSCluster dfsCluster = null;
@@ -71,6 +72,23 @@ public class HBaseTestingUtility {
private File clusterTestBuildDir = null;
private HBaseAdmin hbaseAdmin = null;
// Cache this. For some reason only works first time I get it. TODO: Figure
// out why.
private final static UserGroupInformation UGI;
static {
UGI = UserGroupInformation.getCurrentUGI();
}
/**
* System property key to get test directory value.
*/
public static final String TEST_DIRECTORY_KEY = "test.build.data";
/**
* Default parent directory for test output.
*/
public static final String DEFAULT_TEST_DIRECTORY = "target/build/data";
public HBaseTestingUtility() {
this(HBaseConfiguration.create());
}
@@ -79,10 +97,6 @@
this.conf = conf;
}
/** System property key to get test directory value.
*/
public static final String TEST_DIRECTORY_KEY = "test.build.data";
/**
* @return Instance of Configuration.
*/
@@ -91,27 +105,38 @@
}
/**
* @return Where to write test data on local filesystem; usually build/test/data
* @return Where to write test data on local filesystem; usually
* {@link #DEFAULT_TEST_DIRECTORY}
* @see #setupClusterTestBuildDir()
*/
public static Path getTestDir() {
return new Path(System.getProperty(TEST_DIRECTORY_KEY, "target/test/data"));
return new Path(System.getProperty(TEST_DIRECTORY_KEY,
DEFAULT_TEST_DIRECTORY));
}
/**
* Home our cluster in a dir under build/test. Give it a random name
* @param subdirName
* @return Path to a subdirectory named <code>subdirName</code> under
* {@link #getTestDir()}.
* @see #setupClusterTestBuildDir()
*/
public static Path getTestDir(final String subdirName) {
return new Path(getTestDir(), subdirName);
}
/**
* Home our cluster in a dir under target/test. Give it a random name
* so can have many concurrent clusters running if we need to. Need to
* amend the test.build.data System property. Its what minidfscluster bases
* it data dir on. Moding a System property is not the way to do concurrent
* instances -- another instance could grab the temporary
* value unintentionally -- but not anything can do about it at moment; its
* how the minidfscluster works.
* value unintentionally -- but not anything can do about it at moment;
* single instance only is how the minidfscluster works.
* @return The calculated cluster test build directory.
*/
File setupClusterTestBuildDir() {
String oldTestBuildDir =
System.getProperty(TEST_DIRECTORY_KEY, "build/test/data");
String randomStr = UUID.randomUUID().toString();
String dirStr = oldTestBuildDir + "." + randomStr;
String dirStr = getTestDir(randomStr).toString();
File dir = new File(dirStr).getAbsoluteFile();
// Have it cleaned up on exit
dir.deleteOnExit();
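Together, getTestDir() and the UUID suffix give every cluster its own directory under the corralled parent. A hedged sketch of the naming scheme, using only the static helpers shown above (the wrapper class is made up for illustration):

import java.util.UUID;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class TestDirNaming {
  public static void main(String[] args) {
    // target/build/data unless -Dtest.build.data overrides it.
    Path parent = HBaseTestingUtility.getTestDir();
    // One random subdirectory per cluster, as setupClusterTestBuildDir()
    // does above, so concurrent clusters do not collide.
    Path clusterDir =
        HBaseTestingUtility.getTestDir(UUID.randomUUID().toString());
    System.out.println(parent + " -> " + clusterDir);
  }
}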
@@ -119,7 +144,7 @@ public class HBaseTestingUtility {
}
/**
* @throws IOException If cluster already running.
* @throws IOException If a cluster -- zk, dfs, or hbase -- already running.
*/
void isRunningCluster() throws IOException {
if (this.clusterTestBuildDir == null) return;
@@ -128,20 +153,50 @@
}
/**
* @param subdirName
* @return Path to a subdirectory named <code>subdirName</code> under
* {@link #getTestDir()}.
* Start a minidfscluster.
* @param servers How many DNs to start.
* @throws Exception
* @see {@link #shutdownMiniDFSCluster()}
* @return The mini dfs cluster created.
*/
public Path getTestDir(final String subdirName) {
return new Path(getTestDir(), subdirName);
public MiniDFSCluster startMiniDFSCluster(int servers) throws Exception {
return startMiniDFSCluster(servers, null);
}
/**
* Start up a minicluster of hbase, dfs, and zookeeper.
* Start a minidfscluster.
* Can only create one.
* @param dir Where to home your dfs cluster.
* @param servers How many DNs to start.
* @throws Exception
* @see {@link #shutdownMiniDFSCluster()}
* @return The mini dfs cluster created.
*/
public MiniDFSCluster startMiniDFSCluster(int servers, final File dir)
throws Exception {
// This does the following to home the minidfscluster
// base_dir = new File(System.getProperty("test.build.data", "build/test/data"), "dfs/");
// Some tests also do this:
// System.getProperty("test.cache.data", "build/test/cache");
if (dir == null) this.clusterTestBuildDir = setupClusterTestBuildDir();
else this.clusterTestBuildDir = dir;
System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.toString());
System.setProperty("test.cache.data", this.clusterTestBuildDir.toString());
this.dfsCluster = new MiniDFSCluster(12345, this.conf, servers, true,
true, true, null, null, null, null);
return this.dfsCluster;
}
/**
* Shuts down instance created by call to {@link #startMiniDFSCluster(int, File)}
* or does nothing.
* @throws Exception
*/
public void startMiniCluster() throws Exception {
startMiniCluster(1);
public void shutdownMiniDFSCluster() throws Exception {
if (this.dfsCluster != null) {
// The below throws an exception per dn, AsynchronousCloseException.
this.dfsCluster.shutdown();
}
}
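A minimal usage sketch for the new start/stop pair, assuming only the 0.21-era API visible in this patch (the test class name is hypothetical):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsUsage {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Two datanodes, homed under a fresh random test directory.
    MiniDFSCluster dfs = util.startMiniDFSCluster(2);
    try {
      System.out.println("DFS up at " + dfs.getFileSystem().getUri());
    } finally {
      // Tolerates the per-datanode AsynchronousCloseException noted above.
      util.shutdownMiniDFSCluster();
    }
  }
}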
/**
@@ -149,13 +204,15 @@
* @see #startMiniZKCluster() if you want zk + dfs + hbase mini cluster.
* @throws Exception
* @see #shutdownMiniZKCluster()
* @return zk cluster started.
*/
public void startMiniZKCluster() throws Exception {
startMiniZKCluster(setupClusterTestBuildDir());
public MiniZooKeeperCluster startMiniZKCluster() throws Exception {
return startMiniZKCluster(setupClusterTestBuildDir());
}
private void startMiniZKCluster(final File dir) throws Exception {
private MiniZooKeeperCluster startMiniZKCluster(final File dir)
throws Exception {
if (this.zkCluster != null) {
throw new IOException("Cluster already running at " + dir);
}
@@ -163,9 +220,12 @@
int clientPort = this.zkCluster.startup(dir);
this.conf.set("hbase.zookeeper.property.clientPort",
Integer.toString(clientPort));
return this.zkCluster;
}
/**
* Shuts down zk cluster created by call to {@link #startMiniZKCluster(File)}
* or does nothing.
* @throws IOException
* @see #startMiniZKCluster()
*/
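The ZooKeeper pair works the same way; a sketch using the MiniZooKeeperCluster handle this patch starts returning (the import path for MiniZooKeeperCluster is assumed, not shown in this hunk):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;  // assumed path

public class MiniZkUsage {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    MiniZooKeeperCluster zk = util.startMiniZKCluster();
    try {
      // startMiniZKCluster has already written the client port into the
      // Configuration as hbase.zookeeper.property.clientPort.
      System.out.println("ZK minicluster started: " + zk);
    } finally {
      util.shutdownMiniZKCluster();
    }
  }
}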
@@ -174,44 +234,50 @@
}
/**
* Start up a minicluster of hbase, optinally dfs, and zookeeper.
* Start up a minicluster of hbase, dfs, and zookeeper.
* @throws Exception
* @return Mini hbase cluster instance created.
* @see {@link #shutdownMiniDFSCluster()}
*/
public MiniHBaseCluster startMiniCluster() throws Exception {
return startMiniCluster(1);
}
/**
* Start up a minicluster of hbase, optionally dfs, and zookeeper.
* Modifies Configuration. Homes the cluster data directory under a random
* subdirectory in a directory under System property test.build.data.
* Directory is cleaned up on exit.
* @param servers Number of servers to start up. We'll start this many
* datanodes and regionservers. If servers is > 1, then make sure
* hbase.regionserver.info.port is -1 (i.e. no ui per regionserver) otherwise
* bind errors.
* @throws Exception
* @see {@link #shutdownMiniCluster()}
* @return Mini hbase cluster instance created.
*/
public void startMiniCluster(final int servers)
public MiniHBaseCluster startMiniCluster(final int servers)
throws Exception {
LOG.info("Starting up minicluster");
// If we already put up a cluster, fail.
isRunningCluster();
String oldBuildTestDir =
System.getProperty(TEST_DIRECTORY_KEY, "build/test/data");
// Make a new random dir to home everything in. Set it as system property.
// minidfs reads home from system property.
this.clusterTestBuildDir = setupClusterTestBuildDir();
// Set our random dir while minidfscluster is being constructed.
System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestBuildDir.getPath());
// Bring up mini dfs cluster. This spews a bunch of warnings about missing
// scheme. TODO: fix.
// Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true,
true, true, null, null, null, null);
// Restore System property. minidfscluster accesses content of
// the TEST_DIRECTORY_KEY to make bad blocks, a feature we are not using,
// but otherwise, just in constructor.
System.setProperty(TEST_DIRECTORY_KEY, oldBuildTestDir);
// scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
startMiniDFSCluster(servers, this.clusterTestBuildDir);
// Mangle conf so fs parameter points to minidfs we just started up
FileSystem fs = this.dfsCluster.getFileSystem();
this.conf.set("fs.defaultFS", fs.getUri().toString());
// Do old style too just to be safe.
this.conf.set("fs.default.name", fs.getUri().toString());
this.dfsCluster.waitClusterUp();
// It could be created before the cluster
if(this.zkCluster == null) {
// Start up a zk cluster.
if (this.zkCluster == null) {
startMiniZKCluster(this.clusterTestBuildDir);
}
@@ -226,6 +292,7 @@ public class HBaseTestingUtility {
ResultScanner s = t.getScanner(new Scan());
while (s.next() != null) continue;
LOG.info("Minicluster is up");
return this.hbaseCluster;
}
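End to end, the revised contract reads naturally in a test; a minimal sketch, assuming the API shown in this patch (class name hypothetical):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;

public class MiniClusterUsage {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // One datanode and one regionserver; everything is homed under a
    // random subdirectory of target/build/data and deleted on exit.
    MiniHBaseCluster cluster = util.startMiniCluster(1);
    try {
      System.out.println("Minicluster is up: " + cluster);
    } finally {
      util.shutdownMiniCluster();  // the shutdown named in the javadoc above
    }
  }
}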
/**
@@ -502,25 +569,6 @@
return rows;
}
/**
* Removes all rows from the .META. in preparation to add custom ones.
*
* @throws IOException When removing the rows fails.
*/
private void emptyMetaTable() throws IOException {
HTable t = new HTable(this.conf, HConstants.META_TABLE_NAME);
ArrayList<Delete> deletes = new ArrayList<Delete>();
ResultScanner s = t.getScanner(new Scan());
for (Result result : s) {
LOG.info("emptyMetaTable: remove row -> " +
Bytes.toStringBinary(result.getRow()));
Delete del = new Delete(result.getRow());
deletes.add(del);
}
s.close();
t.delete(deletes);
}
/**
* Starts a <code>MiniMRCluster</code> with a default number of
* <code>TaskTracker</code>'s.
@@ -688,4 +736,23 @@ public class HBaseTestingUtility {
public MiniDFSCluster getDFSCluster() {
return dfsCluster;
}
}
/**
* Create a copy of the passed configuration laden with a new user. Use it
* to do things like get a new FileSystem instance.
* @param c
* @param index Some unique number used to make a unique user.
* @return Copy of <code>c</code> with new user stuff loaded into it.
* @throws IOException
*/
public static Configuration setDifferentUser(final Configuration c,
final int index)
throws IOException {
Configuration c2 = new Configuration(c);
String username = UGI.getUserName() + ".hrs." + index;
UnixUserGroupInformation.saveToConf(c2,
UnixUserGroupInformation.UGI_PROPERTY_NAME,
new UnixUserGroupInformation(username, new String[]{"supergroup"}));
return c2;
}
}
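A hedged sketch of the new static helper in use: derive a per-daemon user so each caller can get its own FileSystem instance (the class name is invented for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class DifferentUserUsage {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The index keeps each synthesized ".hrs.N" username unique.
    Configuration c2 = HBaseTestingUtility.setDifferentUser(conf, 0);
    // FileSystem caching keys on the user, so on a real DFS this can
    // return a different instance than FileSystem.get(conf) would.
    FileSystem fs = FileSystem.get(c2);
    System.out.println(fs.getUri());
  }
}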

MiniHBaseCluster.java

@@ -20,7 +20,6 @@
package org.apache.hadoop.hbase;
import java.io.IOException;
import java.net.BindException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@@ -36,10 +35,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.security.UnixUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
/**
* This class creates a single process HBase cluster.
@@ -49,13 +45,6 @@ import org.apache.hadoop.security.UserGroupInformation;
*/
public class MiniHBaseCluster implements HConstants {
static final Log LOG = LogFactory.getLog(MiniHBaseCluster.class.getName());
// Cache this. For some reason only works first time I get it. TODO: Figure
// out why.
private final static UserGroupInformation UGI;
static {
UGI = UserGroupInformation.getCurrentUGI();
}
private Configuration conf;
public LocalHBaseCluster hbaseCluster;
@@ -133,28 +122,7 @@ public class MiniHBaseCluster implements HConstants {
public MiniHBaseClusterRegionServer(Configuration conf)
throws IOException {
super(setDifferentUser(conf));
}
/*
* @param c
* @param currentfs We return this if we did not make a new one.
* @param uniqueName Same name used to help identify the created fs.
* @return A new fs instance if we are up on DistributeFileSystem.
* @throws IOException
*/
private static Configuration setDifferentUser(final Configuration c)
throws IOException {
FileSystem currentfs = FileSystem.get(c);
if (!(currentfs instanceof DistributedFileSystem)) return c;
// Else distributed filesystem. Make a new instance per daemon. Below
// code is taken from the AppendTestUtil over in hdfs.
Configuration c2 = new Configuration(c);
String username = UGI.getUserName() + ".hrs." + index++;
UnixUserGroupInformation.saveToConf(c2,
UnixUserGroupInformation.UGI_PROPERTY_NAME,
new UnixUserGroupInformation(username, new String[]{"supergroup"}));
return c2;
super(HBaseTestingUtility.setDifferentUser(conf, index++));
}
@Override

TestHFile.java

@@ -31,13 +31,13 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue.KeyComparator;
import org.apache.hadoop.hbase.io.hfile.HFile.BlockIndex;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.Writable;
/**
@@ -52,7 +52,7 @@ public class TestHFile extends HBaseTestCase {
static final Log LOG = LogFactory.getLog(TestHFile.class);
private static String ROOT_DIR =
System.getProperty("test.build.data", "/tmp/TestHFile");
HBaseTestingUtility.getTestDir("TestHFile").toString();
private final int minBlockSize = 512;
private static String localFormatter = "%010d";
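The remaining test files in this commit all get the same one-line treatment: the raw test.build.data lookup is replaced with HBaseTestingUtility.getTestDir(name), so every test writes under the one corralled directory. The pattern, sketched (class name hypothetical):

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class RootDirPattern {
  // Before: System.getProperty("test.build.data", "/tmp/TestHFile")
  // After:
  private static final String ROOT_DIR =
      HBaseTestingUtility.getTestDir("TestHFile").toString();

  public static void main(String[] args) {
    System.out.println(ROOT_DIR);
  }
}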

TestHFilePerformance.java

@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.compress.CompressionCodec;
@@ -45,7 +46,7 @@ import org.apache.hadoop.io.compress.GzipCodec;
*/
public class TestHFilePerformance extends TestCase {
private static String ROOT_DIR =
System.getProperty("test.build.data", "/tmp/TestHFilePerformance");
HBaseTestingUtility.getTestDir("TestHFilePerformance").toString();
private FileSystem fs;
private Configuration conf;
private long startTimeEpoch;

TestHFileSeek.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.io.hfile.HFile.Reader;
import org.apache.hadoop.hbase.io.hfile.HFile.Writer;
import org.apache.hadoop.io.BytesWritable;
@@ -235,7 +236,7 @@ public class TestHFileSeek extends TestCase {
int maxWordLen = 20;
String rootDir =
System.getProperty("test.build.data", "/tmp/TestTFileSeek");
HBaseTestingUtility.getTestDir("TestTFileSeek").toString();
String file = "TestTFileSeek";
// String compress = "lzo"; DISABLED
String compress = "none";

TestStoreFile.java

@@ -27,8 +27,8 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.Reference.Range;
@@ -308,7 +308,7 @@ public class TestStoreFile extends HBaseTestCase {
}
private static String ROOT_DIR =
System.getProperty("test.build.data", "/tmp/TestStoreFile");
HBaseTestingUtility.getTestDir("TestStoreFile").toString();
private static String localFormatter = "%010d";
public void testBloomFilter() throws Exception {