HBASE-5747 Forward port "hbase-5708 [89-fb] Make MiniMapRedCluster directory a subdirectory of target/test"

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1326000 13f79535-47bb-0310-9956-ffa450edef68

parent 4d04f85308
commit b987a52e36
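The effect of the change, illustrated: every HBaseTestingUtility instance now roots all of its scratch data (DFS, ZooKeeper, MapReduce) under target/test-data/<random-uuid> instead of the old build/hbase/test and test.build.data defaults. A minimal sketch of how a test sees this, using only methods that appear in the diff below (the printed paths are illustrative, not taken from this commit):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class TestDirLayoutSketch {
  public static void main(String[] args) {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Base directory for this utility instance, e.g. target/test-data/<uuid>
    Path dataDir = util.getDataTestDir();
    // Named subdirectory for one test, e.g. target/test-data/<uuid>/myTest
    Path perTest = util.getDataTestDir("myTest");
    System.out.println(dataDir + "\n" + perTest);
  }
}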
@@ -103,6 +103,10 @@ public final class HConstants {
   /** by default every master is a possible primary master unless the conf explicitly overrides it */
   public static final boolean DEFAULT_MASTER_TYPE_BACKUP = false;
 
+  /** Parameter name for ZooKeeper session time out.*/
+  public static final String ZOOKEEPER_SESSION_TIMEOUT =
+      "zookeeper.session.timeout";
+
   /** Name of ZooKeeper quorum configuration parameter. */
   public static final String ZOOKEEPER_QUORUM = "hbase.zookeeper.quorum";
 
@@ -655,6 +659,8 @@ public final class HConstants {
   /** Region in Transition metrics threshold time */
   public static final String METRICS_RIT_STUCK_WARNING_THRESHOLD="hbase.metrics.rit.stuck.warning.threshold";
 
+  public static final String LOAD_BALANCER_SLOP_KEY = "hbase.regions.slop";
+
   private HConstants() {
     // Can't be instantiated with this ctor.
   }
@@ -1004,6 +1004,10 @@ public class SplitLogManager extends ZooKeeperListener {
         Stat stat) {
       tot_mgr_get_data_result.incrementAndGet();
       if (rc != 0) {
+        if (rc == KeeperException.Code.SESSIONEXPIRED.intValue()) {
+          LOG.error("ZK session expired. Master is expected to shut down. Abandoning retries.");
+          return;
+        }
         if (rc == KeeperException.Code.NONODE.intValue()) {
           tot_mgr_get_data_nonode.incrementAndGet();
           // The task znode has been deleted. Must be some pending delete

@@ -1085,6 +1089,10 @@ public class SplitLogManager extends ZooKeeperListener {
     @Override
     public void processResult(int rc, String path, Object ctx, String name) {
       if (rc != 0) {
+        if (rc == KeeperException.Code.SESSIONEXPIRED.intValue()) {
+          LOG.error("ZK session expired. Master is expected to shut down. Abandoning retries.");
+          return;
+        }
         Long retry_count = (Long)ctx;
         LOG.warn("rc=" + KeeperException.Code.get(rc) + " for "+ path +
             " remaining retries=" + retry_count);
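Both hunks above add the same guard to SplitLogManager's asynchronous ZooKeeper callbacks: once the session has expired, per-call retries are pointless, so the callback logs and bails out. A minimal standalone sketch of the pattern (the HBase counters and retry bookkeeping are elided; this is not the HBase class itself):

import org.apache.zookeeper.AsyncCallback;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.data.Stat;

class GetDataCallbackSketch implements AsyncCallback.StatCallback {
  @Override
  public void processResult(int rc, String path, Object ctx, Stat stat) {
    if (rc != 0) {
      if (rc == KeeperException.Code.SESSIONEXPIRED.intValue()) {
        // The whole session is gone and the master will abort; no retry can succeed.
        System.err.println("ZK session expired; abandoning retries for " + path);
        return;
      }
      // Other error codes fall through to the normal per-node retry handling.
    }
    // ... success path ...
  }
}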
@@ -19,7 +19,6 @@
  */
 package org.apache.hadoop.hbase;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.util.Iterator;

@@ -54,16 +53,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 public abstract class HBaseTestCase extends TestCase {
   private static final Log LOG = LogFactory.getLog(HBaseTestCase.class);
 
-  /** configuration parameter name for test directory
-   * @deprecated see HBaseTestingUtility#TEST_DIRECTORY_KEY
-   **/
-  private static final String TEST_DIRECTORY_KEY = "test.build.data";
-
-  /*
-  protected final static byte [] fam1 = Bytes.toBytes("colfamily1");
-  protected final static byte [] fam2 = Bytes.toBytes("colfamily2");
-  protected final static byte [] fam3 = Bytes.toBytes("colfamily3");
-  */
   protected final static byte [] fam1 = Bytes.toBytes("colfamily11");
   protected final static byte [] fam2 = Bytes.toBytes("colfamily21");
   protected final static byte [] fam3 = Bytes.toBytes("colfamily31");

@@ -82,9 +71,7 @@ public abstract class HBaseTestCase extends TestCase {
   protected String START_KEY;
   protected static final int MAXVERSIONS = 3;
 
-  static {
-    initialize();
-  }
+  protected final HBaseTestingUtility testUtil = new HBaseTestingUtility();
 
   public volatile Configuration conf;
 

@@ -161,19 +148,12 @@ public abstract class HBaseTestCase extends TestCase {
    * @return directory to use for this test
    */
   protected Path getUnitTestdir(String testName) {
-    return new Path(
-        System.getProperty(
-            HBaseTestingUtility.BASE_TEST_DIRECTORY_KEY,
-            HBaseTestingUtility.DEFAULT_BASE_TEST_DIRECTORY
-        ),
-        testName
-    );
+    return testUtil.getDataTestDir(testName);
   }
 
   protected HRegion createNewHRegion(HTableDescriptor desc, byte [] startKey,
       byte [] endKey)
   throws IOException {
-    FileSystem filesystem = FileSystem.get(conf);
     HRegionInfo hri = new HRegionInfo(desc.getName(), startKey, endKey);
     return HRegion.createHRegion(hri, testDir, conf, desc);
   }
@@ -628,21 +608,6 @@ public abstract class HBaseTestCase extends TestCase {
     }
   }
 
-  /**
-   * Initializes parameters used in the test environment:
-   *
-   * Sets the configuration parameter TEST_DIRECTORY_KEY if not already set.
-   * Sets the boolean debugging if "DEBUGGING" is set in the environment.
-   * If debugging is enabled, reconfigures logging so that the root log level is
-   * set to WARN and the logging level for the package is set to DEBUG.
-   */
-  public static void initialize() {
-    if (System.getProperty(TEST_DIRECTORY_KEY) == null) {
-      System.setProperty(TEST_DIRECTORY_KEY, new File(
-          "build/hbase/test").getAbsolutePath());
-    }
-  }
-
   /**
    * Common method to close down a MiniDFSCluster and the associated file system
    *
@@ -25,6 +25,7 @@ import java.io.File;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
 import java.net.InetAddress;
 import java.net.ServerSocket;
 import java.net.Socket;

@@ -84,8 +85,8 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.MiniMRCluster;
+import org.apache.hadoop.mapred.TaskLog;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.KeeperException.NodeExistsException;
 import org.apache.zookeeper.WatchedEvent;
@@ -124,11 +125,15 @@ public class HBaseTestingUtility {
   private MiniHBaseCluster hbaseCluster = null;
   private MiniMRCluster mrCluster = null;
 
-  // Directory where we put the data for this instance of HBaseTestingUtility
+  /** If there is a mini cluster running for this testing utility instance. */
+  private boolean miniClusterRunning;
+
+  private String hadoopLogDir;
+
+  // Directory where we put the data for this instance of HBaseTestingUtility.
   private File dataTestDir = null;
 
-  // Directory (usually a subdirectory of dataTestDir) used by the dfs cluster
-  // if any
+  // Directory (a subdirectory of dataTestDir) used by the dfs cluster if any
   private File clusterTestDir = null;
 
   /**

@@ -151,6 +156,9 @@ public class HBaseTestingUtility {
    */
   public static final String DEFAULT_BASE_TEST_DIRECTORY = "target/test-data";
 
+  /** Filesystem URI used for map-reduce mini-cluster setup */
+  private static String FS_URI;
+
   /** Compression algorithms to use in parameterized JUnit 4 tests */
   public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
     Arrays.asList(new Object[][] {
@@ -236,24 +244,42 @@ public class HBaseTestingUtility {
    * @see #getTestFileSystem()
    */
   public Path getDataTestDir() {
-    if (dataTestDir == null){
+    if (this.dataTestDir == null){
       setupDataTestDir();
     }
-    return new Path(dataTestDir.getAbsolutePath());
+    return new Path(this.dataTestDir.getAbsolutePath());
   }
 
   /**
    * @return Where the DFS cluster will write data on the local subsystem.
-   * Creates it if it does not exist already.
+   * Creates it if it does not exist already. A subdir of {@link #getBaseTestDir()}
    * @see #getTestFileSystem()
    */
-  public Path getClusterTestDir() {
+  Path getClusterTestDir() {
     if (clusterTestDir == null){
       setupClusterTestDir();
     }
     return new Path(clusterTestDir.getAbsolutePath());
   }
 
+  /**
+   * Creates a directory for the DFS cluster, under the test data
+   */
+  private void setupClusterTestDir() {
+    if (clusterTestDir != null) {
+      return;
+    }
+
+    // Using randomUUID ensures that multiple clusters can be launched by
+    // a same test, if it stops & starts them
+    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
+    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
+    // Have it cleaned up on exit
+    clusterTestDir.deleteOnExit();
+    conf.set(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
+    LOG.info("Created new mini-cluster data directory: " + clusterTestDir);
+  }
+
   /**
    * @param subdirName
    * @return Path to a subdirectory named <code>subdirName</code> under
@@ -281,7 +307,7 @@ public class HBaseTestingUtility {
    * @return The calculated data test build directory.
    */
   private void setupDataTestDir() {
-    if (dataTestDir != null) {
+    if (this.dataTestDir != null) {
       LOG.warn("Data test dir already setup in " +
           dataTestDir.getAbsolutePath());
       return;

@@ -290,8 +316,8 @@ public class HBaseTestingUtility {
     String randomStr = UUID.randomUUID().toString();
     Path testPath= new Path(getBaseTestDir(), randomStr);
 
-    dataTestDir = new File(testPath.toString()).getAbsoluteFile();
-    dataTestDir.deleteOnExit();
+    this.dataTestDir = new File(testPath.toString()).getAbsoluteFile();
+    this.dataTestDir.deleteOnExit();
 
     createSubDirAndSystemProperty(
       "hadoop.log.dir",

@@ -329,7 +355,7 @@ public class HBaseTestingUtility {
       // There is already a value set. So we do nothing but hope
       // that there will be no conflicts
       LOG.info("System.getProperty(\""+propertyName+"\") already set to: "+
-        sysValue + " so I do NOT create it in "+dataTestDir.getAbsolutePath());
+        sysValue + " so I do NOT create it in " + this.dataTestDir.getAbsolutePath());
       String confValue = conf.get(propertyName);
       if (confValue != null && !confValue.endsWith(sysValue)){
         LOG.warn(

@@ -346,33 +372,6 @@ public class HBaseTestingUtility {
     }
   }
 
-  /**
-   * Creates a directory for the DFS cluster, under the test data
-   */
-  private void setupClusterTestDir() {
-    if (clusterTestDir != null) {
-      LOG.warn("Cluster test dir already setup in " +
-          clusterTestDir.getAbsolutePath());
-      return;
-    }
-
-    // Using randomUUID ensures that multiple clusters can be launched by
-    // a same test, if it stops & starts them
-    Path testDir = getDataTestDir("dfscluster_" + UUID.randomUUID().toString());
-    clusterTestDir = new File(testDir.toString()).getAbsoluteFile();
-    // Have it cleaned up on exit
-    clusterTestDir.deleteOnExit();
-  }
-
-  /**
-   * @throws IOException If a cluster -- zk, dfs, or hbase -- already running.
-   */
-  public void isRunningCluster() throws IOException {
-    if (dfsCluster == null) return;
-    throw new IOException("Cluster already running at " +
-      this.clusterTestDir);
-  }
-
   /**
    * Start a minidfscluster.
    * @param servers How many DNs to start.
@@ -415,24 +414,7 @@ public class HBaseTestingUtility {
    */
   public MiniDFSCluster startMiniDFSCluster(int servers, final String hosts[])
   throws Exception {
-    // Check that there is not already a cluster running
-    isRunningCluster();
-
-    // Initialize the local directory used by the MiniDFS
-    if (clusterTestDir == null) {
-      setupClusterTestDir();
-    }
-
-    // We have to set this property as it is used by MiniCluster
-    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.toString());
-
-    // Some tests also do this:
-    //  System.getProperty("test.cache.data", "build/test/cache");
-    // It's also deprecated
-    System.setProperty("test.cache.data", this.clusterTestDir.toString());
-
-    // Ok, now we can start
+    createDirsAndSetProperties();
     this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
       true, null, null, hosts, null);
 
@@ -448,18 +430,46 @@ public class HBaseTestingUtility {
     return this.dfsCluster;
   }
 
+  public MiniDFSCluster startMiniDFSClusterForTestHLog(int namenodePort) throws IOException {
+    createDirsAndSetProperties();
+    dfsCluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null,
+        null, null, null);
+    return dfsCluster;
+  }
+
+  /** This is used before starting HDFS and map-reduce mini-clusters */
+  private void createDirsAndSetProperties() {
+    setupClusterTestDir();
+    System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
+    createDirAndSetProperty("cache_data", "test.cache.data");
+    createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
+    hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
+    createDirAndSetProperty("mapred_output", "mapred.output.dir");
+    createDirAndSetProperty("mapred_local", "mapred.local.dir");
+    createDirAndSetProperty("mapred_system", "mapred.system.dir");
+    createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
+  }
+
+  private String createDirAndSetProperty(final String relPath, String property) {
+    String path = clusterTestDir.getPath() + "/" + relPath;
+    System.setProperty(property, path);
+    conf.set(property, path);
+    new File(path).mkdirs();
+    LOG.info("Setting " + property + " to " + path + " in system properties and HBase conf");
+    return path;
+  }
+
   /**
    * Shuts down instance created by call to {@link #startMiniDFSCluster(int)}
    * or does nothing.
-   * @throws Exception
+   * @throws IOException
    */
-  public void shutdownMiniDFSCluster() throws Exception {
+  public void shutdownMiniDFSCluster() throws IOException {
     if (this.dfsCluster != null) {
       // The below throws an exception per dn, AsynchronousCloseException.
       this.dfsCluster.shutdown();
       dfsCluster = null;
     }
 
   }
 
   /**
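The two helpers added above are the core of the fix: every Hadoop scratch property (test.cache.data, hadoop.log.dir, the mapred.* dirs) is pinned to a subdirectory of the per-instance cluster dir before any mini-cluster starts. A standalone sketch of that wiring; the base path below is a made-up example of what setupClusterTestDir() would produce:

import java.io.File;

public class DirPropertySketch {
  // Illustrative re-statement of createDirAndSetProperty(), minus the HBase conf.
  static String createDirAndSetProperty(File base, String relPath, String property) {
    String path = new File(base, relPath).getPath();
    System.setProperty(property, path); // MiniDFSCluster/MiniMRCluster read these
    new File(path).mkdirs();
    return path;
  }

  public static void main(String[] args) {
    File base = new File("target/test-data/example-uuid/dfscluster_example");
    System.out.println(createDirAndSetProperty(base, "cache_data", "test.cache.data"));
    System.out.println(createDirAndSetProperty(base, "hadoop_logs", "hadoop.log.dir"));
    System.out.println(createDirAndSetProperty(base, "mapred_local", "mapred.local.dir"));
  }
}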
@@ -483,8 +493,8 @@ public class HBaseTestingUtility {
    */
   public MiniZooKeeperCluster startMiniZKCluster(int zooKeeperServerNum)
       throws Exception {
-    File zkClusterFile = new File(getClusterTestDir().toString());
-    return startMiniZKCluster(zkClusterFile, zooKeeperServerNum);
+    setupClusterTestDir();
+    return startMiniZKCluster(clusterTestDir, zooKeeperServerNum);
   }
 
   private MiniZooKeeperCluster startMiniZKCluster(final File dir)
@@ -597,7 +607,13 @@ public class HBaseTestingUtility {
         numSlaves + " regionserver(s) and " + numDataNodes + " datanode(s)");
 
     // If we already put up a cluster, fail.
-    isRunningCluster();
+    if (miniClusterRunning) {
+      throw new IllegalStateException("A mini-cluster is already running");
+    }
+    miniClusterRunning = true;
+
+    setupClusterTestDir();
+    System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.getPath());
 
     // Bring up mini dfs cluster. This spews a bunch of warnings about missing
     // scheme. Complaints are 'Scheme is undefined for build/test/data/dfs/name1'.
@@ -691,15 +707,8 @@ public class HBaseTestingUtility {
     }
     shutdownMiniDFSCluster();
 
-    // Clean up our directory.
-    if (this.clusterTestDir != null && this.clusterTestDir.exists()) {
-      // Need to use deleteDirectory because File.delete required dir is empty.
-      if (!FSUtils.deleteDirectory(FileSystem.getLocal(this.conf),
-          new Path(this.clusterTestDir.toString()))) {
-        LOG.warn("Failed delete of " + this.clusterTestDir.toString());
-      }
-      this.clusterTestDir = null;
-    }
+    cleanupTestDir();
+    miniClusterRunning = false;
     LOG.info("Minicluster is down");
   }
 
@@ -1265,26 +1274,57 @@ public class HBaseTestingUtility {
    *
    * @throws IOException When starting the cluster fails.
    */
-  public void startMiniMapReduceCluster() throws IOException {
+  public MiniMRCluster startMiniMapReduceCluster() throws IOException {
     startMiniMapReduceCluster(2);
+    return mrCluster;
   }
 
   /**
-   * Starts a <code>MiniMRCluster</code>.
-   *
+   * Tasktracker has a bug where changing the hadoop.log.dir system property
+   * will not change its internal static LOG_DIR variable.
+   */
+  private void forceChangeTaskLogDir() {
+    Field logDirField;
+    try {
+      logDirField = TaskLog.class.getDeclaredField("LOG_DIR");
+      logDirField.setAccessible(true);
+
+      Field modifiersField = Field.class.getDeclaredField("modifiers");
+      modifiersField.setAccessible(true);
+      modifiersField.setInt(logDirField, logDirField.getModifiers() & ~Modifier.FINAL);
+
+      logDirField.set(null, new File(hadoopLogDir, "userlogs"));
+    } catch (SecurityException e) {
+      throw new RuntimeException(e);
+    } catch (NoSuchFieldException e) {
+      // TODO Auto-generated catch block
+      throw new RuntimeException(e);
+    } catch (IllegalArgumentException e) {
+      throw new RuntimeException(e);
+    } catch (IllegalAccessException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Starts a <code>MiniMRCluster</code>. Call {@link #setFileSystemURI(String)} to use a different
+   * filesystem.
    * @param servers The number of <code>TaskTracker</code>'s to start.
    * @throws IOException When starting the cluster fails.
    */
-  public void startMiniMapReduceCluster(final int servers) throws IOException {
-    LOG.info("Starting mini mapreduce cluster...");
-    if (dataTestDir == null) {
-      setupDataTestDir();
+  private void startMiniMapReduceCluster(final int servers) throws IOException {
+    if (mrCluster != null) {
+      throw new IllegalStateException("MiniMRCluster is already running");
     }
+    LOG.info("Starting mini mapreduce cluster...");
+    setupClusterTestDir();
+    createDirsAndSetProperties();
 
-    // These are needed for the new and improved Map/Reduce framework
-    conf.set("mapred.output.dir", conf.get("hadoop.tmp.dir"));
-    mrCluster = new MiniMRCluster(0, 0, servers,
-      FileSystem.get(conf).getUri().toString(), 1, null, null, null, new JobConf(conf));
+    forceChangeTaskLogDir();
+
+    // Allow the user to override FS URI for this map-reduce cluster to use.
+    mrCluster = new MiniMRCluster(servers,
+      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1);
     mrCluster.getJobTrackerRunner().getJobTracker().getConf().set("mapred.local.dir",
       conf.get("mapred.local.dir")); //Hadoop MiniMR overwrites this while it should not
     LOG.info("Mini mapreduce cluster started");
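forceChangeTaskLogDir() relies on the classic (and JDK-version-sensitive) trick of clearing the FINAL modifier bit on a Field so a static final can be reassigned; it works on the Java 6/7 runtimes this code targeted, but later JDKs block access to Field.modifiers. Distilled to a self-contained sketch:

import java.io.File;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;

// Sketch of the static-final rewrite used by forceChangeTaskLogDir().
// Assumes a pre-Java-9 runtime; newer JDKs refuse the Field.modifiers access.
public class FinalFieldSketch {
  static class Holder { static final File DIR = new File("/tmp/old"); }

  public static void main(String[] args) throws Exception {
    Field dir = Holder.class.getDeclaredField("DIR");
    dir.setAccessible(true);

    Field modifiers = Field.class.getDeclaredField("modifiers");
    modifiers.setAccessible(true);
    modifiers.setInt(dir, dir.getModifiers() & ~Modifier.FINAL); // drop FINAL

    dir.set(null, new File("/tmp/new")); // now allowed
    System.out.println(dir.get(null));   // prints /tmp/new
  }
}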
@@ -1524,17 +1564,15 @@ public class HBaseTestingUtility {
   }
 
   /**
-   * @return True if we removed the test dir
+   * @return True if we removed the test dirs
    * @throws IOException
    */
-  public boolean cleanupTestDir() throws IOException {
-    if (dataTestDir == null ){
-      return false;
-    } else {
-      boolean ret = deleteDir(getDataTestDir());
-      dataTestDir = null;
-      return ret;
+  boolean cleanupTestDir() throws IOException {
+    if (deleteDir(this.dataTestDir)) {
+      this.dataTestDir = null;
+      return true;
     }
+    return false;
   }
 
   /**

@@ -1542,11 +1580,11 @@ public class HBaseTestingUtility {
    * @return True if we removed the test dir
    * @throws IOException
    */
-  public boolean cleanupTestDir(final String subdir) throws IOException {
-    if (dataTestDir == null){
+  boolean cleanupTestDir(final String subdir) throws IOException {
+    if (this.dataTestDir == null){
       return false;
     }
-    return deleteDir(getDataTestDir(subdir));
+    return deleteDir(new File(this.dataTestDir, subdir));
   }
 
   /**

@@ -1554,10 +1592,15 @@ public class HBaseTestingUtility {
    * @return True if we deleted it.
    * @throws IOException
    */
-  public boolean deleteDir(final Path dir) throws IOException {
-    FileSystem fs = getTestFileSystem();
-    if (fs.exists(dir)) {
-      return fs.delete(getDataTestDir(), true);
+  boolean deleteDir(final File dir) throws IOException {
+    if (dir != null && dir.exists()) {
+      // Need to use deleteDirectory because File.delete required dir is empty.
+      if (!FSUtils.deleteDirectory(FileSystem.getLocal(this.conf),
+          new Path(dir.getAbsolutePath()))) {
+        LOG.warn("Failed delete of " + dir.toString());
+      } else {
+        return true;
+      }
     }
     return false;
   }
@@ -2037,4 +2080,7 @@ public class HBaseTestingUtility {
     return region;
   }
 
+  public void setFileSystemURI(String fsURI) {
+    FS_URI = fsURI;
+  }
 }
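Taken together, the HBaseTestingUtility changes give tests a call pattern along these lines. This is a hypothetical sketch: shutdownMiniMapReduceCluster() is assumed from the pre-existing API and does not appear in this diff, while everything else does:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.mapred.MiniMRCluster;

public class MiniMRUsageSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniDFSCluster(1);   // scratch dirs land under target/test-data/<uuid>
    // Optional: point the MR cluster at a specific filesystem first.
    // util.setFileSystemURI("hdfs://localhost:8020");
    MiniMRCluster mr = util.startMiniMapReduceCluster(); // 2 TaskTrackers, returns the cluster
    // ... submit jobs against util.getConfiguration() ...
    util.shutdownMiniMapReduceCluster(); // assumed existing shutdown counterpart
    util.shutdownMiniDFSCluster();
  }
}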
@@ -58,6 +58,7 @@ public class TestFullLogReconstruction {
     // faster failover with cluster.shutdown();fs.close() idiom
     c.setInt("ipc.client.connect.max.retries", 1);
     c.setInt("dfs.client.block.recovery.retries", 1);
+    c.setInt(HConstants.ZOOKEEPER_SESSION_TIMEOUT, 1000);
     TEST_UTIL.startMiniCluster(2);
   }
 

@@ -129,4 +130,3 @@ public class TestFullLogReconstruction {
   public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
     new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
 }
-
@@ -24,9 +24,6 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
-import java.io.File;
-import java.io.IOException;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;

@@ -38,10 +35,6 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 

@@ -194,7 +187,7 @@ public class TestHBaseTestingUtility {
 
   @Test public void testMiniDFSCluster() throws Exception {
     HBaseTestingUtility hbt = new HBaseTestingUtility();
-    MiniDFSCluster cluster = hbt.startMiniDFSCluster(1);
+    MiniDFSCluster cluster = hbt.startMiniDFSCluster(null);
     FileSystem dfs = cluster.getFileSystem();
     Path dir = new Path("dir");
     Path qualifiedDir = dfs.makeQualified(dir);

@@ -213,12 +206,11 @@ public class TestHBaseTestingUtility {
 
     assertFalse(fs.exists(testdir));
 
-    hbt.startMiniDFSCluster(1);
+    hbt.startMiniDFSCluster(null);
     assertTrue(fs.exists(testdir));
 
     hbt.shutdownMiniCluster();
     assertFalse(fs.exists(testdir));
-
   }
 
   @Test public void testTestDir() throws Exception {
@@ -68,8 +68,10 @@ public class TestZooKeeper {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     // Test we can first start the ZK cluster by itself
+    Configuration conf = TEST_UTIL.getConfiguration();
     TEST_UTIL.startMiniZKCluster();
-    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
+    conf.setBoolean("dfs.support.append", true);
+    conf.setInt(HConstants.ZOOKEEPER_SESSION_TIMEOUT, 1000);
     TEST_UTIL.startMiniCluster(2);
   }
 
@@ -176,6 +176,8 @@ public class TestClassLoading {
     String classpath =
         currentDir + Path.SEPARATOR + "target"+ Path.SEPARATOR + "classes" +
         System.getProperty("path.separator") +
+        // Note that the below trick only works if mvn is running the test;
+        // doesn't work in eclipse for example.
         System.getProperty("surefire.test.class.path");
     options.add(classpath);
     LOG.debug("Setting classpath to: "+classpath);
@@ -67,7 +67,6 @@ public class TestCoprocessorEndpoint {
   private static byte[][] ROWS = makeN(ROW, ROWSIZE);
 
   private static HBaseTestingUtility util = new HBaseTestingUtility();
-  private static MiniHBaseCluster cluster = null;
 
   @BeforeClass
   public static void setupBeforeClass() throws Exception {

@@ -76,10 +75,7 @@ public class TestCoprocessorEndpoint {
     conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
         "org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint",
         "org.apache.hadoop.hbase.coprocessor.GenericEndpoint");
 
     util.startMiniCluster(2);
-    cluster = util.getMiniHBaseCluster();
-
     HTable table = util.createTable(TEST_TABLE, TEST_FAMILY);
     util.createMultiRegions(util.getConfiguration(), table, TEST_FAMILY,
         new byte[][] { HConstants.EMPTY_BYTE_ARRAY,
@@ -130,8 +130,7 @@ public class TestFixedFileTrailer {
     }
 
     // Now check what happens if the trailer is corrupted.
-    Path trailerPath = new Path(util.getDataTestDir(), "trailer_"
-        + version);
+    Path trailerPath = new Path(util.getDataTestDir(), "trailer_" + version);
 
     {
       for (byte invalidVersion : new byte[] { HFile.MIN_FORMAT_VERSION - 1,
@@ -52,7 +52,9 @@ import org.junit.experimental.categories.Category;
 public class TestHFile extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestHFile.class);
 
-  private String ROOT_DIR;
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static String ROOT_DIR =
+      TEST_UTIL.getDataTestDir("TestHFile").toString();
   private final int minBlockSize = 512;
   private static String localFormatter = "%010d";
   private static CacheConfig cacheConf = null;

@@ -61,7 +63,6 @@ public class TestHFile extends HBaseTestCase {
   @Override
   public void setUp() throws Exception {
     startingMetrics = SchemaMetrics.getMetricsSnapshot();
-    ROOT_DIR = this.getUnitTestdir("TestHFile").toString();
     super.setUp();
   }
 
@@ -78,14 +78,6 @@ public class TestHFileDataBlockEncoder {
     SchemaMetrics.configureGlobally(conf);
   }
 
-  /**
-   * Cleanup after JUnit test.
-   */
-  @After
-  public void tearDown() throws IOException {
-    TEST_UTIL.cleanupTestDir();
-  }
-
   /**
    * Test putting and taking out blocks into cache with different
    * encoding options.
@@ -66,6 +66,8 @@ public class TestHFileSeek extends TestCase {
   private RandomDistribution.DiscreteRNG keyLenGen;
   private KVGenerator kvGen;
 
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
   @Override
   public void setUp() throws IOException {
     if (options == null) {
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.BlockMetricType;
 import org.apache.hadoop.hbase.util.Bytes;

@@ -66,7 +65,7 @@ public class TestScannerSelectionUsingTTL {
   private static String FAMILY = "myCF";
   private static byte[] FAMILY_BYTES = Bytes.toBytes(FAMILY);
 
-  private static final int TTL_SECONDS = 2;
+  private static final int TTL_SECONDS = 10;
   private static final int TTL_MS = TTL_SECONDS * 1000;
 
   private static final int NUM_EXPIRED_FILES = 2;

@@ -106,7 +105,7 @@ public class TestScannerSelectionUsingTTL {
     htd.addFamily(hcd);
     HRegionInfo info = new HRegionInfo(Bytes.toBytes(TABLE));
     HRegion region =
-        HRegion.createHRegion(info, TEST_UTIL.getClusterTestDir(),
+        HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(info.getEncodedName()),
             TEST_UTIL.getConfiguration(), htd);
 
     for (int iFile = 0; iFile < totalNumFiles; ++iFile) {
@@ -77,7 +77,6 @@ import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;

@@ -159,12 +158,6 @@ public class TestHFileOutputFormat {
     }
   }
 
-  @Before
-  public void cleanupDir() throws IOException {
-    util.cleanupTestDir();
-  }
-
-
   private void setupRandomGeneratorMapper(Job job) {
     job.setInputFormatClass(NMapInputFormat.class);
     job.setMapperClass(RandomKVGeneratingMapper.class);

@@ -370,16 +363,19 @@ public class TestHFileOutputFormat {
 
   @Test
   public void testMRIncrementalLoad() throws Exception {
+    LOG.info("\nStarting test testMRIncrementalLoad\n");
     doIncrementalLoadTest(false);
   }
 
   @Test
   public void testMRIncrementalLoadWithSplit() throws Exception {
+    LOG.info("\nStarting test testMRIncrementalLoadWithSplit\n");
     doIncrementalLoadTest(true);
   }
 
   private void doIncrementalLoadTest(
       boolean shouldChangeRegions) throws Exception {
+    util = new HBaseTestingUtility();
     Configuration conf = util.getConfiguration();
     Path testDir = util.getDataTestDir("testLocalMRIncrementalLoad");
     byte[][] startKeys = generateRandomStartKeys(5);

@@ -442,9 +438,7 @@ public class TestHFileOutputFormat {
         expectedRows, util.countRows(table));
     Scan scan = new Scan();
     ResultScanner results = table.getScanner(scan);
-    int count = 0;
     for (Result res : results) {
-      count++;
       assertEquals(FAMILIES.length, res.raw().length);
       KeyValue first = res.raw()[0];
       for (KeyValue kv : res.raw()) {
@@ -165,7 +165,7 @@ public class TestTimeRangeMapRed {
 
   private void runTestOnTable()
   throws IOException, InterruptedException, ClassNotFoundException {
-    UTIL.startMiniMapReduceCluster(1);
+    UTIL.startMiniMapReduceCluster();
     Job job = null;
     try {
       job = new Job(UTIL.getConfiguration(), "test123");
@@ -69,6 +69,12 @@ public class TestSplitLogManager {
   private final static HBaseTestingUtility TEST_UTIL =
     new HBaseTestingUtility();
 
+  /**
+   * Additional amount of time we wait for events to happen. Added where unit
+   * test failures have been observed.
+   */
+  private static final int EXTRA_TOLERANCE_MS = 200;
+
   static Stoppable stopper = new Stoppable() {
     @Override
     public void stop(String why) {

@@ -94,7 +100,8 @@ public class TestSplitLogManager {
   public void setup() throws Exception {
     TEST_UTIL.startMiniZKCluster();
     conf = TEST_UTIL.getConfiguration();
-    zkw = new ZooKeeperWatcher(conf, "split-log-manager-tests", null);
+    // Use a different ZK wrapper instance for each tests.
+    zkw = new ZooKeeperWatcher(conf, "split-log-manager-tests" + UUID.randomUUID().toString(), null);
     ZKUtil.deleteChildrenRecursively(zkw, zkw.baseZNode);
     ZKUtil.createAndFailSilent(zkw, zkw.baseZNode);
     assertTrue(ZKUtil.checkExists(zkw, zkw.baseZNode) != -1);

@@ -211,7 +218,7 @@ public class TestSplitLogManager {
     assertTrue((task.last_update <= curt) &&
         (task.last_update > (curt - 1000)));
     LOG.info("waiting for manager to resubmit the orphan task");
-    waitForCounter(tot_mgr_resubmit, 0, 1, to + 100);
+    waitForCounter(tot_mgr_resubmit, 0, 1, to + 300);
     assertTrue(task.isUnassigned());
     waitForCounter(tot_mgr_rescan, 0, 1, to + 100);
   }

@@ -265,7 +272,7 @@ public class TestSplitLogManager {
 
     ZKUtil.setData(zkw, tasknode, TaskState.TASK_OWNED.get("worker1"));
     waitForCounter(tot_mgr_heartbeat, 0, 1, 1000);
-    waitForCounter(tot_mgr_resubmit, 0, 1, to + 100);
+    waitForCounter(tot_mgr_resubmit, 0, 1, to + EXTRA_TOLERANCE_MS);
     int version1 = ZKUtil.checkExists(zkw, tasknode);
     assertTrue(version1 > version);
     ZKUtil.setData(zkw, tasknode, TaskState.TASK_OWNED.get("worker2"));

@@ -275,8 +282,8 @@ public class TestSplitLogManager {
     assertTrue(version2 > version1);
     ZKUtil.setData(zkw, tasknode, TaskState.TASK_OWNED.get("worker3"));
     waitForCounter(tot_mgr_heartbeat, 1, 2, 1000);
-    waitForCounter(tot_mgr_resubmit_threshold_reached, 0, 1, to + 100);
-    Thread.sleep(to + 100);
+    waitForCounter(tot_mgr_resubmit_threshold_reached, 0, 1, to + EXTRA_TOLERANCE_MS);
+    Thread.sleep(to + EXTRA_TOLERANCE_MS);
     assertEquals(2L, tot_mgr_resubmit.get());
   }
 
@@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.client.Scan;

@@ -66,8 +67,10 @@ import com.google.common.collect.Lists;
 @Category(SmallTests.class)
 public class TestStoreFile extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestStoreFile.class);
-  private CacheConfig cacheConf = new CacheConfig(conf);
-  private String ROOT_DIR;
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
+  private static String ROOT_DIR =
+      TEST_UTIL.getDataTestDir("TestStoreFile").toString();
   private Map<String, Long> startingMetrics;
 
   private static final ChecksumType CKTYPE = ChecksumType.CRC32;

@@ -77,7 +80,6 @@ public class TestStoreFile extends HBaseTestCase {
   public void setUp() throws Exception {
     super.setUp();
     startingMetrics = SchemaMetrics.getMetricsSnapshot();
-    ROOT_DIR = new Path(this.testDir, "TestStoreFile").toString();
   }
 
   @Override
@@ -19,10 +19,7 @@
  */
 package org.apache.hadoop.hbase.regionserver.wal;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 
 import java.io.IOException;
 import java.lang.reflect.Method;

@@ -51,7 +48,6 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.io.SequenceFile;
 import org.apache.log4j.Level;

@@ -405,6 +401,7 @@ public class TestHLog {
       LOG.error("Waiting for cluster to go down");
       Thread.sleep(1000);
     }
+    assertFalse(cluster.isClusterUp());
 
     // Workaround a strange issue with Hadoop's RPC system - if we don't
     // sleep here, the new datanodes will pick up a cached IPC connection to

@@ -412,11 +409,12 @@ public class TestHLog {
     // the idle time threshold configured in the conf above
     Thread.sleep(2000);
 
-    cluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true, null, null, null, null);
-    TEST_UTIL.setDFSCluster(cluster);
+    LOG.info("Waiting a few seconds before re-starting HDFS");
+    Thread.sleep(5000);
+    cluster = TEST_UTIL.startMiniDFSClusterForTestHLog(namenodePort);
     cluster.waitActive();
     fs = cluster.getFileSystem();
-    LOG.info("START second instance.");
+    LOG.info("STARTED second instance.");
   }
 
     // set the lease period to be 1 second so that the
@@ -76,6 +76,9 @@ public class TestMiniClusterLoadSequential {
     this.isMultiPut = isMultiPut;
     this.dataBlockEncoding = dataBlockEncoding;
     conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024);
+
+    // We don't want any region reassignments by the load balancer during the test.
+    conf.setFloat(HConstants.LOAD_BALANCER_SLOP_KEY, 10.0f);
   }
 
   @Parameters
@@ -93,7 +93,7 @@ public class TestZKLeaderManager {
 
       while (master.get() && !stopped) {
         try {
-          Thread.sleep(200);
+          Thread.sleep(10);
         } catch (InterruptedException ignored) {}
       }
     }

@@ -204,8 +204,8 @@ public class TestZKLeaderManager {
   private MockLeader getCurrentLeader() throws Exception {
     MockLeader currentLeader = null;
     outer:
-    // wait up to 2 secs for initial leader
-    for (int i = 0; i < 20; i++) {
+    // Wait up to 10 secs for initial leader
+    for (int i = 0; i < 1000; i++) {
       for (int j = 0; j < CANDIDATES.length; j++) {
         if (CANDIDATES[j].isMaster()) {
           // should only be one leader

@@ -218,7 +218,7 @@ public class TestZKLeaderManager {
       if (currentLeader != null) {
         break outer;
       }
-      Thread.sleep(100);
+      Thread.sleep(10);
     }
     return currentLeader;
   }
@@ -142,4 +142,11 @@
       version is X.X.X-SNAPSHOT"
     </description>
   </property>
+  <property>
+    <name>hbase.client.retries.number</name>
+    <value>100</value>
+    <description>
+      Use a lot of retries in unit tests.
+    </description>
+  </property>
 </configuration>