HBASE-536 Remove MiniDFS startup from MiniHBaseCluster

- Changed MiniHBaseCluster so that it no longer starts up a MiniDFS.
- Changed HBaseClusterTestCase to do the work of starting up a MiniDFS.
- Added pre- and post-setup methods to HBaseClusterTestCase so you can control what happens before MiniHBaseCluster is booted up.
- Converted AbstractMergeTestBase to be an HBaseClusterTestCase.
- Converted any test that used a raw MiniDFS or MiniHBaseCluster to use HBaseClusterTestCase instead.
- Split TestTimestamp into two tests: one for the client side (now in o.a.h.h.client) and one for the server side (o.a.h.h.regionserver).
- Merged in Stack's changes to make bin/hbase put the hadoop jars first on the classpath.
- Updated PerformanceEvaluation (in --miniCluster mode) to start up a DFS first.
- Fixed a bug in BaseScanner that would have allowed NPEs to be generated.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@640526 13f79535-47bb-0310-9956-ffa450edef68
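For orientation, here is a minimal sketch of the test pattern this commit introduces. The class name and test body are illustrative only; the constructor, the pre/post hooks, and the inherited `conf`, `cluster`, and `dfsCluster` fields are the ones added in the HBaseClusterTestCase diff below.

```java
// Hypothetical test illustrating the new lifecycle; not part of this commit.
import org.apache.hadoop.hbase.HBaseClusterTestCase;

public class TestWithPreparedConfig extends HBaseClusterTestCase {

  public TestWithPreparedConfig() {
    super(1); // one region server; a MiniDFSCluster is started for us
  }

  @Override
  protected void preHBaseClusterSetup() throws Exception {
    // Runs after the MiniDFS is up but before MiniHBaseCluster boots,
    // so configuration edits made here are seen by the cluster.
    conf.setLong("hbase.hregion.max.filesize", 64L * 1024L * 1024L);
  }

  @Override
  protected void postHBaseClusterSetup() throws Exception {
    // Runs once the HBase cluster is up: create tables, preload data, etc.
  }

  public void testSomething() throws Exception {
    // The inherited 'cluster' (MiniHBaseCluster) and 'dfsCluster' fields are ready here.
  }
}
```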
This commit is contained in:
parent aabe9f09a9
commit 65d961ae78
@@ -103,6 +103,7 @@ Hbase Change Log
   HBASE-515   At least double default timeouts between regionserver and master
   HBASE-529   RegionServer needs to recover if datanode goes down
   HBASE-456   Clearly state which ports need to be opened in order to run HBase
   HBASE-536   Remove MiniDFS startup from MiniHBaseCluster

Branch 0.1
bin/hbase

@@ -97,6 +97,18 @@ if [ "$HBASE_HEAPSIZE" != "" ]; then
#echo $JAVA_HEAP_MAX
fi

# so that filenames w/ spaces are handled correctly in loops below
IFS=

# Add libs to CLASSPATH
# Do this early so hadoop jar comes before hbase classes; otherwise
# complaint because way webapps are loaded, expectation is that
# loading class is from same jar as webapps themselves (only true
# if hadoop comes first).
for f in $HBASE_HOME/lib/*.jar; do
  CLASSPATH=${CLASSPATH}:$f;
done

# CLASSPATH initially contains $HBASE_CONF_DIR
CLASSPATH="${CLASSPATH}:${HBASE_CONF_DIR}"
CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar

@@ -112,9 +124,6 @@ if [ -d "$HBASE_HOME/build/webapps" ]; then
  CLASSPATH=${CLASSPATH}:$HBASE_HOME/build
fi

# so that filenames w/ spaces are handled correctly in loops below
IFS=

# for releases, add hbase, hadoop & webapps to CLASSPATH
for f in $HBASE_HOME/hbase*.jar; do
  if [ -f $f ]; then

@@ -125,11 +134,6 @@ if [ -d "$HBASE_HOME/webapps" ]; then
  CLASSPATH=${CLASSPATH}:$HBASE_HOME
fi

# add libs to CLASSPATH
for f in $HBASE_HOME/lib/*.jar; do
  CLASSPATH=${CLASSPATH}:$f;
done

for f in $HBASE_HOME/lib/jetty-ext/*.jar; do
  CLASSPATH=${CLASSPATH}:$f;
done
@@ -308,7 +308,7 @@ abstract class BaseScanner extends Chore implements HConstants {
  throws IOException {
    boolean result = false;
    HRegionInfo split =
      Writables.getHRegionInfoOrNull(rowContent.get(splitColumn).getValue());
      Writables.getHRegionInfo(rowContent.get(splitColumn));
    if (split == null) {
      return result;
    }
@@ -31,18 +31,25 @@ import org.apache.log4j.Logger;
import org.apache.hadoop.hbase.regionserver.HRegion;

/** Abstract base class for merge tests */
public abstract class AbstractMergeTestBase extends HBaseTestCase {
public abstract class AbstractMergeTestBase extends HBaseClusterTestCase {
  static final Logger LOG =
    Logger.getLogger(AbstractMergeTestBase.class.getName());
  protected static final Text COLUMN_NAME = new Text("contents:");
  protected final Random rand = new Random();
  protected HTableDescriptor desc;
  protected ImmutableBytesWritable value;

  /** constructor */
  protected boolean startMiniHBase;

  public AbstractMergeTestBase() {
    this(true);
  }

  /** constructor */
  public AbstractMergeTestBase(boolean startMiniHBase) {
    super();

    this.startMiniHBase = startMiniHBase;

    // We will use the same value for the rows as that is not really important here

    String partialValue = String.valueOf(System.currentTimeMillis());

@@ -61,62 +68,10 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
    desc.addFamily(new HColumnDescriptor(COLUMN_NAME.toString()));
  }

  protected MiniDFSCluster dfsCluster = null;

  /**
   * {@inheritDoc}
   */
  @Override
  public void setUp() throws Exception {
    conf.setLong("hbase.hregion.max.filesize", 64L * 1024L * 1024L);
    dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
    // Set the hbase.rootdir to be the home directory in mini dfs.
    this.conf.set(HConstants.HBASE_DIR,
      this.dfsCluster.getFileSystem().getHomeDirectory().toString());

    // Note: we must call super.setUp after starting the mini cluster or
    // we will end up with a local file system

    super.setUp();

    // We create three data regions: The first is too large to merge since it
    // will be > 64 MB in size. The second two will be smaller and will be
    // selected for merging.

    // To ensure that the first region is larger than 64MB we need to write at
    // least 65536 rows. We will make certain by writing 70000

    try {
      Text row_70001 = new Text("row_70001");
      Text row_80001 = new Text("row_80001");

      HRegion[] regions = {
        createAregion(null, row_70001, 1, 70000),
        createAregion(row_70001, row_80001, 70001, 10000),
        createAregion(row_80001, null, 80001, 10000)
      };

      // Now create the root and meta regions and insert the data regions
      // created above into the meta

      HRegion root = HRegion.createHRegion(HRegionInfo.rootRegionInfo,
        testDir, this.conf);
      HRegion meta = HRegion.createHRegion(HRegionInfo.firstMetaRegionInfo,
        testDir, this.conf);
      HRegion.addRegionToMETA(root, meta);

      for(int i = 0; i < regions.length; i++) {
        HRegion.addRegionToMETA(meta, regions[i]);
      }

      root.close();
      root.getLog().closeAndDelete();
      meta.close();
      meta.getLog().closeAndDelete();

    } catch (Exception e) {
      StaticTestEnvironment.shutdownDfs(dfsCluster);
      throw e;
  protected void HBaseClusterSetup() throws Exception {
    if (startMiniHBase) {
      super.HBaseClusterSetup();
    }
  }

@@ -124,9 +79,42 @@ public abstract class AbstractMergeTestBase extends HBaseTestCase {
   * {@inheritDoc}
   */
  @Override
  public void tearDown() throws Exception {
    super.tearDown();
    StaticTestEnvironment.shutdownDfs(dfsCluster);
  public void preHBaseClusterSetup() throws Exception {
    conf.setLong("hbase.hregion.max.filesize", 64L * 1024L * 1024L);

    // We create three data regions: The first is too large to merge since it
    // will be > 64 MB in size. The second two will be smaller and will be
    // selected for merging.

    // To ensure that the first region is larger than 64MB we need to write at
    // least 65536 rows. We will make certain by writing 70000

    Text row_70001 = new Text("row_70001");
    Text row_80001 = new Text("row_80001");

    HRegion[] regions = {
      createAregion(null, row_70001, 1, 70000),
      createAregion(row_70001, row_80001, 70001, 10000),
      createAregion(row_80001, null, 80001, 10000)
    };

    // Now create the root and meta regions and insert the data regions
    // created above into the meta

    HRegion root = HRegion.createHRegion(HRegionInfo.rootRegionInfo,
      testDir, this.conf);
    HRegion meta = HRegion.createHRegion(HRegionInfo.firstMetaRegionInfo,
      testDir, this.conf);
    HRegion.addRegionToMETA(root, meta);

    for(int i = 0; i < regions.length; i++) {
      HRegion.addRegionToMETA(meta, regions[i]);
    }

    root.close();
    root.getLog().closeAndDelete();
    meta.close();
    meta.getLog().closeAndDelete();
  }

  private HRegion createAregion(Text startKey, Text endKey, int firstRow,
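As a usage note (a sketch, not part of the diff): a merge test that wants to drive the merge itself can opt out of the in-process HBase cluster via the new boolean constructor, while still getting the data regions created in preHBaseClusterSetup. The TestMergeMeta change further down does essentially this.

```java
// Illustrative subclass; the subclass name is made up.
public class MergeWithoutClusterSketch extends AbstractMergeTestBase {
  public MergeWithoutClusterSketch() throws Exception {
    // Regions are still laid down in preHBaseClusterSetup,
    // but no MiniHBaseCluster is started.
    super(false);
  }
}
```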
@@ -57,7 +57,7 @@ public class DFSAbort extends HBaseClusterTestCase {
    try {
      // By now the Mini DFS is running, Mini HBase is running and we have
      // created a table. Now let's yank the rug out from HBase
      cluster.getDFSCluster().shutdown();
      dfsCluster.shutdown();
      threadDumpingJoin();
    } catch (Exception e) {
      e.printStackTrace();
@@ -21,10 +21,16 @@ package org.apache.hadoop.hbase;

import java.io.PrintWriter;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.io.Text;

/**
 * Abstract base class for HBase cluster junit tests. Spins up an hbase

@@ -35,67 +41,116 @@ public abstract class HBaseClusterTestCase extends HBaseTestCase {
    LogFactory.getLog(HBaseClusterTestCase.class.getName());

  protected MiniHBaseCluster cluster;
  final boolean miniHdfs;
  int regionServers;
  protected MiniDFSCluster dfsCluster;
  protected int regionServers;
  protected boolean startDfs;

  /**
   * constructor
   */
  public HBaseClusterTestCase() {
    this(true);
    this(1);
  }

  /**
   * @param regionServers
   */
   * Start a MiniHBaseCluster with regionServers region servers in-process to
   * start with. Also, start a MiniDfsCluster before starting the hbase cluster.
   * The configuration used will be edited so that this works correctly.
   */
  public HBaseClusterTestCase(int regionServers) {
    this(true);
    this(regionServers, true);
  }

  /**
   * Start a MiniHBaseCluster with regionServers region servers in-process to
   * start with. Optionally, startDfs indicates if a MiniDFSCluster should be
   * started. If startDfs is false, the assumption is that an external DFS is
   * configured in hbase-site.xml and is already started, or you have started a
   * MiniDFSCluster on your own and edited the configuration in memory. (You
   * can modify the config used by overriding the preHBaseClusterSetup method.)
   */
  public HBaseClusterTestCase(int regionServers, boolean startDfs) {
    super();
    this.startDfs = startDfs;
    this.regionServers = regionServers;
  }

  /**
   * Run after dfs is ready but before hbase cluster is started up.
   */
  protected void preHBaseClusterSetup() throws Exception {
  }

  /**
   * @param name
   * Actually start the MiniHBase instance.
   */
  public HBaseClusterTestCase(String name) {
    this(name, true);
  protected void HBaseClusterSetup() throws Exception {
    // start the mini cluster
    this.cluster = new MiniHBaseCluster(conf, regionServers);
    HTable meta = new HTable(conf, new Text(".META."));
  }

  /**
   * @param miniHdfs
   * Run after hbase cluster is started up.
   */
  public HBaseClusterTestCase(final boolean miniHdfs) {
    super();
    this.miniHdfs = miniHdfs;
    this.regionServers = 1;
  }

  /**
   * @param name
   * @param miniHdfs
   */
  public HBaseClusterTestCase(String name, final boolean miniHdfs) {
    super(name);
    this.miniHdfs = miniHdfs;
    this.regionServers = 1;
  }
  protected void postHBaseClusterSetup() throws Exception {
  }

  @Override
  protected void setUp() throws Exception {
    this.cluster =
      new MiniHBaseCluster(this.conf, this.regionServers, this.miniHdfs);
    super.setUp();
    try {
      if (startDfs) {
        // start up the dfs
        dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);

        // mangle the conf so that the fs parameter points to the minidfs we
        // just started up
        FileSystem fs = dfsCluster.getFileSystem();
        conf.set("fs.default.name", fs.getName());
        Path parentdir = fs.getHomeDirectory();
        conf.set(HConstants.HBASE_DIR, parentdir.toString());
        fs.mkdirs(parentdir);
        FSUtils.setVersion(fs, parentdir);
      }

      // do the super setup now. if we had done it first, then we would have
      // gotten our conf all mangled and a local fs started up.
      super.setUp();

      // run the pre-cluster setup
      preHBaseClusterSetup();

      // start the instance
      HBaseClusterSetup();

      // run post-cluster setup
      postHBaseClusterSetup();
    } catch (Exception e) {
      LOG.error("Exception in setup!", e);
      if (cluster != null) {
        cluster.shutdown();
      }
      if (dfsCluster != null) {
        StaticTestEnvironment.shutdownDfs(dfsCluster);
      }
      throw e;
    }
  }

  @Override
  protected void tearDown() throws Exception {
    super.tearDown();
    HConnectionManager.deleteConnection(conf);
    if (this.cluster != null) {
      try {
        this.cluster.shutdown();
      } catch (Exception e) {
        LOG.warn("Closing mini dfs", e);
    try {
      HConnectionManager.deleteConnection(conf);
      if (this.cluster != null) {
        try {
          this.cluster.shutdown();
        } catch (Exception e) {
          LOG.warn("Closing mini dfs", e);
        }
      }
      if (startDfs) {
        StaticTestEnvironment.shutdownDfs(dfsCluster);
      }
    } catch (Exception e) {
      LOG.error(e);
    }
    // ReflectionUtils.printThreadInfo(new PrintWriter(System.out),
    //   "Temporary end-of-test thread dump debugging HADOOP-2040: " + getName());
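A hedged sketch of the startDfs=false path described in the javadoc above; the subclass name is made up, while the two-argument constructor and the preHBaseClusterSetup hook are the ones added in this file.

```java
import org.apache.hadoop.hbase.HBaseClusterTestCase;

// Hypothetical test that runs against a filesystem the test did not start itself.
public class TestAgainstExternalDfs extends HBaseClusterTestCase {

  public TestAgainstExternalDfs() {
    // Do not start a MiniDFSCluster; assume fs.default.name / hbase.rootdir
    // already point at a running DFS (e.g. configured in hbase-site.xml).
    super(1, false);
  }

  @Override
  protected void preHBaseClusterSetup() throws Exception {
    // Alternatively, point the in-memory config at a filesystem you started yourself
    // before the MiniHBaseCluster boots. The URL below is only an example.
    // conf.set("fs.default.name", "hdfs://localhost:9000");
  }
}
```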
@@ -1,5 +1,5 @@
/**
 * Copyright 2007 The Apache Software Foundation
 * Copyright 2008 The Apache Software Foundation
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file

@@ -23,7 +23,6 @@ import java.io.File;
import java.io.IOException;
import java.util.List;

import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.log4j.Logger;

@@ -42,111 +41,23 @@ public class MiniHBaseCluster implements HConstants {
    Logger.getLogger(MiniHBaseCluster.class.getName());

  private HBaseConfiguration conf;
  private MiniDFSCluster cluster;
  private FileSystem fs;
  private boolean shutdownDFS;
  private Path parentdir;
  private LocalHBaseCluster hbaseCluster;
  private boolean deleteOnExit = true;

  /**
   * Starts a MiniHBaseCluster on top of a new MiniDFSCluster
   *
   * @param conf
   * @param nRegionNodes
   * @throws IOException
   * Start a MiniHBaseCluster. conf is assumed to contain a valid fs name to
   * hook up to.
   */
  public MiniHBaseCluster(HBaseConfiguration conf, int nRegionNodes)
  public MiniHBaseCluster(HBaseConfiguration conf, int numRegionServers)
  throws IOException {
    this(conf, nRegionNodes, true, true, true);
  }

  /**
   * Start a MiniHBaseCluster. Use the native file system unless
   * miniHdfsFilesystem is set to true.
   *
   * @param conf
   * @param nRegionNodes
   * @param miniHdfsFilesystem
   * @throws IOException
   */
  public MiniHBaseCluster(HBaseConfiguration conf, int nRegionNodes,
    final boolean miniHdfsFilesystem) throws IOException {
    this(conf, nRegionNodes, miniHdfsFilesystem, true, true);
  }

  /**
   * Starts a MiniHBaseCluster on top of an existing HDFSCluster
   *<pre>
   ****************************************************************************
   *            * * * * * * N O T E * * * * *
   *
   * If you use this constructor, you should shut down the mini dfs cluster
   * in your test case.
   *
   *            * * * * * * N O T E * * * * *
   ****************************************************************************
   *</pre>
   *
   * @param conf
   * @param nRegionNodes
   * @param dfsCluster
   * @param deleteOnExit
   * @throws IOException
   */
  public MiniHBaseCluster(HBaseConfiguration conf, int nRegionNodes,
    MiniDFSCluster dfsCluster, boolean deleteOnExit) throws IOException {

    this.conf = conf;
    this.fs = dfsCluster.getFileSystem();
    this.cluster = dfsCluster;
    this.shutdownDFS = false;
    this.deleteOnExit = deleteOnExit;
    init(nRegionNodes);
  }

  /**
   * Constructor.
   * @param conf
   * @param nRegionNodes
   * @param miniHdfsFilesystem If true, set the hbase mini
   * cluster atop a mini hdfs cluster. Otherwise, use the
   * filesystem configured in <code>conf</code>.
   * @param format the mini hdfs cluster
   * @param deleteOnExit clean up mini hdfs files
   * @throws IOException
   */
  public MiniHBaseCluster(HBaseConfiguration conf, int nRegionNodes,
    final boolean miniHdfsFilesystem, boolean format, boolean deleteOnExit)
  throws IOException {

    this.conf = conf;
    this.deleteOnExit = deleteOnExit;
    this.shutdownDFS = false;
    if (miniHdfsFilesystem) {
      try {
        this.cluster = new MiniDFSCluster(this.conf, 2, format, (String[])null);
        this.fs = cluster.getFileSystem();
        this.shutdownDFS = true;
      } catch (IOException e) {
        StaticTestEnvironment.shutdownDfs(cluster);
        throw e;
      }
    } else {
      this.cluster = null;
      this.fs = FileSystem.get(conf);
    }
    init(nRegionNodes);
    init(numRegionServers);
  }

  private void init(final int nRegionNodes) throws IOException {
    try {
      this.parentdir = this.fs.getHomeDirectory();
      this.conf.set(HConstants.HBASE_DIR, this.parentdir.toString());
      this.fs.mkdirs(parentdir);
      FSUtils.setVersion(fs, parentdir);
      this.hbaseCluster = new LocalHBaseCluster(this.conf, nRegionNodes);
      this.hbaseCluster.startup();
      // start up a LocalHBaseCluster
      hbaseCluster = new LocalHBaseCluster(conf, nRegionNodes);
      hbaseCluster.startup();
    } catch(IOException e) {
      shutdown();
      throw e;

@@ -166,15 +77,6 @@ public class MiniHBaseCluster implements HConstants {
    return t.getName();
  }

  /**
   * Get the cluster on which this HBase cluster is running
   *
   * @return MiniDFSCluster
   */
  public MiniDFSCluster getDFSCluster() {
    return cluster;
  }

  /**
   * @return Returns the rpc address actually used by the master server, because
   * the supplied port is not necessarily the actual port used.

@@ -240,14 +142,6 @@ public class MiniHBaseCluster implements HConstants {
    if (this.hbaseCluster != null) {
      this.hbaseCluster.shutdown();
    }
    if (shutdownDFS) {
      StaticTestEnvironment.shutdownDfs(cluster);
    }
    // Delete all DFS files
    if(deleteOnExit) {
      deleteFile(new File(System.getProperty(
        StaticTestEnvironment.TEST_DIRECTORY_KEY), "dfs"));
    }
  }

  private void deleteFile(File f) {
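To make the new contract concrete, here is a sketch assembled from the calls visible in this diff: the caller now prepares the filesystem, then starts HBase, and owns the DFS lifecycle. This mirrors what HBaseClusterTestCase.setUp() and PerformanceEvaluation do after this commit; the wrapper class and main method are only for illustration.

```java
import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.StaticTestEnvironment;
import org.apache.hadoop.hbase.util.FSUtils;

public class MiniClusterBootSketch {
  public static void main(String[] args) throws Exception {
    HBaseConfiguration conf = new HBaseConfiguration();

    // The caller now owns the DFS: start it and point the config at it.
    MiniDFSCluster dfsCluster = new MiniDFSCluster(conf, 2, true, (String[]) null);
    FileSystem fs = dfsCluster.getFileSystem();
    conf.set("fs.default.name", fs.getName());
    Path parentdir = fs.getHomeDirectory();
    conf.set(HConstants.HBASE_DIR, parentdir.toString());
    fs.mkdirs(parentdir);
    FSUtils.setVersion(fs, parentdir);

    // MiniHBaseCluster no longer starts a MiniDFS of its own.
    MiniHBaseCluster hbaseCluster = new MiniHBaseCluster(conf, 1);
    try {
      // ... exercise the cluster ...
    } finally {
      hbaseCluster.shutdown();
      // The caller shuts the DFS down as well.
      StaticTestEnvironment.shutdownDfs(dfsCluster);
    }
  }
}
```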
@@ -39,16 +39,14 @@ import org.apache.hadoop.hbase.regionserver.HRegionServer;
/**
 * Utility class to build a table of multiple regions.
 */
public class MultiRegionTable extends HBaseTestCase {
public class MultiRegionTable extends HBaseClusterTestCase {
  static final Log LOG = LogFactory.getLog(MultiRegionTable.class.getName());

  /** {@inheritDoc} */
  @Override
  public void setUp() throws Exception {
  public MultiRegionTable() {
    super();
    // These are needed for the new and improved Map/Reduce framework
    System.setProperty("hadoop.log.dir", conf.get("hadoop.log.dir"));
    conf.set("mapred.output.dir", conf.get("hadoop.tmp.dir"));
    super.setUp();
  }

  /**
@@ -31,6 +31,8 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;

@@ -561,7 +563,18 @@ public class PerformanceEvaluation implements HConstants {
    }

    MiniHBaseCluster hbaseMiniCluster = null;
    MiniDFSCluster dfsCluster = null;
    if (this.miniCluster) {
      dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
      // mangle the conf so that the fs parameter points to the minidfs we
      // just started up
      FileSystem fs = dfsCluster.getFileSystem();
      conf.set("fs.default.name", fs.getName());
      Path parentdir = fs.getHomeDirectory();
      conf.set(HConstants.HBASE_DIR, parentdir.toString());
      fs.mkdirs(parentdir);
      FSUtils.setVersion(fs, parentdir);

      hbaseMiniCluster = new MiniHBaseCluster(this.conf, N);
    }

@@ -577,6 +590,7 @@ public class PerformanceEvaluation implements HConstants {
    } finally {
      if(this.miniCluster && hbaseMiniCluster != null) {
        hbaseMiniCluster.shutdown();
        StaticTestEnvironment.shutdownDfs(dfsCluster);
      }
    }
  }
@@ -40,7 +40,7 @@ public class TestHBaseCluster extends HBaseClusterTestCase {

  /** constructor */
  public TestHBaseCluster() {
    super(true);
    super();
    this.desc = null;
    this.admin = null;
    this.table = null;
@@ -26,49 +26,37 @@ import java.net.URL;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;

/**
 * Testing, info servers are disabled. This test enables then and checks that
 * they serve pages.
 */
public class TestInfoServers extends HBaseTestCase {
public class TestInfoServers extends HBaseClusterTestCase {
  static final Log LOG = LogFactory.getLog(TestInfoServers.class);

  @Override
  protected void setUp() throws Exception {
    super.setUp();
  }

  @Override
  protected void tearDown() throws Exception {
    super.tearDown();
  protected void preHBaseClusterSetup() {
    // Bring up info servers on 'odd' port numbers in case the test is not
    // sourcing the src/test/hbase-default.xml.
    conf.setInt("hbase.master.info.port", 60011);
    conf.setInt("hbase.regionserver.info.port", 60031);
  }

  /**
   * @throws Exception
   */
  public void testInfoServersAreUp() throws Exception {
    // Bring up info servers on 'odd' port numbers in case the test is not
    // sourcing the src/test/hbase-default.xml.
    this.conf.setInt("hbase.master.info.port", 60011);
    this.conf.setInt("hbase.regionserver.info.port", 60031);
    MiniHBaseCluster miniHbase = new MiniHBaseCluster(this.conf, 1);
    // Create table so info servers are given time to spin up.
    HBaseAdmin a = new HBaseAdmin(conf);
    a.createTable(new HTableDescriptor(getName()));
    assertTrue(a.tableExists(new Text(getName())));
    try {
      int port = miniHbase.getMaster().getInfoServer().getPort();
      assertHasExpectedContent(new URL("http://localhost:" + port +
        "/index.html"), "Master");
      port = miniHbase.getRegionThreads().get(0).getRegionServer().
        getInfoServer().getPort();
      assertHasExpectedContent(new URL("http://localhost:" + port +
        "/index.html"), "Region Server");
    } finally {
      miniHbase.shutdown();
    }
    // give the cluster time to start up
    HTable table = new HTable(conf, new Text(".META."));

    int port = cluster.getMaster().getInfoServer().getPort();
    assertHasExpectedContent(new URL("http://localhost:" + port +
      "/index.html"), "Master");
    port = cluster.getRegionThreads().get(0).getRegionServer().
      getInfoServer().getPort();
    assertHasExpectedContent(new URL("http://localhost:" + port +
      "/index.html"), "Region Server");
  }

  private void assertHasExpectedContent(final URL u, final String expected)
@@ -40,7 +40,7 @@ public class TestMasterAdmin extends HBaseClusterTestCase {

  /** constructor */
  public TestMasterAdmin() {
    super(true);
    super();
    admin = null;

    // Make the thread wake frequency a little slower so other threads
@@ -25,8 +25,8 @@ import java.io.IOException;
public class TestMergeMeta extends AbstractMergeTestBase {

  /** constructor */
  public TestMergeMeta() {
    super();
  public TestMergeMeta() throws Exception {
    super(false);
    conf.setLong("hbase.client.pause", 1 * 1000);
    conf.setInt("hbase.client.retries.number", 2);
  }
@@ -32,11 +32,6 @@ public class TestMergeTable extends AbstractMergeTestBase {
   */
  public void testMergeTable() throws IOException {
    assertNotNull(dfsCluster);
    MiniHBaseCluster hCluster = new MiniHBaseCluster(conf, 1, dfsCluster, true);
    try {
      HMerge.merge(conf, dfsCluster.getFileSystem(), desc.getName());
    } finally {
      hCluster.shutdown();
    }
    HMerge.merge(conf, dfsCluster.getFileSystem(), desc.getName());
  }
}
@@ -0,0 +1,253 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase;

import java.io.IOException;
import java.util.TreeMap;

import org.apache.hadoop.hbase.HColumnDescriptor.CompressionType;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.HScannerInterface;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.HBaseTestCase;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
 * Tests user specifiable time stamps putting, getting and scanning. Also
 * tests same in presence of deletes. Test cores are written so can be
 * run against an HRegion and against an HTable: i.e. both local and remote.
 */
public class TimestampTestBase extends HBaseTestCase {
  private static final Log LOG =
    LogFactory.getLog(TimestampTestBase.class.getName());

  private static final long T0 = 10L;
  private static final long T1 = 100L;
  private static final long T2 = 200L;

  private static final String COLUMN_NAME = "contents:";

  private static final Text COLUMN = new Text(COLUMN_NAME);
  private static final Text ROW = new Text("row");

  // When creating column descriptor, how many versions of a cell to allow.
  private static final int VERSIONS = 3;

  /*
   * Run test that delete works according to description in <a
   * href="https://issues.apache.org/jira/browse/HADOOP-1784">hadoop-1784</a>.
   * @param incommon
   * @param flusher
   * @throws IOException
   */
  public static void doTestDelete(final Incommon incommon, FlushCache flusher)
  throws IOException {
    // Add values at various timestamps (Values are timestampes as bytes).
    put(incommon, T0);
    put(incommon, T1);
    put(incommon, T2);
    put(incommon);
    // Verify that returned versions match passed timestamps.
    assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T2, T1});
    // If I delete w/o specifying a timestamp, this means I'm deleting the
    // latest.
    delete(incommon);
    // Verify that I get back T2 through T1 -- that the latest version has
    // been deleted.
    assertVersions(incommon, new long [] {T2, T1, T0});

    // Flush everything out to disk and then retry
    flusher.flushcache();
    assertVersions(incommon, new long [] {T2, T1, T0});

    // Now add, back a latest so I can test remove other than the latest.
    put(incommon);
    assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T2, T1});
    delete(incommon, T2);
    assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T1, T0});
    // Flush everything out to disk and then retry
    flusher.flushcache();
    assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T1, T0});

    // Now try deleting all from T2 back inclusive (We first need to add T2
    // back into the mix and to make things a little interesting, delete and
    // then readd T1.
    put(incommon, T2);
    delete(incommon, T1);
    put(incommon, T1);
    incommon.deleteAll(ROW, COLUMN, T2);
    // Should only be current value in set. Assert this is so
    assertOnlyLatest(incommon, HConstants.LATEST_TIMESTAMP);

    // Flush everything out to disk and then redo above tests
    flusher.flushcache();
    assertOnlyLatest(incommon, HConstants.LATEST_TIMESTAMP);
  }

  private static void assertOnlyLatest(final Incommon incommon,
    final long currentTime)
  throws IOException {
    Cell[] cellValues = incommon.get(ROW, COLUMN, 3/*Ask for too much*/);
    assertEquals(1, cellValues.length);
    long time = Writables.bytesToLong(cellValues[0].getValue());
    assertEquals(time, currentTime);
    assertNull(incommon.get(ROW, COLUMN, T1, 3 /*Too many*/));
    assertTrue(assertScanContentTimestamp(incommon, T1) == 0);
  }

  /*
   * Assert that returned versions match passed in timestamps and that results
   * are returned in the right order. Assert that values when converted to
   * longs match the corresponding passed timestamp.
   * @param r
   * @param tss
   * @throws IOException
   */
  public static void assertVersions(final Incommon incommon, final long [] tss)
  throws IOException {
    // Assert that 'latest' is what we expect.
    byte [] bytes = incommon.get(ROW, COLUMN).getValue();
    assertEquals(Writables.bytesToLong(bytes), tss[0]);
    // Now assert that if we ask for multiple versions, that they come out in
    // order.
    Cell[] cellValues = incommon.get(ROW, COLUMN, tss.length);
    assertEquals(tss.length, cellValues.length);
    for (int i = 0; i < cellValues.length; i++) {
      long ts = Writables.bytesToLong(cellValues[i].getValue());
      assertEquals(ts, tss[i]);
    }
    // Specify a timestamp get multiple versions.
    cellValues = incommon.get(ROW, COLUMN, tss[0], cellValues.length - 1);
    for (int i = 1; i < cellValues.length; i++) {
      long ts = Writables.bytesToLong(cellValues[i].getValue());
      assertEquals(ts, tss[i]);
    }
    // Test scanner returns expected version
    assertScanContentTimestamp(incommon, tss[0]);
  }

  /*
   * Run test scanning different timestamps.
   * @param incommon
   * @param flusher
   * @throws IOException
   */
  public static void doTestTimestampScanning(final Incommon incommon,
    final FlushCache flusher)
  throws IOException {
    // Add a couple of values for three different timestamps.
    put(incommon, T0);
    put(incommon, T1);
    put(incommon, HConstants.LATEST_TIMESTAMP);
    // Get count of latest items.
    int count = assertScanContentTimestamp(incommon,
      HConstants.LATEST_TIMESTAMP);
    // Assert I get same count when I scan at each timestamp.
    assertEquals(count, assertScanContentTimestamp(incommon, T0));
    assertEquals(count, assertScanContentTimestamp(incommon, T1));
    // Flush everything out to disk and then retry
    flusher.flushcache();
    assertEquals(count, assertScanContentTimestamp(incommon, T0));
    assertEquals(count, assertScanContentTimestamp(incommon, T1));
  }

  /*
   * Assert that the scan returns only values < timestamp.
   * @param r
   * @param ts
   * @return Count of items scanned.
   * @throws IOException
   */
  public static int assertScanContentTimestamp(final Incommon in, final long ts)
  throws IOException {
    HScannerInterface scanner =
      in.getScanner(COLUMNS, HConstants.EMPTY_START_ROW, ts);
    int count = 0;
    try {
      HStoreKey key = new HStoreKey();
      TreeMap<Text, byte []>value = new TreeMap<Text, byte[]>();
      while (scanner.next(key, value)) {
        assertTrue(key.getTimestamp() <= ts);
        // Content matches the key or HConstants.LATEST_TIMESTAMP.
        // (Key does not match content if we 'put' with LATEST_TIMESTAMP).
        long l = Writables.bytesToLong(value.get(COLUMN));
        assertTrue(key.getTimestamp() == l ||
          HConstants.LATEST_TIMESTAMP == l);
        count++;
        value.clear();
      }
    } finally {
      scanner.close();
    }
    return count;
  }

  public static void put(final Incommon loader, final long ts)
  throws IOException {
    put(loader, Writables.longToBytes(ts), ts);
  }

  public static void put(final Incommon loader)
  throws IOException {
    long ts = HConstants.LATEST_TIMESTAMP;
    put(loader, Writables.longToBytes(ts), ts);
  }

  /*
   * Put values.
   * @param loader
   * @param bytes
   * @param ts
   * @throws IOException
   */
  public static void put(final Incommon loader, final byte [] bytes,
    final long ts)
  throws IOException {
    long lockid = loader.startUpdate(ROW);
    loader.put(lockid, COLUMN, bytes);
    if (ts == HConstants.LATEST_TIMESTAMP) {
      loader.commit(lockid);
    } else {
      loader.commit(lockid, ts);
    }
  }

  public static void delete(final Incommon loader) throws IOException {
    delete(loader, HConstants.LATEST_TIMESTAMP);
  }

  public static void delete(final Incommon loader, final long ts) throws IOException {
    long lockid = loader.startUpdate(ROW);
    loader.delete(lockid, COLUMN);
    if (ts == HConstants.LATEST_TIMESTAMP) {
      loader.commit(lockid);
    } else {
      loader.commit(lockid, ts);
    }
  }
}
@@ -0,0 +1,95 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.TreeMap;

import org.apache.hadoop.hbase.HColumnDescriptor.CompressionType;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.HScannerInterface;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.HBaseClusterTestCase;
import org.apache.hadoop.hbase.TimestampTestBase;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
 * Tests user specifiable time stamps putting, getting and scanning. Also
 * tests same in presence of deletes. Test cores are written so can be
 * run against an HRegion and against an HTable: i.e. both local and remote.
 */
public class TestTimestamp extends HBaseClusterTestCase {
  private static final Log LOG =
    LogFactory.getLog(TestTimestamp.class.getName());

  private static final String COLUMN_NAME = "contents:";
  private static final Text COLUMN = new Text(COLUMN_NAME);
  // When creating column descriptor, how many versions of a cell to allow.
  private static final int VERSIONS = 3;

  /** constructor */
  public TestTimestamp() {
    super();
  }

  /**
   * Basic test of timestamps.
   * Do the above tests from client side.
   * @throws IOException
   */
  public void testTimestamps() throws IOException {
    HTable t = createTable();
    Incommon incommon = new HTableIncommon(t);
    TimestampTestBase.doTestDelete(incommon, new FlushCache() {
      public void flushcache() throws IOException {
        cluster.flushcache();
      }
    });

    // Perhaps drop and readd the table between tests so the former does
    // not pollute this latter? Or put into separate tests.
    TimestampTestBase.doTestTimestampScanning(incommon, new FlushCache() {
      public void flushcache() throws IOException {
        cluster.flushcache();
      }
    });
  }

  /*
   * Create a table named TABLE_NAME.
   * @return An instance of an HTable connected to the created table.
   * @throws IOException
   */
  private HTable createTable() throws IOException {
    HTableDescriptor desc = new HTableDescriptor(getName());
    desc.addFamily(new HColumnDescriptor(COLUMN_NAME));
    HBaseAdmin admin = new HBaseAdmin(conf);
    admin.createTable(desc);
    return new HTable(conf, new Text(getName()));
  }
}
@@ -30,7 +30,6 @@ import junit.textui.TestRunner;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

@@ -42,7 +41,6 @@ import org.apache.hadoop.hbase.HScannerInterface;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.MultiRegionTable;
import org.apache.hadoop.hbase.StaticTestEnvironment;
import org.apache.hadoop.io.Text;

@@ -79,13 +77,9 @@ public class TestTableIndex extends MultiRegionTable {

  private HTableDescriptor desc;

  private MiniDFSCluster dfsCluster = null;
  private Path dir;
  private MiniHBaseCluster hCluster = null;

  /** {@inheritDoc} */
  @Override
  public void setUp() throws Exception {
  public TestTableIndex() {
    // Enable DEBUG-level MR logging.
    Logger.getLogger("org.apache.hadoop.mapred").setLevel(Level.DEBUG);

@@ -103,52 +97,23 @@ public class TestTableIndex extends MultiRegionTable {
    desc = new HTableDescriptor(TABLE_NAME);
    desc.addFamily(new HColumnDescriptor(INPUT_COLUMN));
    desc.addFamily(new HColumnDescriptor(OUTPUT_COLUMN));

    dfsCluster = new MiniDFSCluster(conf, 1, true, (String[]) null);
    // Set the hbase.rootdir to be the home directory in mini dfs.
    this.conf.set(HConstants.HBASE_DIR,
      this.dfsCluster.getFileSystem().getHomeDirectory().toString());

    // Must call super.setUp after mini dfs cluster is started or else
    // filesystem ends up being local

    super.setUp();

    try {
      dir = new Path("/hbase");
      fs.mkdirs(dir);

      // Start up HBase cluster
      hCluster = new MiniHBaseCluster(conf, 1, dfsCluster, true);

      // Create a table.
      HBaseAdmin admin = new HBaseAdmin(conf);
      admin.createTable(desc);

      // Populate a table into multiple regions
      makeMultiRegionTable(conf, hCluster, this.fs, TABLE_NAME, INPUT_COLUMN);

      // Verify table indeed has multiple regions
      HTable table = new HTable(conf, new Text(TABLE_NAME));
      Text[] startKeys = table.getStartKeys();
      assertTrue(startKeys.length > 1);
    } catch (Exception e) {
      StaticTestEnvironment.shutdownDfs(dfsCluster);
      throw e;
    }
    LOG.debug("\n\n\n\n\t\t\tSetup Complete\n\n\n\n");
  }

  /** {@inheritDoc} */
  @Override
  public void tearDown() throws Exception {
    super.tearDown();
  protected void postHBaseClusterSetup() throws Exception {
    // Create a table.
    HBaseAdmin admin = new HBaseAdmin(conf);
    admin.createTable(desc);

    if (hCluster != null) {
      hCluster.shutdown();
    }
    // Populate a table into multiple regions
    makeMultiRegionTable(conf, cluster, dfsCluster.getFileSystem(), TABLE_NAME,
      INPUT_COLUMN);

    StaticTestEnvironment.shutdownDfs(dfsCluster);
    // Verify table indeed has multiple regions
    HTable table = new HTable(conf, new Text(TABLE_NAME));
    Text[] startKeys = table.getStartKeys();
    assertTrue(startKeys.length > 1);
  }

  /**

@@ -260,7 +225,7 @@ public class TestTableIndex extends MultiRegionTable {
  private void verify() throws IOException {
    // Force a cache flush for every online region to ensure that when the
    // scanner takes its snapshot, all the updates have made it into the cache.
    for (HRegion r : hCluster.getRegionThreads().get(0).getRegionServer().
    for (HRegion r : cluster.getRegionThreads().get(0).getRegionServer().
        getOnlineRegions().values()) {
      HRegionIncommon region = new HRegionIncommon(r);
      region.flushcache();
@@ -26,7 +26,6 @@ import java.util.TreeMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.HColumnDescriptor;

@@ -35,7 +34,6 @@ import org.apache.hadoop.hbase.HScannerInterface;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.HStoreKey;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.MultiRegionTable;
import org.apache.hadoop.hbase.StaticTestEnvironment;
import org.apache.hadoop.hbase.io.BatchUpdate;

@@ -68,9 +66,7 @@ public class TestTableMapReduce extends MultiRegionTable {
    TEXT_OUTPUT_COLUMN
  };

  private MiniDFSCluster dfsCluster = null;
  private Path dir;
  private MiniHBaseCluster hCluster = null;

  private static byte[][] values = null;

@@ -110,46 +106,6 @@ public class TestTableMapReduce extends MultiRegionTable {
    conf.setInt("hbase.client.pause", 10 * 1000);
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public void setUp() throws Exception {
    dfsCluster = new MiniDFSCluster(conf, 1, true, (String[])null);
    // Set the hbase.rootdir to be the home directory in mini dfs.
    this.conf.set(HConstants.HBASE_DIR,
      this.dfsCluster.getFileSystem().getHomeDirectory().toString());

    // Must call super.setup() after starting mini dfs cluster. Otherwise
    // we get a local file system instead of hdfs

    super.setUp();
    try {
      dir = new Path("/hbase");
      fs.mkdirs(dir);
      // Start up HBase cluster
      // Only one region server. MultiRegionServer manufacturing code below
      // depends on there being one region server only.
      hCluster = new MiniHBaseCluster(conf, 1, dfsCluster, true);
      LOG.info("Master is at " + this.conf.get(HConstants.MASTER_ADDRESS));
    } catch (Exception e) {
      StaticTestEnvironment.shutdownDfs(dfsCluster);
      throw e;
    }
  }

  /**
   * {@inheritDoc}
   */
  @Override
  public void tearDown() throws Exception {
    super.tearDown();
    if(hCluster != null) {
      hCluster.shutdown();
    }
    StaticTestEnvironment.shutdownDfs(dfsCluster);
  }

  /**
   * Pass the given key and processed record reduce
   */

@@ -276,8 +232,8 @@ public class TestTableMapReduce extends MultiRegionTable {
    admin.createTable(desc);

    // Populate a table into multiple regions
    makeMultiRegionTable(conf, hCluster, fs, MULTI_REGION_TABLE_NAME,
      INPUT_COLUMN);
    makeMultiRegionTable(conf, cluster, dfsCluster.getFileSystem(),
      MULTI_REGION_TABLE_NAME, INPUT_COLUMN);

    // Verify table indeed has multiple regions
    HTable table = new HTable(conf, new Text(MULTI_REGION_TABLE_NAME));
@@ -28,8 +28,7 @@ import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.HBaseClusterTestCase;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.StaticTestEnvironment;

@@ -40,10 +39,8 @@ import org.apache.hadoop.hbase.io.BatchUpdate;
/**
 * Test log deletion as logs are rolled.
 */
public class TestLogRolling extends HBaseTestCase {
public class TestLogRolling extends HBaseClusterTestCase {
  private static final Log LOG = LogFactory.getLog(TestLogRolling.class);
  private MiniDFSCluster dfs;
  private MiniHBaseCluster cluster;
  private HRegionServer server;
  private HLog log;
  private String tableName;

@@ -54,37 +51,13 @@ public class TestLogRolling extends HBaseTestCase {
   * @throws Exception
   */
  public TestLogRolling() throws Exception {
    // start one regionserver and a minidfs.
    super();
    try {
      this.dfs = null;
      this.cluster = null;
      this.server = null;
      this.log = null;
      this.tableName = null;
      this.value = null;

      // Force a region split after every 768KB
      conf.setLong("hbase.hregion.max.filesize", 768L * 1024L);

      // We roll the log after every 32 writes
      conf.setInt("hbase.regionserver.maxlogentries", 32);

      // For less frequently updated regions flush after every 2 flushes
      conf.setInt("hbase.hregion.memcache.optionalflushcount", 2);

      // We flush the cache after every 8192 bytes
      conf.setInt("hbase.hregion.memcache.flush.size", 8192);

      // Make lease timeout longer, lease checks less frequent
      conf.setInt("hbase.master.lease.period", 10 * 1000);
      conf.setInt("hbase.master.lease.thread.wakefrequency", 5 * 1000);

      // Increase the amount of time between client retries
      conf.setLong("hbase.client.pause", 15 * 1000);

      // Reduce thread wake frequency so that other threads can get
      // a chance to run.
      conf.setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000);

      String className = this.getClass().getName();
      StringBuilder v = new StringBuilder(className);

@@ -99,50 +72,40 @@ public class TestLogRolling extends HBaseTestCase {
    }
  }

  /** {@inheritDoc} */
  // Need to override this setup so we can edit the config before it gets sent
  // to the cluster startup.
  @Override
  public void setUp() throws Exception {
    try {
      dfs = new MiniDFSCluster(conf, 2, true, (String[]) null);
      // Set the hbase.rootdir to be the home directory in mini dfs.
      this.conf.set(HConstants.HBASE_DIR,
        this.dfs.getFileSystem().getHomeDirectory().toString());
      super.setUp();
    } catch (Exception e) {
      StaticTestEnvironment.shutdownDfs(dfs);
      LOG.fatal("error during setUp: ", e);
      throw e;
    }
  }
  protected void preHBaseClusterSetup() {
    // Force a region split after every 768KB
    conf.setLong("hbase.hregion.max.filesize", 768L * 1024L);

  /** {@inheritDoc} */
  @Override
  public void tearDown() throws Exception {
    try {
      super.tearDown();
      if (cluster != null) {                  // shutdown mini HBase cluster
        cluster.shutdown();
      }
      StaticTestEnvironment.shutdownDfs(dfs);
    } catch (Exception e) {
      LOG.fatal("error in tearDown", e);
      throw e;
    }
    // We roll the log after every 32 writes
    conf.setInt("hbase.regionserver.maxlogentries", 32);

    // For less frequently updated regions flush after every 2 flushes
    conf.setInt("hbase.hregion.memcache.optionalflushcount", 2);

    // We flush the cache after every 8192 bytes
    conf.setInt("hbase.hregion.memcache.flush.size", 8192);

    // Make lease timeout longer, lease checks less frequent
    conf.setInt("hbase.master.lease.period", 10 * 1000);
    conf.setInt("hbase.master.lease.thread.wakefrequency", 5 * 1000);

    // Increase the amount of time between client retries
    conf.setLong("hbase.client.pause", 15 * 1000);

    // Reduce thread wake frequency so that other threads can get
    // a chance to run.
    conf.setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000);
  }

  private void startAndWriteData() throws Exception {
    cluster = new MiniHBaseCluster(conf, 1, dfs, true);
    try {
      Thread.sleep(10 * 1000);                  // Wait for region server to start
    } catch (InterruptedException e) {
      // continue
    }
    // When the META table can be opened, the region servers are running
    new HTable(conf, HConstants.META_TABLE_NAME);

    this.server = cluster.getRegionThreads().get(0).getRegionServer();
    this.log = server.getLog();

    // When the META table can be opened, the region servers are running
    new HTable(conf, HConstants.META_TABLE_NAME);

    // Create the test table and open it
    HTableDescriptor desc = new HTableDescriptor(tableName);
@@ -72,13 +72,8 @@ public class TestSplit extends MultiRegionTable {
   * @throws Exception
   */
  public void testBasicSplit() throws Exception {
    MiniDFSCluster cluster = null;
    HRegion region = null;
    try {
      cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
      // Set the hbase.rootdir to be the home directory in mini dfs.
      this.conf.set(HConstants.HBASE_DIR,
        cluster.getFileSystem().getHomeDirectory().toString());
      HTableDescriptor htd = createTableDescriptor(getName());
      region = createNewHRegion(htd, null, null);
      basicSplit(region);

@@ -87,9 +82,6 @@ public class TestSplit extends MultiRegionTable {
        region.close();
        region.getLog().closeAndDelete();
      }
      if (cluster != null) {
        StaticTestEnvironment.shutdownDfs(cluster);
      }
    }
  }

@ -21,22 +21,22 @@ package org.apache.hadoop.hbase.regionserver;
|
|||
import java.io.IOException;
|
||||
import java.util.TreeMap;
|
||||
|
||||
import org.apache.hadoop.dfs.MiniDFSCluster;
|
||||
|
||||
import org.apache.hadoop.hbase.HColumnDescriptor.CompressionType;
|
||||
import org.apache.hadoop.hbase.util.Writables;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.hbase.client.HTable;
|
||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.HBaseTestCase;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.HStoreKey;
|
||||
import org.apache.hadoop.hbase.StaticTestEnvironment;
|
||||
import org.apache.hadoop.hbase.MiniHBaseCluster;
|
||||
import org.apache.hadoop.hbase.HScannerInterface;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||
import org.apache.hadoop.hbase.io.Cell;
|
||||
import org.apache.hadoop.hbase.HBaseTestCase;
|
||||
import org.apache.hadoop.hbase.TimestampTestBase;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
||||
/**
|
||||
* Tests user specifiable time stamps putting, getting and scanning. Also
|
||||
|
@@ -44,44 +44,18 @@ import org.apache.hadoop.hbase.io.Cell;
 * run against an HRegion and against an HTable: i.e. both local and remote.
 */
public class TestTimestamp extends HBaseTestCase {
  private static final long T0 = 10L;
  private static final long T1 = 100L;
  private static final long T2 = 200L;

  private static final Log LOG =
    LogFactory.getLog(TestTimestamp.class.getName());

  private static final String COLUMN_NAME = "contents:";

  private static final Text COLUMN = new Text(COLUMN_NAME);
  private static final Text ROW = new Text("row");

  // When creating column descriptor, how many versions of a cell to allow.
  private static final int VERSIONS = 3;

  private MiniDFSCluster cluster;

  /** constructor */
  public TestTimestamp() {
    super();
    this.cluster = null;
  }

  /** {@inheritDoc} */
  @Override
  public void setUp() throws Exception {
    this.cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
    // Set the hbase.rootdir to be the home directory in mini dfs.
    this.conf.set(HConstants.HBASE_DIR,
      this.cluster.getFileSystem().getHomeDirectory().toString());
    super.setUp();
  }

  /** {@inheritDoc} */
  @Override
  public void tearDown() throws Exception {
    if (this.cluster != null) {
      StaticTestEnvironment.shutdownDfs(cluster);
    }
  }

  /**
   * Test that delete works according to description in <a
   * href="https://issues.apache.org/jira/browse/HADOOP-1784">hadoop-1784</a>.
@@ -91,11 +65,12 @@ public class TestTimestamp extends HBaseTestCase {
    final HRegion r = createRegion();
    try {
      final HRegionIncommon region = new HRegionIncommon(r);
      doTestDelete(region, region);
      TimestampTestBase.doTestDelete(region, region);
    } finally {
      r.close();
      r.getLog().closeAndDelete();
    }
    LOG.info("testDelete() finished");
  }

  /**
@@ -106,246 +81,12 @@ public class TestTimestamp extends HBaseTestCase {
    final HRegion r = createRegion();
    try {
      final HRegionIncommon region = new HRegionIncommon(r);
      doTestTimestampScanning(region, region);
      TimestampTestBase.doTestTimestampScanning(region, region);
    } finally {
      r.close();
      r.getLog().closeAndDelete();
    }
  }

  /**
   * Basic test of timestamps.
   * Do the above tests from client side.
   * @throws IOException
   */
  public void testTimestamps() throws IOException {
    final MiniHBaseCluster cluster =
      new MiniHBaseCluster(this.conf, 1, this.cluster, true);
    try {
      HTable t = createTable();
      Incommon incommon = new HTableIncommon(t);
      doTestDelete(incommon, new FlushCache() {
        public void flushcache() throws IOException {
          cluster.flushcache();
        }
      });

      // Perhaps drop and readd the table between tests so the former does
      // not pollute this latter? Or put into separate tests.
      doTestTimestampScanning(incommon, new FlushCache() {
        public void flushcache() throws IOException {
          cluster.flushcache();
        }
      });
    } catch (Exception e) {
      cluster.shutdown();
    }
  }

  /*
   * Run test that delete works according to description in <a
   * href="https://issues.apache.org/jira/browse/HADOOP-1784">hadoop-1784</a>.
   * @param incommon
   * @param flusher
   * @throws IOException
   */
  private void doTestDelete(final Incommon incommon, FlushCache flusher)
  throws IOException {
    // Add values at various timestamps (Values are timestamps as bytes).
    put(incommon, T0);
    put(incommon, T1);
    put(incommon, T2);
    put(incommon);
    // Verify that returned versions match passed timestamps.
    assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T2, T1});
    // If I delete w/o specifying a timestamp, this means I'm deleting the
    // latest.
    delete(incommon);
    // Verify that I get back T2 through T1 -- that the latest version has
    // been deleted.
    assertVersions(incommon, new long [] {T2, T1, T0});

    // Flush everything out to disk and then retry
    flusher.flushcache();
    assertVersions(incommon, new long [] {T2, T1, T0});

    // Now add back a latest so I can test remove other than the latest.
    put(incommon);
    assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T2, T1});
    delete(incommon, T2);
    assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T1, T0});
    // Flush everything out to disk and then retry
    flusher.flushcache();
    assertVersions(incommon, new long [] {HConstants.LATEST_TIMESTAMP, T1, T0});

    // Now try deleting all from T2 back inclusive (We first need to add T2
    // back into the mix and to make things a little interesting, delete and
    // then readd T1).
    put(incommon, T2);
    delete(incommon, T1);
    put(incommon, T1);
    incommon.deleteAll(ROW, COLUMN, T2);
    // Should only be current value in set. Assert this is so.
    assertOnlyLatest(incommon, HConstants.LATEST_TIMESTAMP);

    // Flush everything out to disk and then redo above tests
    flusher.flushcache();
    assertOnlyLatest(incommon, HConstants.LATEST_TIMESTAMP);
  }

  private void assertOnlyLatest(final Incommon incommon,
    final long currentTime)
  throws IOException {
    Cell[] cellValues = incommon.get(ROW, COLUMN, 3 /*Ask for too much*/);
    assertEquals(1, cellValues.length);
    long time = Writables.bytesToLong(cellValues[0].getValue());
    assertEquals(time, currentTime);
    assertNull(incommon.get(ROW, COLUMN, T1, 3 /*Too many*/));
    assertTrue(assertScanContentTimestamp(incommon, T1) == 0);
  }

  /*
   * Assert that returned versions match passed in timestamps and that results
   * are returned in the right order. Assert that values when converted to
   * longs match the corresponding passed timestamp.
   * @param r
   * @param tss
   * @throws IOException
   */
  private void assertVersions(final Incommon incommon, final long [] tss)
  throws IOException {
    // Assert that 'latest' is what we expect.
    byte [] bytes = incommon.get(ROW, COLUMN).getValue();
    assertEquals(Writables.bytesToLong(bytes), tss[0]);
    // Now assert that if we ask for multiple versions, that they come out in
    // order.
    Cell[] cellValues = incommon.get(ROW, COLUMN, tss.length);
    assertEquals(tss.length, cellValues.length);
    for (int i = 0; i < cellValues.length; i++) {
      long ts = Writables.bytesToLong(cellValues[i].getValue());
      assertEquals(ts, tss[i]);
    }
    // Specify a timestamp get multiple versions.
    cellValues = incommon.get(ROW, COLUMN, tss[0], cellValues.length - 1);
    for (int i = 1; i < cellValues.length; i++) {
      long ts = Writables.bytesToLong(cellValues[i].getValue());
      assertEquals(ts, tss[i]);
    }
    // Test scanner returns expected version
    assertScanContentTimestamp(incommon, tss[0]);
  }

  /*
   * Run test scanning different timestamps.
   * @param incommon
   * @param flusher
   * @throws IOException
   */
  private void doTestTimestampScanning(final Incommon incommon,
    final FlushCache flusher)
  throws IOException {
    // Add a couple of values for three different timestamps.
    put(incommon, T0);
    put(incommon, T1);
    put(incommon, HConstants.LATEST_TIMESTAMP);
    // Get count of latest items.
    int count = assertScanContentTimestamp(incommon,
      HConstants.LATEST_TIMESTAMP);
    // Assert I get same count when I scan at each timestamp.
    assertEquals(count, assertScanContentTimestamp(incommon, T0));
    assertEquals(count, assertScanContentTimestamp(incommon, T1));
    // Flush everything out to disk and then retry
    flusher.flushcache();
    assertEquals(count, assertScanContentTimestamp(incommon, T0));
    assertEquals(count, assertScanContentTimestamp(incommon, T1));
  }

  /*
   * Assert that the scan returns only values < timestamp.
   * @param r
   * @param ts
   * @return Count of items scanned.
   * @throws IOException
   */
  private int assertScanContentTimestamp(final Incommon in, final long ts)
  throws IOException {
    HScannerInterface scanner =
      in.getScanner(COLUMNS, HConstants.EMPTY_START_ROW, ts);
    int count = 0;
    try {
      HStoreKey key = new HStoreKey();
      TreeMap<Text, byte []> value = new TreeMap<Text, byte[]>();
      while (scanner.next(key, value)) {
        assertTrue(key.getTimestamp() <= ts);
        // Content matches the key or HConstants.LATEST_TIMESTAMP.
        // (Key does not match content if we 'put' with LATEST_TIMESTAMP).
        long l = Writables.bytesToLong(value.get(COLUMN));
        assertTrue(key.getTimestamp() == l ||
          HConstants.LATEST_TIMESTAMP == l);
        count++;
        value.clear();
      }
    } finally {
      scanner.close();
    }
    return count;
  }
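(A rough usage sketch of the scan-at-a-timestamp pattern wrapped by assertScanContentTimestamp above; illustrative only, assuming an Incommon instance named `in` and the COLUMNS constant from the test base class, and reusing only calls that appear in the diff.)

    // Scan with a cutoff timestamp; the loop sees only cells that satisfy
    // the cutoff, per the assertion in the method above.
    HScannerInterface scanner =
      in.getScanner(COLUMNS, HConstants.EMPTY_START_ROW, T1);
    try {
      HStoreKey key = new HStoreKey();
      TreeMap<Text, byte[]> results = new TreeMap<Text, byte[]>();
      while (scanner.next(key, results)) {
        // Stored value is either the cell's timestamp as bytes or
        // LATEST_TIMESTAMP, as checked above.
        long stored = Writables.bytesToLong(results.get(COLUMN));
        results.clear();
      }
    } finally {
      scanner.close();
    }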

  private void put(final Incommon loader, final long ts)
  throws IOException {
    put(loader, Writables.longToBytes(ts), ts);
  }

  private void put(final Incommon loader)
  throws IOException {
    long ts = HConstants.LATEST_TIMESTAMP;
    put(loader, Writables.longToBytes(ts), ts);
  }

  /*
   * Put values.
   * @param loader
   * @param bytes
   * @param ts
   * @throws IOException
   */
  private void put(final Incommon loader, final byte [] bytes,
    final long ts)
  throws IOException {
    long lockid = loader.startUpdate(ROW);
    loader.put(lockid, COLUMN, bytes);
    if (ts == HConstants.LATEST_TIMESTAMP) {
      loader.commit(lockid);
    } else {
      loader.commit(lockid, ts);
    }
  }

  private void delete(final Incommon loader) throws IOException {
    delete(loader, HConstants.LATEST_TIMESTAMP);
  }

  private void delete(final Incommon loader, final long ts) throws IOException {
    long lockid = loader.startUpdate(ROW);
    loader.delete(lockid, COLUMN);
    if (ts == HConstants.LATEST_TIMESTAMP) {
      loader.commit(lockid);
    } else {
      loader.commit(lockid, ts);
    }
  }
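(A rough usage sketch of the startUpdate/put/delete/commit pattern the helpers above wrap; illustrative only, assuming an Incommon instance named `incommon`, and reusing only calls that appear in the diff.)

    // Write a value at an explicit timestamp, then delete that version.
    long lockid = incommon.startUpdate(ROW);
    incommon.put(lockid, COLUMN, Writables.longToBytes(T1));
    incommon.commit(lockid, T1);          // committed at timestamp T1

    lockid = incommon.startUpdate(ROW);
    incommon.delete(lockid, COLUMN);
    incommon.commit(lockid, T1);          // delete targets the T1 version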

  /*
   * Create a table named TABLE_NAME.
   * @return An instance of an HTable connected to the created table.
   * @throws IOException
   */
  private HTable createTable() throws IOException {
    HTableDescriptor desc = new HTableDescriptor(getName());
    desc.addFamily(new HColumnDescriptor(COLUMN_NAME));
    HBaseAdmin admin = new HBaseAdmin(conf);
    admin.createTable(desc);
    return new HTable(conf, new Text(getName()));
    LOG.info("testTimestampScanning() finished");
  }

  private HRegion createRegion() throws IOException {