HBASE-5833 0.92 build has been failing pretty consistently on TestMasterFailover

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1329400 13f79535-47bb-0310-9956-ffa450edef68

parent f8c7f1b0fb
commit fabaf478ed
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HasThread;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -579,16 +580,17 @@ public class LruBlockCache implements BlockCache, HeapSize {
    */
   private static class EvictionThread extends HasThread {
     private WeakReference<LruBlockCache> cache;
+    private boolean go = true;
 
     public EvictionThread(LruBlockCache cache) {
-      super("LruBlockCache.EvictionThread");
+      super(Thread.currentThread().getName() + ".LruBlockCache.EvictionThread");
       setDaemon(true);
       this.cache = new WeakReference<LruBlockCache>(cache);
     }
 
     @Override
     public void run() {
-      while(true) {
+      while (this.go) {
         synchronized(this) {
           try {
             this.wait();
@@ -599,11 +601,17 @@ public class LruBlockCache implements BlockCache, HeapSize {
           cache.evict();
         }
       }
 
     public void evict() {
       synchronized(this) {
         this.notify(); // FindBugs NN_NAKED_NOTIFY
       }
     }
+
+    void shutdown() {
+      this.go = false;
+      interrupt();
+    }
   }
+
   /*
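The new shutdown() above pairs a stop flag with an interrupt so the eviction thread can exit its wait(). A minimal standalone sketch of the same pattern, with illustrative names not taken from HBase (note the patch's go field is not volatile; it relies on interrupt() to unblock the wait):

class StoppableWorker extends Thread {
  private volatile boolean go = true;

  @Override
  public void run() {
    while (this.go) {
      synchronized (this) {
        try {
          this.wait();                 // park until poked
        } catch (InterruptedException e) {
          // shutdown()'s interrupt lands here; the loop re-checks go and exits
        }
      }
      // do one unit of work per wakeup here
    }
  }

  synchronized void poke() {
    this.notify();                     // wake the worker for one pass
  }

  void shutdown() {
    this.go = false;                   // stop the loop ...
    interrupt();                       // ... and unblock a pending wait()
  }
}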
@@ -727,6 +735,14 @@ public class LruBlockCache implements BlockCache, HeapSize {
 
   public void shutdown() {
     this.scheduleThreadPool.shutdown();
+    for (int i = 0; i < 10; i++) {
+      if (!this.scheduleThreadPool.isShutdown()) Threads.sleep(10);
+    }
+    if (!this.scheduleThreadPool.isShutdown()) {
+      List<Runnable> runnables = this.scheduleThreadPool.shutdownNow();
+      LOG.debug("Still running " + runnables);
+    }
+    this.evictionThread.shutdown();
   }
 
   /** Clears the cache. Used in tests. */
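The shutdown() added above waits up to roughly 100 ms (ten 10 ms sleeps) before forcing the scheduler down with shutdownNow(). The same two-phase teardown can be written with the JDK's awaitTermination; a sketch for comparison, not code from the patch:

import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

class SchedulerOwner {
  private final ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);

  void shutdown() throws InterruptedException {
    pool.shutdown();                               // stop accepting new tasks
    if (!pool.awaitTermination(100, TimeUnit.MILLISECONDS)) {
      List<Runnable> dropped = pool.shutdownNow(); // cancel whatever is still queued
      System.err.println("Still running " + dropped);
    }
  }
}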
@@ -43,8 +43,8 @@ public class ExponentiallyDecayingSample implements Sample {
   private static final long RESCALE_THRESHOLD = TimeUnit.HOURS.toNanos(1);
 
   private static final ScheduledExecutorService TICK_SERVICE =
-      Executors.newScheduledThreadPool(1,
-          getNamedDaemonThreadFactory("decayingSampleTick"));
+      Executors.newScheduledThreadPool(1,
+          getNamedDaemonThreadFactory(Thread.currentThread().getName() + ".decayingSampleTick."));
 
   private static volatile long CURRENT_TICK =
       TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis());
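Both thread renames in this commit exist so that a leaked test thread can be traced back to the name of the thread that spawned it. A sketch of the same naming idea using Guava's ThreadFactoryBuilder (which LruBlockCache imports above); the class and name format below are illustrative, not the patch's getNamedDaemonThreadFactory:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ThreadFactory;
import com.google.common.util.concurrent.ThreadFactoryBuilder;

class TickServiceFactory {
  static ScheduledExecutorService newTickService() {
    ThreadFactory tf = new ThreadFactoryBuilder()
        .setNameFormat(Thread.currentThread().getName() + ".decayingSampleTick.%d")
        .setDaemon(true)   // daemon, so a leaked pool cannot pin the JVM
        .build();
    return Executors.newScheduledThreadPool(1, tf);
  }
}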
@@ -3525,7 +3525,11 @@ public class HRegion implements HeapSize { // , Writable{
    * bootstrap code in the HMaster constructor.
    * Note, this method creates an {@link HLog} for the created region. It
    * needs to be closed explicitly.  Use {@link HRegion#getLog()} to get
-   * access.
+   * access. <b>When done with a region created using this method, you will
+   * need to explicitly close the {@link HLog} it created too; it will not be
+   * done for you.  Not closing the log will leave at least a daemon thread
+   * running.</b>  Call {@link #closeHRegion(HRegion)} and it will do
+   * necessary cleanup for you.
    * @param info Info for region to create.
    * @param rootDir Root directory for HBase instance
    * @param conf
@@ -3540,6 +3544,23 @@ public class HRegion implements HeapSize { // , Writable{
     return createHRegion(info, rootDir, conf, hTableDescriptor, null);
   }
 
+  /**
+   * This will do the necessary cleanup a call to {@link #createHRegion(HRegionInfo, Path, Configuration, HTableDescriptor)}
+   * requires.  This method will close the region and then close its
+   * associated {@link HLog} file.  You use it if you call the other createHRegion,
+   * the one that takes an {@link HLog} instance but don't be surprised by the
+   * call to the {@link HLog#closeAndDelete()} on the {@link HLog} the
+   * HRegion was carrying.
+   * @param r
+   * @throws IOException
+   */
+  public static void closeHRegion(final HRegion r) throws IOException {
+    if (r == null) return;
+    r.close();
+    if (r.getLog() == null) return;
+    r.getLog().closeAndDelete();
+  }
+
   /**
    * Convenience method creating new HRegions. Used by createTable.
    * The {@link HLog} for the created region needs to be closed explicitly.
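The usage the new method is meant to enable, as the tests later in this patch apply it; a hedged sketch with the descriptor and paths left abstract:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;

class CloseHRegionUsage {
  static void withRegion(HRegionInfo info, Path rootDir, Configuration conf,
      HTableDescriptor htd) throws IOException {
    HRegion r = HRegion.createHRegion(info, rootDir, conf, htd);
    try {
      // exercise the region here
    } finally {
      HRegion.closeHRegion(r);  // closes the region, then its HLog, so the
                                // log's sync daemon thread does not leak
    }
  }
}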
@@ -223,13 +223,17 @@ public class JVMClusterUtil {
       final List<RegionServerThread> regionservers) {
     LOG.debug("Shutting down HBase Cluster");
     if (masters != null) {
+      // Do backups first.
+      JVMClusterUtil.MasterThread activeMaster = null;
       for (JVMClusterUtil.MasterThread t : masters) {
-        if (t.master.isActiveMaster()) {
-          t.master.shutdown();
-        } else {
+        if (!t.master.isActiveMaster()) {
           t.master.stopMaster();
+        } else {
+          activeMaster = t;
         }
       }
+      // Do active after.
+      if (activeMaster != null) activeMaster.master.shutdown();
     }
     if (regionservers != null) {
       for (RegionServerThread t : regionservers) {
@@ -151,6 +151,16 @@ public abstract class HBaseTestCase extends TestCase {
     return testUtil.getDataTestDir(testName);
   }
 
+  /**
+   * You must call close on the returned region and then close on the log file
+   * it created. Do {@link HRegion#close()} followed by {@link HRegion#getLog()}
+   * and on it call close.
+   * @param desc
+   * @param startKey
+   * @param endKey
+   * @return An {@link HRegion}
+   * @throws IOException
+   */
   protected HRegion createNewHRegion(HTableDescriptor desc, byte [] startKey,
       byte [] endKey)
   throws IOException {
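The close-region-then-close-log discipline the javadoc asks for, written out as this commit applies it elsewhere in the test suite (htd, startKey and endKey stand in for the caller's arguments):

HRegion region = createNewHRegion(htd, startKey, endKey);
try {
  // run assertions against the region
} finally {
  region.close();
  region.getLog().closeAndDelete();
}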
@@ -636,6 +646,11 @@ public abstract class HBaseTestCase extends TestCase {
     }
   }
 
+  /**
+   * You must call {@link #closeRootAndMeta()} when done after calling this
+   * method. It does cleanup.
+   * @throws IOException
+   */
   protected void createRootAndMetaRegions() throws IOException {
     root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, testDir,
         conf, HTableDescriptor.ROOT_TABLEDESC);
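Pairing the two fixture helpers named in the javadoc above; a sketch of the intended lifecycle inside a test:

createRootAndMetaRegions();
try {
  // test logic that needs ROOT and META online
} finally {
  closeRootAndMeta();   // releases the regions and their logs
}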
@@ -50,52 +50,56 @@ public class TestColumnPrefixFilter {
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     HRegion region = HRegion.createHRegion(info, TEST_UTIL.
         getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
-    List<String> rows = generateRandomWords(100, "row");
-    List<String> columns = generateRandomWords(10000, "column");
-    long maxTimestamp = 2;
+    try {
+      List<String> rows = generateRandomWords(100, "row");
+      List<String> columns = generateRandomWords(10000, "column");
+      long maxTimestamp = 2;
 
-    List<KeyValue> kvList = new ArrayList<KeyValue>();
+      List<KeyValue> kvList = new ArrayList<KeyValue>();
 
-    Map<String, List<KeyValue>> prefixMap = new HashMap<String,
-        List<KeyValue>>();
+      Map<String, List<KeyValue>> prefixMap = new HashMap<String,
+          List<KeyValue>>();
 
-    prefixMap.put("p", new ArrayList<KeyValue>());
-    prefixMap.put("s", new ArrayList<KeyValue>());
+      prefixMap.put("p", new ArrayList<KeyValue>());
+      prefixMap.put("s", new ArrayList<KeyValue>());
 
-    String valueString = "ValueString";
+      String valueString = "ValueString";
 
-    for (String row: rows) {
-      Put p = new Put(Bytes.toBytes(row));
-      p.setWriteToWAL(false);
-      for (String column: columns) {
-        for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
-          KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
-              valueString);
-          p.add(kv);
-          kvList.add(kv);
-          for (String s: prefixMap.keySet()) {
-            if (column.startsWith(s)) {
-              prefixMap.get(s).add(kv);
+      for (String row: rows) {
+        Put p = new Put(Bytes.toBytes(row));
+        p.setWriteToWAL(false);
+        for (String column: columns) {
+          for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
+            KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
+                valueString);
+            p.add(kv);
+            kvList.add(kv);
+            for (String s: prefixMap.keySet()) {
+              if (column.startsWith(s)) {
+                prefixMap.get(s).add(kv);
+              }
             }
           }
         }
+        region.put(p);
       }
-      region.put(p);
-    }
 
-    ColumnPrefixFilter filter;
-    Scan scan = new Scan();
-    scan.setMaxVersions();
-    for (String s: prefixMap.keySet()) {
-      filter = new ColumnPrefixFilter(Bytes.toBytes(s));
+      ColumnPrefixFilter filter;
+      Scan scan = new Scan();
+      scan.setMaxVersions();
+      for (String s: prefixMap.keySet()) {
+        filter = new ColumnPrefixFilter(Bytes.toBytes(s));
 
-      scan.setFilter(filter);
+        scan.setFilter(filter);
 
-      InternalScanner scanner = region.getScanner(scan);
-      List<KeyValue> results = new ArrayList<KeyValue>();
-      while(scanner.next(results));
-      assertEquals(prefixMap.get(s).size(), results.size());
+        InternalScanner scanner = region.getScanner(scan);
+        List<KeyValue> results = new ArrayList<KeyValue>();
+        while(scanner.next(results));
+        assertEquals(prefixMap.get(s).size(), results.size());
+      }
+    } finally {
+      region.close();
+      region.getLog().closeAndDelete();
     }
 
-    region.close();
@@ -110,55 +114,59 @@ public class TestColumnPrefixFilter {
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     HRegion region = HRegion.createHRegion(info, TEST_UTIL.
         getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
-    List<String> rows = generateRandomWords(100, "row");
-    List<String> columns = generateRandomWords(10000, "column");
-    long maxTimestamp = 2;
+    try {
+      List<String> rows = generateRandomWords(100, "row");
+      List<String> columns = generateRandomWords(10000, "column");
+      long maxTimestamp = 2;
 
-    List<KeyValue> kvList = new ArrayList<KeyValue>();
+      List<KeyValue> kvList = new ArrayList<KeyValue>();
 
-    Map<String, List<KeyValue>> prefixMap = new HashMap<String,
-        List<KeyValue>>();
+      Map<String, List<KeyValue>> prefixMap = new HashMap<String,
+          List<KeyValue>>();
 
-    prefixMap.put("p", new ArrayList<KeyValue>());
-    prefixMap.put("s", new ArrayList<KeyValue>());
+      prefixMap.put("p", new ArrayList<KeyValue>());
+      prefixMap.put("s", new ArrayList<KeyValue>());
 
-    String valueString = "ValueString";
+      String valueString = "ValueString";
 
-    for (String row: rows) {
-      Put p = new Put(Bytes.toBytes(row));
-      p.setWriteToWAL(false);
-      for (String column: columns) {
-        for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
-          KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
-              valueString);
-          p.add(kv);
-          kvList.add(kv);
-          for (String s: prefixMap.keySet()) {
-            if (column.startsWith(s)) {
-              prefixMap.get(s).add(kv);
+      for (String row: rows) {
+        Put p = new Put(Bytes.toBytes(row));
+        p.setWriteToWAL(false);
+        for (String column: columns) {
+          for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
+            KeyValue kv = KeyValueTestUtil.create(row, family, column, timestamp,
+                valueString);
+            p.add(kv);
+            kvList.add(kv);
+            for (String s: prefixMap.keySet()) {
+              if (column.startsWith(s)) {
+                prefixMap.get(s).add(kv);
+              }
             }
           }
         }
+        region.put(p);
       }
-      region.put(p);
-    }
 
-    ColumnPrefixFilter filter;
-    Scan scan = new Scan();
-    scan.setMaxVersions();
-    for (String s: prefixMap.keySet()) {
-      filter = new ColumnPrefixFilter(Bytes.toBytes(s));
+      ColumnPrefixFilter filter;
+      Scan scan = new Scan();
+      scan.setMaxVersions();
+      for (String s: prefixMap.keySet()) {
+        filter = new ColumnPrefixFilter(Bytes.toBytes(s));
 
-      //this is how this test differs from the one above
-      FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
-      filterList.addFilter(filter);
-      scan.setFilter(filterList);
+        //this is how this test differs from the one above
+        FilterList filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL);
+        filterList.addFilter(filter);
+        scan.setFilter(filterList);
 
-      InternalScanner scanner = region.getScanner(scan);
-      List<KeyValue> results = new ArrayList<KeyValue>();
-      while(scanner.next(results));
-      assertEquals(prefixMap.get(s).size(), results.size());
+        InternalScanner scanner = region.getScanner(scan);
+        List<KeyValue> results = new ArrayList<KeyValue>();
+        while(scanner.next(results));
+        assertEquals(prefixMap.get(s).size(), results.size());
+      }
+    } finally {
+      region.close();
+      region.getLog().closeAndDelete();
    }
 
-    region.close();
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
@@ -56,6 +57,55 @@ import org.junit.experimental.categories.Category;
 public class TestMasterFailover {
   private static final Log LOG = LogFactory.getLog(TestMasterFailover.class);
 
+  @Test (timeout=180000)
+  public void testShouldCheckMasterFailOverWhenMETAIsInOpenedState()
+      throws Exception {
+    LOG.info("Starting testShouldCheckMasterFailOverWhenMETAIsInOpenedState");
+    final int NUM_MASTERS = 1;
+    final int NUM_RS = 2;
+
+    Configuration conf = HBaseConfiguration.create();
+    conf.setInt("hbase.master.assignment.timeoutmonitor.period", 2000);
+    conf.setInt("hbase.master.assignment.timeoutmonitor.timeout", 8000);
+    // Start the cluster
+    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
+
+    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
+    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+
+    // Find regionserver carrying meta.
+    List<RegionServerThread> regionServerThreads =
+      cluster.getRegionServerThreads();
+    int count = -1;
+    HRegion metaRegion = null;
+    for (RegionServerThread regionServerThread : regionServerThreads) {
+      HRegionServer regionServer = regionServerThread.getRegionServer();
+      metaRegion = regionServer.getOnlineRegion(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
+      count++;
+      regionServer.abort("");
+      if (null != metaRegion) break;
+    }
+    HRegionServer regionServer = cluster.getRegionServer(count);
+
+    TEST_UTIL.shutdownMiniHBaseCluster();
+
+    // Create a ZKW to use in the test
+    ZooKeeperWatcher zkw =
+      HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
+          metaRegion, regionServer.getServerName());
+
+    LOG.info("Staring cluster for second time");
+    TEST_UTIL.startMiniHBaseCluster(NUM_MASTERS, NUM_RS);
+
+    // Failover should be completed, now wait for no RIT
+    log("Waiting for no more RIT");
+    ZKAssign.blockUntilNoRIT(zkw);
+
+    zkw.close();
+    // Stop the cluster
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
   /**
    * Simple test of master failover.
    * <p>
@@ -101,6 +151,7 @@ public class TestMasterFailover {
     }
     assertEquals(1, numActive);
     assertEquals(NUM_MASTERS, masterThreads.size());
+    LOG.info("Active master " + activeName);
 
     // Check that ClusterStatus reports the correct active and backup masters
     assertNotNull(active);
@@ -110,16 +161,16 @@ public class TestMasterFailover {
     assertEquals(2, status.getBackupMasters().size());
 
     // attempt to stop one of the inactive masters
-    LOG.debug("\n\nStopping a backup master\n");
    int backupIndex = (activeIndex == 0 ? 1 : activeIndex - 1);
+    HMaster master = cluster.getMaster(backupIndex);
+    LOG.debug("\n\nStopping a backup master: " + master.getServerName() + "\n");
     cluster.stopMaster(backupIndex, false);
     cluster.waitOnMaster(backupIndex);
 
-    // verify still one active master and it's the same
+    // Verify still one active master and it's the same
     for (int i = 0; i < masterThreads.size(); i++) {
       if (masterThreads.get(i).getMaster().isActiveMaster()) {
-        assertTrue(activeName.equals(
-          masterThreads.get(i).getMaster().getServerName()));
+        assertTrue(activeName.equals(masterThreads.get(i).getMaster().getServerName()));
         activeIndex = i;
         active = masterThreads.get(activeIndex).getMaster();
       }
@@ -127,7 +178,7 @@ public class TestMasterFailover {
     assertEquals(1, numActive);
     assertEquals(2, masterThreads.size());
     int rsCount = masterThreads.get(activeIndex).getMaster().getClusterStatus().getServersSize();
-    LOG.info("Active master managing " + rsCount + " regions servers");
+    LOG.info("Active master " + active.getServerName() + " managing " + rsCount + " regions servers");
     assertEquals(3, rsCount);
 
     // Check that ClusterStatus reports the correct active and backup masters
@@ -138,7 +189,7 @@ public class TestMasterFailover {
     assertEquals(1, status.getBackupMasters().size());
 
     // kill the active master
-    LOG.debug("\n\nStopping the active master\n");
+    LOG.debug("\n\nStopping the active master " + active.getServerName() + "\n");
     cluster.stopMaster(activeIndex, false);
     cluster.waitOnMaster(activeIndex);
 
@@ -159,7 +210,7 @@ public class TestMasterFailover {
     assertEquals(0, status.getBackupMastersSize());
     assertEquals(0, status.getBackupMasters().size());
     int rss = status.getServersSize();
-    LOG.info("Active master " + mastername.getHostname() + " managing " +
+    LOG.info("Active master " + mastername.getServerName() + " managing " +
       rss + " region servers");
     assertEquals(3, rss);
 
@@ -167,83 +218,6 @@ public class TestMasterFailover {
     TEST_UTIL.shutdownMiniCluster();
   }
 
-  @Test
-  public void testShouldCheckMasterFailOverWhenMETAIsInOpenedState()
-      throws Exception {
-    final int NUM_MASTERS = 1;
-    final int NUM_RS = 2;
-
-    Configuration conf = HBaseConfiguration.create();
-    conf.setInt("hbase.master.assignment.timeoutmonitor.period", 2000);
-    conf.setInt("hbase.master.assignment.timeoutmonitor.timeout", 8000);
-    // Start the cluster
-    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
-    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
-    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-
-    // get all the master threads
-    List<MasterThread> masterThreads = cluster.getMasterThreads();
-
-    // wait for each to come online
-    for (MasterThread mt : masterThreads) {
-      assertTrue(mt.isAlive());
-    }
-
-    // verify only one is the active master and we have right number
-    int numActive = 0;
-    ServerName activeName = null;
-    for (int i = 0; i < masterThreads.size(); i++) {
-      if (masterThreads.get(i).getMaster().isActiveMaster()) {
-        numActive++;
-        activeName = masterThreads.get(i).getMaster().getServerName();
-      }
-    }
-    assertEquals(1, numActive);
-    assertEquals(NUM_MASTERS, masterThreads.size());
-
-    // verify still one active master and it's the same
-    for (int i = 0; i < masterThreads.size(); i++) {
-      if (masterThreads.get(i).getMaster().isActiveMaster()) {
-        assertTrue(activeName.equals(masterThreads.get(i).getMaster()
-            .getServerName()));
-      }
-    }
-    assertEquals(1, numActive);
-    assertEquals(1, masterThreads.size());
-
-    List<RegionServerThread> regionServerThreads = cluster
-        .getRegionServerThreads();
-    int count = -1;
-    HRegion metaRegion = null;
-    for (RegionServerThread regionServerThread : regionServerThreads) {
-      HRegionServer regionServer = regionServerThread.getRegionServer();
-      metaRegion = regionServer
-          .getOnlineRegion(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
-      count++;
-      regionServer.abort("");
-      if (null != metaRegion) {
-        break;
-      }
-    }
-    HRegionServer regionServer = cluster.getRegionServer(count);
-
-    cluster.shutdown();
-    // Create a ZKW to use in the test
-    ZooKeeperWatcher zkw =
-      HBaseTestingUtility.createAndForceNodeToOpenedState(TEST_UTIL,
-          metaRegion, regionServer.getServerName());
-
-    TEST_UTIL.startMiniHBaseCluster(1, 1);
-
-    // Failover should be completed, now wait for no RIT
-    log("Waiting for no more RIT");
-    ZKAssign.blockUntilNoRIT(zkw);
-
-    // Stop the cluster
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
 
   /**
    * Complex test of master failover that tests as many permutations of the
    * different possible states that regions in transition could be in within ZK.
@@ -379,7 +353,7 @@ public class TestMasterFailover {
     FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);
 
     HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null);
-    HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled);
+    createRegion(hriEnabled, rootdir, conf, htdEnabled);
 
     List<HRegionInfo> enabledRegions = TEST_UTIL.createMultiRegionsInMeta(
       TEST_UTIL.getConfiguration(), htdEnabled, SPLIT_KEYS);
@@ -390,7 +364,7 @@ public class TestMasterFailover {
     // Write the .tableinfo
     FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled);
     HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
-    HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled);
+    createRegion(hriDisabled, rootdir, conf, htdDisabled);
     List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
       TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);
 
@@ -692,7 +666,7 @@ public class TestMasterFailover {
     FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);
     HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(),
         null, null);
-    HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled);
+    createRegion(hriEnabled, rootdir, conf, htdEnabled);
 
     List<HRegionInfo> enabledRegions = TEST_UTIL.createMultiRegionsInMeta(
       TEST_UTIL.getConfiguration(), htdEnabled, SPLIT_KEYS);
@@ -703,7 +677,7 @@ public class TestMasterFailover {
     // Write the .tableinfo
     FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled);
     HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null);
-    HRegion.createHRegion(hriDisabled, rootdir, conf, htdDisabled);
+    createRegion(hriDisabled, rootdir, conf, htdDisabled);
 
     List<HRegionInfo> disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
       TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);
@@ -1024,6 +998,19 @@ public class TestMasterFailover {
     TEST_UTIL.shutdownMiniCluster();
   }
 
+  HRegion createRegion(final HRegionInfo hri, final Path rootdir, final Configuration c,
+      final HTableDescriptor htd)
+  throws IOException {
+    HRegion r = HRegion.createHRegion(hri, rootdir, c, htd);
+    // The above call to create a region will create an hlog file.  Each
+    // log file create will also create a running thread to do syncing.  We need
+    // to close out this log else we will have a running thread trying to sync
+    // the file system continuously which is ugly when dfs is taken away at the
+    // end of the test.
+    HRegion.closeHRegion(r);
+    return r;
+  }
+
   // TODO: Next test to add is with testing permutations of the RIT or the RS
   // killed are hosting ROOT and META regions.
 
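The contrast the helper above encodes, using the surrounding tests' variable names:

// Before: leaves the new region's HLog open, and with it a running sync thread.
HRegion.createHRegion(hriEnabled, rootdir, conf, htdEnabled);
// After: create, then immediately close region and log via the helper, so the
// master can later open the region through normal assignment without a leak.
createRegion(hriEnabled, rootdir, conf, htdEnabled);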
@@ -112,6 +112,7 @@ public class TestOpenedRegionHandler {
   @Test
   public void testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches()
       throws Exception {
+    HRegion region = null;
     try {
       int testIndex = 0;
       TEST_UTIL.startMiniZKCluster();
@@ -120,8 +121,7 @@ public class TestOpenedRegionHandler {
           "testShouldNotCompeleteOpenedRegionSuccessfullyIfVersionMismatches");
       HRegionInfo hri = new HRegionInfo(htd.getName(),
           Bytes.toBytes(testIndex), Bytes.toBytes(testIndex + 1));
-      HRegion region = HRegion.createHRegion(hri, TEST_UTIL
-          .getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
+      region = HRegion.createHRegion(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
       assertNotNull(region);
       AssignmentManager am = Mockito.mock(AssignmentManager.class);
       when(am.isRegionInTransition(hri)).thenReturn(
@@ -160,6 +160,8 @@ public class TestOpenedRegionHandler {
       assertEquals("The region should not be opened successfully.", regionName,
           region.getRegionInfo().getEncodedName());
     } finally {
+      region.close();
+      region.getLog().closeAndDelete();
       TEST_UTIL.shutdownMiniZKCluster();
     }
   }
@@ -85,7 +85,16 @@ public class TestBlocksRead extends HBaseTestCase {
     EnvironmentEdgeManagerTestHelper.reset();
   }
 
-  private void initHRegion(byte[] tableName, String callingMethod,
+  /**
+   * Callers must afterward call {@link HRegion#closeHRegion(HRegion)}
+   * @param tableName
+   * @param callingMethod
+   * @param conf
+   * @param families
+   * @throws IOException
+   * @return created and initialized region.
+   */
+  private HRegion initHRegion(byte[] tableName, String callingMethod,
       HBaseConfiguration conf, String family) throws IOException {
     HTableDescriptor htd = new HTableDescriptor(tableName);
     HColumnDescriptor familyDesc;
@@ -99,8 +108,9 @@ public class TestBlocksRead extends HBaseTestCase {
 
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
     Path path = new Path(DIR + callingMethod);
-    region = HRegion.createHRegion(info, path, conf, htd);
+    HRegion r = HRegion.createHRegion(info, path, conf, htd);
     blockCache = new CacheConfig(conf).getBlockCache();
+    return r;
   }
 
   private void putData(String family, String row, String col, long version)
@@ -212,45 +222,50 @@ public class TestBlocksRead extends HBaseTestCase {
     String FAMILY = "cf1";
     KeyValue kvs[];
     HBaseConfiguration conf = getConf();
-    initHRegion(TABLE, getName(), conf, FAMILY);
+    this.region = initHRegion(TABLE, getName(), conf, FAMILY);
 
-    putData(FAMILY, "row", "col1", 1);
-    putData(FAMILY, "row", "col2", 2);
-    putData(FAMILY, "row", "col3", 3);
-    putData(FAMILY, "row", "col4", 4);
-    putData(FAMILY, "row", "col5", 5);
-    putData(FAMILY, "row", "col6", 6);
-    putData(FAMILY, "row", "col7", 7);
-    region.flushcache();
+    try {
+      putData(FAMILY, "row", "col1", 1);
+      putData(FAMILY, "row", "col2", 2);
+      putData(FAMILY, "row", "col3", 3);
+      putData(FAMILY, "row", "col4", 4);
+      putData(FAMILY, "row", "col5", 5);
+      putData(FAMILY, "row", "col6", 6);
+      putData(FAMILY, "row", "col7", 7);
+      region.flushcache();
 
-    // Expected block reads: 1
-    // The top block has the KV we are
-    // interested. So only 1 seek is needed.
-    kvs = getData(FAMILY, "row", "col1", 1);
-    assertEquals(1, kvs.length);
-    verifyData(kvs[0], "row", "col1", 1);
+      // Expected block reads: 1
+      // The top block has the KV we are
+      // interested. So only 1 seek is needed.
+      kvs = getData(FAMILY, "row", "col1", 1);
+      assertEquals(1, kvs.length);
+      verifyData(kvs[0], "row", "col1", 1);
 
-    // Expected block reads: 2
-    // The top block and next block has the KVs we are
-    // interested. So only 2 seek is needed.
-    kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2"), 2);
-    assertEquals(2, kvs.length);
-    verifyData(kvs[0], "row", "col1", 1);
-    verifyData(kvs[1], "row", "col2", 2);
+      // Expected block reads: 2
+      // The top block and next block has the KVs we are
+      // interested. So only 2 seek is needed.
+      kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2"), 2);
+      assertEquals(2, kvs.length);
+      verifyData(kvs[0], "row", "col1", 1);
+      verifyData(kvs[1], "row", "col2", 2);
 
-    // Expected block reads: 3
-    // The first 2 seeks is to find out col2. [HBASE-4443]
-    // One additional seek for col3
-    // So 3 seeks are needed.
-    kvs = getData(FAMILY, "row", Arrays.asList("col2", "col3"), 3);
-    assertEquals(2, kvs.length);
-    verifyData(kvs[0], "row", "col2", 2);
-    verifyData(kvs[1], "row", "col3", 3);
+      // Expected block reads: 3
+      // The first 2 seeks is to find out col2. [HBASE-4443]
+      // One additional seek for col3
+      // So 3 seeks are needed.
+      kvs = getData(FAMILY, "row", Arrays.asList("col2", "col3"), 3);
+      assertEquals(2, kvs.length);
+      verifyData(kvs[0], "row", "col2", 2);
+      verifyData(kvs[1], "row", "col3", 3);
 
-    // Expected block reads: 2. [HBASE-4443]
-    kvs = getData(FAMILY, "row", Arrays.asList("col5"), 2);
-    assertEquals(1, kvs.length);
-    verifyData(kvs[0], "row", "col5", 5);
+      // Expected block reads: 2. [HBASE-4443]
+      kvs = getData(FAMILY, "row", Arrays.asList("col5"), 2);
+      assertEquals(1, kvs.length);
+      verifyData(kvs[0], "row", "col5", 5);
+    } finally {
+      HRegion.closeHRegion(this.region);
+      this.region = null;
+    }
   }
 
   /**
@@ -264,97 +279,102 @@ public class TestBlocksRead extends HBaseTestCase {
     String FAMILY = "cf1";
     KeyValue kvs[];
     HBaseConfiguration conf = getConf();
-    initHRegion(TABLE, getName(), conf, FAMILY);
+    this.region = initHRegion(TABLE, getName(), conf, FAMILY);
 
-    // File 1
-    putData(FAMILY, "row", "col1", 1);
-    putData(FAMILY, "row", "col2", 2);
-    region.flushcache();
+    try {
+      // File 1
+      putData(FAMILY, "row", "col1", 1);
+      putData(FAMILY, "row", "col2", 2);
+      region.flushcache();
 
-    // File 2
-    putData(FAMILY, "row", "col1", 3);
-    putData(FAMILY, "row", "col2", 4);
-    region.flushcache();
+      // File 2
+      putData(FAMILY, "row", "col1", 3);
+      putData(FAMILY, "row", "col2", 4);
+      region.flushcache();
 
-    // Expected blocks read: 1.
-    // File 2's top block is also the KV we are
-    // interested. So only 1 seek is needed.
-    kvs = getData(FAMILY, "row", Arrays.asList("col1"), 1);
-    assertEquals(1, kvs.length);
-    verifyData(kvs[0], "row", "col1", 3);
+      // Expected blocks read: 1.
+      // File 2's top block is also the KV we are
+      // interested. So only 1 seek is needed.
+      kvs = getData(FAMILY, "row", Arrays.asList("col1"), 1);
+      assertEquals(1, kvs.length);
+      verifyData(kvs[0], "row", "col1", 3);
 
-    // Expected blocks read: 2
-    // File 2's top block has the "col1" KV we are
-    // interested. We also need "col2" which is in a block
-    // of its own. So, we need that block as well.
-    kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2"), 2);
-    assertEquals(2, kvs.length);
-    verifyData(kvs[0], "row", "col1", 3);
-    verifyData(kvs[1], "row", "col2", 4);
+      // Expected blocks read: 2
+      // File 2's top block has the "col1" KV we are
+      // interested. We also need "col2" which is in a block
+      // of its own. So, we need that block as well.
+      kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2"), 2);
+      assertEquals(2, kvs.length);
+      verifyData(kvs[0], "row", "col1", 3);
+      verifyData(kvs[1], "row", "col2", 4);
 
-    // File 3: Add another column
-    putData(FAMILY, "row", "col3", 5);
-    region.flushcache();
+      // File 3: Add another column
+      putData(FAMILY, "row", "col3", 5);
+      region.flushcache();
 
-    // Expected blocks read: 1
-    // File 3's top block has the "col3" KV we are
-    // interested. So only 1 seek is needed.
-    kvs = getData(FAMILY, "row", "col3", 1);
-    assertEquals(1, kvs.length);
-    verifyData(kvs[0], "row", "col3", 5);
+      // Expected blocks read: 1
+      // File 3's top block has the "col3" KV we are
+      // interested. So only 1 seek is needed.
+      kvs = getData(FAMILY, "row", "col3", 1);
+      assertEquals(1, kvs.length);
+      verifyData(kvs[0], "row", "col3", 5);
 
-    // Get a column from older file.
-    // For ROWCOL Bloom filter: Expected blocks read: 1.
-    // For ROW Bloom filter: Expected blocks read: 2.
-    // For NONE Bloom filter: Expected blocks read: 2.
-    kvs = getData(FAMILY, "row", Arrays.asList("col1"), 1, 2, 2);
-    assertEquals(1, kvs.length);
-    verifyData(kvs[0], "row", "col1", 3);
+      // Get a column from older file.
+      // For ROWCOL Bloom filter: Expected blocks read: 1.
+      // For ROW Bloom filter: Expected blocks read: 2.
+      // For NONE Bloom filter: Expected blocks read: 2.
+      kvs = getData(FAMILY, "row", Arrays.asList("col1"), 1, 2, 2);
+      assertEquals(1, kvs.length);
+      verifyData(kvs[0], "row", "col1", 3);
 
-    // File 4: Delete the entire row.
-    deleteFamily(FAMILY, "row", 6);
-    region.flushcache();
+      // File 4: Delete the entire row.
+      deleteFamily(FAMILY, "row", 6);
+      region.flushcache();
 
-    // For ROWCOL Bloom filter: Expected blocks read: 2.
-    // For ROW Bloom filter: Expected blocks read: 3.
-    // For NONE Bloom filter: Expected blocks read: 3.
-    kvs = getData(FAMILY, "row", "col1", 2, 3, 3);
-    assertEquals(0, kvs.length);
-    kvs = getData(FAMILY, "row", "col2", 3, 4, 4);
-    assertEquals(0, kvs.length);
-    kvs = getData(FAMILY, "row", "col3", 2);
-    assertEquals(0, kvs.length);
-    kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2", "col3"), 4);
-    assertEquals(0, kvs.length);
+      // For ROWCOL Bloom filter: Expected blocks read: 2.
+      // For ROW Bloom filter: Expected blocks read: 3.
+      // For NONE Bloom filter: Expected blocks read: 3.
+      kvs = getData(FAMILY, "row", "col1", 2, 3, 3);
+      assertEquals(0, kvs.length);
+      kvs = getData(FAMILY, "row", "col2", 3, 4, 4);
+      assertEquals(0, kvs.length);
+      kvs = getData(FAMILY, "row", "col3", 2);
+      assertEquals(0, kvs.length);
+      kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2", "col3"), 4);
+      assertEquals(0, kvs.length);
 
-    // File 5: Delete
-    deleteFamily(FAMILY, "row", 10);
-    region.flushcache();
+      // File 5: Delete
+      deleteFamily(FAMILY, "row", 10);
+      region.flushcache();
 
-    // File 6: some more puts, but with timestamps older than the
-    // previous delete.
-    putData(FAMILY, "row", "col1", 7);
-    putData(FAMILY, "row", "col2", 8);
-    putData(FAMILY, "row", "col3", 9);
-    region.flushcache();
+      // File 6: some more puts, but with timestamps older than the
+      // previous delete.
+      putData(FAMILY, "row", "col1", 7);
+      putData(FAMILY, "row", "col2", 8);
+      putData(FAMILY, "row", "col3", 9);
+      region.flushcache();
 
-    // Baseline expected blocks read: 8. [HBASE-4532]
-    kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2", "col3"), 5);
-    assertEquals(0, kvs.length);
+      // Baseline expected blocks read: 8. [HBASE-4532]
+      kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2", "col3"), 5);
+      assertEquals(0, kvs.length);
 
-    // File 7: Put back new data
-    putData(FAMILY, "row", "col1", 11);
-    putData(FAMILY, "row", "col2", 12);
-    putData(FAMILY, "row", "col3", 13);
-    region.flushcache();
+      // File 7: Put back new data
+      putData(FAMILY, "row", "col1", 11);
+      putData(FAMILY, "row", "col2", 12);
+      putData(FAMILY, "row", "col3", 13);
+      region.flushcache();
 
-
-    // Expected blocks read: 5. [HBASE-4585]
-    kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2", "col3"), 5);
-    assertEquals(3, kvs.length);
-    verifyData(kvs[0], "row", "col1", 11);
-    verifyData(kvs[1], "row", "col2", 12);
-    verifyData(kvs[2], "row", "col3", 13);
+      // Expected blocks read: 5. [HBASE-4585]
+      kvs = getData(FAMILY, "row", Arrays.asList("col1", "col2", "col3"), 5);
+      assertEquals(3, kvs.length);
+      verifyData(kvs[0], "row", "col1", 11);
+      verifyData(kvs[1], "row", "col2", 12);
+      verifyData(kvs[2], "row", "col3", 13);
+    } finally {
+      HRegion.closeHRegion(this.region);
+      this.region = null;
+    }
   }
 
   /**
@@ -367,62 +387,71 @@ public class TestBlocksRead extends HBaseTestCase {
     String FAMILY = "cf1";
 
     HBaseConfiguration conf = getConf();
-    initHRegion(TABLE, getName(), conf, FAMILY);
+    this.region = initHRegion(TABLE, getName(), conf, FAMILY);
 
-    putData(FAMILY, "row", "col1", 1);
-    putData(FAMILY, "row", "col2", 2);
-    region.flushcache();
+    try {
+      putData(FAMILY, "row", "col1", 1);
+      putData(FAMILY, "row", "col2", 2);
+      region.flushcache();
 
-    // Execute a scan with caching turned off
-    // Expected blocks stored: 0
-    long blocksStart = getBlkCount();
-    Scan scan = new Scan();
-    scan.setCacheBlocks(false);
-    RegionScanner rs = region.getScanner(scan);
-    List<KeyValue> result = new ArrayList<KeyValue>(2);
-    rs.next(result);
-    assertEquals(2 * BLOOM_TYPE.length, result.size());
-    rs.close();
-    long blocksEnd = getBlkCount();
+      // Execute a scan with caching turned off
+      // Expected blocks stored: 0
+      long blocksStart = getBlkCount();
+      Scan scan = new Scan();
+      scan.setCacheBlocks(false);
+      RegionScanner rs = region.getScanner(scan);
+      List<KeyValue> result = new ArrayList<KeyValue>(2);
+      rs.next(result);
+      assertEquals(2 * BLOOM_TYPE.length, result.size());
+      rs.close();
+      long blocksEnd = getBlkCount();
 
-    assertEquals(blocksStart, blocksEnd);
+      assertEquals(blocksStart, blocksEnd);
 
-    // Execute with caching turned on
-    // Expected blocks stored: 2
-    blocksStart = blocksEnd;
-    scan.setCacheBlocks(true);
-    rs = region.getScanner(scan);
-    result = new ArrayList<KeyValue>(2);
-    rs.next(result);
-    assertEquals(2 * BLOOM_TYPE.length, result.size());
-    rs.close();
-    blocksEnd = getBlkCount();
+      // Execute with caching turned on
+      // Expected blocks stored: 2
+      blocksStart = blocksEnd;
+      scan.setCacheBlocks(true);
+      rs = region.getScanner(scan);
+      result = new ArrayList<KeyValue>(2);
+      rs.next(result);
+      assertEquals(2 * BLOOM_TYPE.length, result.size());
+      rs.close();
+      blocksEnd = getBlkCount();
 
-    assertEquals(2 * BLOOM_TYPE.length, blocksEnd - blocksStart);
-  }
+      assertEquals(2 * BLOOM_TYPE.length, blocksEnd - blocksStart);
+    } finally {
+      HRegion.closeHRegion(this.region);
+      this.region = null;
+    }
+  }
 
   @Test
   public void testLazySeekBlocksReadWithDelete() throws Exception {
     byte[] TABLE = Bytes.toBytes("testLazySeekBlocksReadWithDelete");
     String FAMILY = "cf1";
     KeyValue kvs[];
     HBaseConfiguration conf = getConf();
-    initHRegion(TABLE, getName(), conf, FAMILY);
+    this.region = initHRegion(TABLE, getName(), conf, FAMILY);
+    try {
+      deleteFamily(FAMILY, "row", 200);
+      for (int i = 0; i < 100; i++) {
+        putData(FAMILY, "row", "col" + i, i);
+      }
+      putData(FAMILY, "row", "col99", 201);
+      region.flushcache();
 
-    deleteFamily(FAMILY, "row", 200);
-    for (int i = 0; i < 100; i++) {
-      putData(FAMILY, "row", "col" + i, i);
+      kvs = getData(FAMILY, "row", Arrays.asList("col0"), 2);
+      assertEquals(0, kvs.length);
+
+      kvs = getData(FAMILY, "row", Arrays.asList("col99"), 2);
+      assertEquals(1, kvs.length);
+      verifyData(kvs[0], "row", "col99", 201);
+    } finally {
+      HRegion.closeHRegion(this.region);
+      this.region = null;
    }
-    putData(FAMILY, "row", "col99", 201);
-    region.flushcache();
-
-    kvs = getData(FAMILY, "row", Arrays.asList("col0"), 2);
-    assertEquals(0, kvs.length);
-
-    kvs = getData(FAMILY, "row", Arrays.asList("col99"), 2);
-    assertEquals(1, kvs.length);
-    verifyData(kvs[0], "row", "col99", 201);
   }
 
   @org.junit.Rule
   public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
@@ -62,95 +62,98 @@ public class TestColumnSeeking {
     HRegion region =
         HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(), TEST_UTIL
             .getConfiguration(), htd);
-    List<String> rows = generateRandomWords(10, "row");
-    List<String> allColumns = generateRandomWords(10, "column");
-    List<String> values = generateRandomWords(100, "value");
+    try {
+      List<String> rows = generateRandomWords(10, "row");
+      List<String> allColumns = generateRandomWords(10, "column");
+      List<String> values = generateRandomWords(100, "value");
 
-    long maxTimestamp = 2;
-    double selectPercent = 0.5;
-    int numberOfTests = 5;
-    double flushPercentage = 0.2;
-    double minorPercentage = 0.2;
-    double majorPercentage = 0.2;
-    double putPercentage = 0.2;
+      long maxTimestamp = 2;
+      double selectPercent = 0.5;
+      int numberOfTests = 5;
+      double flushPercentage = 0.2;
+      double minorPercentage = 0.2;
+      double majorPercentage = 0.2;
+      double putPercentage = 0.2;
 
-    HashMap<String, KeyValue> allKVMap = new HashMap<String, KeyValue>();
+      HashMap<String, KeyValue> allKVMap = new HashMap<String, KeyValue>();
 
-    HashMap<String, KeyValue>[] kvMaps = new HashMap[numberOfTests];
-    ArrayList<String>[] columnLists = new ArrayList[numberOfTests];
+      HashMap<String, KeyValue>[] kvMaps = new HashMap[numberOfTests];
+      ArrayList<String>[] columnLists = new ArrayList[numberOfTests];
 
-    for (int i = 0; i < numberOfTests; i++) {
-      kvMaps[i] = new HashMap<String, KeyValue>();
-      columnLists[i] = new ArrayList<String>();
-      for (String column : allColumns) {
-        if (Math.random() < selectPercent) {
-          columnLists[i].add(column);
+      for (int i = 0; i < numberOfTests; i++) {
+        kvMaps[i] = new HashMap<String, KeyValue>();
+        columnLists[i] = new ArrayList<String>();
+        for (String column : allColumns) {
+          if (Math.random() < selectPercent) {
+            columnLists[i].add(column);
+          }
         }
       }
-    }
 
-    for (String value : values) {
-      for (String row : rows) {
-        Put p = new Put(Bytes.toBytes(row));
-        p.setWriteToWAL(false);
-        for (String column : allColumns) {
-          for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
-            KeyValue kv =
-                KeyValueTestUtil.create(row, family, column, timestamp, value);
-            if (Math.random() < putPercentage) {
-              p.add(kv);
-              allKVMap.put(kv.getKeyString(), kv);
-              for (int i = 0; i < numberOfTests; i++) {
-                if (columnLists[i].contains(column)) {
-                  kvMaps[i].put(kv.getKeyString(), kv);
+      for (String value : values) {
+        for (String row : rows) {
+          Put p = new Put(Bytes.toBytes(row));
+          p.setWriteToWAL(false);
+          for (String column : allColumns) {
+            for (long timestamp = 1; timestamp <= maxTimestamp; timestamp++) {
+              KeyValue kv =
+                  KeyValueTestUtil.create(row, family, column, timestamp, value);
+              if (Math.random() < putPercentage) {
+                p.add(kv);
+                allKVMap.put(kv.getKeyString(), kv);
+                for (int i = 0; i < numberOfTests; i++) {
+                  if (columnLists[i].contains(column)) {
+                    kvMaps[i].put(kv.getKeyString(), kv);
+                  }
                 }
               }
             }
           }
-        }
-        region.put(p);
-        if (Math.random() < flushPercentage) {
-          LOG.info("Flushing... ");
-          region.flushcache();
-        }
+          region.put(p);
+          if (Math.random() < flushPercentage) {
+            LOG.info("Flushing... ");
+            region.flushcache();
+          }
 
-        if (Math.random() < minorPercentage) {
-          LOG.info("Minor compacting... ");
-          region.compactStores(false);
-        }
+          if (Math.random() < minorPercentage) {
+            LOG.info("Minor compacting... ");
+            region.compactStores(false);
+          }
 
-        if (Math.random() < majorPercentage) {
-          LOG.info("Major compacting... ");
-          region.compactStores(true);
+          if (Math.random() < majorPercentage) {
+            LOG.info("Major compacting... ");
+            region.compactStores(true);
+          }
         }
       }
-    }
 
-    for (int i = 0; i < numberOfTests + 1; i++) {
-      Collection<KeyValue> kvSet;
-      Scan scan = new Scan();
-      scan.setMaxVersions();
-      if (i < numberOfTests) {
-        kvSet = kvMaps[i].values();
-        for (String column : columnLists[i]) {
-          scan.addColumn(familyBytes, Bytes.toBytes(column));
+      for (int i = 0; i < numberOfTests + 1; i++) {
+        Collection<KeyValue> kvSet;
+        Scan scan = new Scan();
+        scan.setMaxVersions();
+        if (i < numberOfTests) {
+          kvSet = kvMaps[i].values();
+          for (String column : columnLists[i]) {
+            scan.addColumn(familyBytes, Bytes.toBytes(column));
+          }
+          LOG.info("ExplicitColumns scanner");
+          LOG.info("Columns: " + columnLists[i].size() + " Keys: "
+              + kvSet.size());
+        } else {
+          kvSet = allKVMap.values();
+          LOG.info("Wildcard scanner");
+          LOG.info("Columns: " + allColumns.size() + " Keys: " + kvSet.size());
+
         }
-        LOG.info("ExplicitColumns scanner");
-        LOG.info("Columns: " + columnLists[i].size() + " Keys: "
-            + kvSet.size());
-      } else {
-        kvSet = allKVMap.values();
-        LOG.info("Wildcard scanner");
-        LOG.info("Columns: " + allColumns.size() + " Keys: " + kvSet.size());
-
-      }
-      InternalScanner scanner = region.getScanner(scan);
-      List<KeyValue> results = new ArrayList<KeyValue>();
-      while (scanner.next(results))
-        ;
-      assertEquals(kvSet.size(), results.size());
-      assertTrue(results.containsAll(kvSet));
+        InternalScanner scanner = region.getScanner(scan);
+        List<KeyValue> results = new ArrayList<KeyValue>();
+        while (scanner.next(results))
+          ;
+        assertEquals(kvSet.size(), results.size());
+        assertTrue(results.containsAll(kvSet));
+      }
+    } finally {
+      HRegion.closeHRegion(region);
     }
 
-    region.close();
@@ -86,9 +86,10 @@ public class TestCompactSelection extends TestCase {
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
 
     HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
-    HRegion.createHRegion(info, basedir, conf, htd);
+    HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
+    HRegion.closeHRegion(region);
     Path tableDir = new Path(basedir, Bytes.toString(htd.getName()));
-    HRegion region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
+    region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
 
     store = new Store(basedir, region, hcd, fs, conf);
     TEST_FILE = StoreFile.getRandomFilename(fs, store.getHomedir());
File diff suppressed because it is too large
@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.MediumTests;
+import org.junit.Test;
+
+/**
+ * Tests that need to spin up a cluster testing an {@link HRegion}.  Use
+ * {@link TestHRegion} if you don't need a cluster, if you can test w/ a
+ * standalone {@link HRegion}.
+ */
+@Category(MediumTests.class)
+public class TestHRegionOnCluster {
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  @Test (timeout=180000)
+  public void testDataCorrectnessReplayingRecoveredEdits() throws Exception {
+    final int NUM_MASTERS = 1;
+    final int NUM_RS = 3;
+    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
+
+    try {
+      final byte[] TABLENAME = Bytes
+          .toBytes("testDataCorrectnessReplayingRecoveredEdits");
+      final byte[] FAMILY = Bytes.toBytes("family");
+      MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+      HMaster master = cluster.getMaster();
+
+      // Create table
+      HTableDescriptor desc = new HTableDescriptor(TABLENAME);
+      desc.addFamily(new HColumnDescriptor(FAMILY));
+      HBaseAdmin hbaseAdmin = TEST_UTIL.getHBaseAdmin();
+      hbaseAdmin.createTable(desc);
+
+      assertTrue(hbaseAdmin.isTableAvailable(TABLENAME));
+
+      // Put data: r1->v1
+      HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
+      putDataAndVerify(table, "r1", FAMILY, "v1", 1);
+
+      // Move region to target server
+      HRegionInfo regionInfo = table.getRegionLocation("r1").getRegionInfo();
+      int originServerNum = cluster.getServerWith(regionInfo.getRegionName());
+      HRegionServer originServer = cluster.getRegionServer(originServerNum);
+      int targetServerNum = (originServerNum + 1) % NUM_RS;
+      HRegionServer targetServer = cluster.getRegionServer(targetServerNum);
+      assertFalse(originServer.equals(targetServer));
+      hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(),
+          Bytes.toBytes(targetServer.getServerName().getServerName()));
+      do {
+        Thread.sleep(1);
+      } while (cluster.getServerWith(regionInfo.getRegionName()) == originServerNum);
+
+      // Put data: r2->v2
+      putDataAndVerify(table, "r2", FAMILY, "v2", 2);
+
+      // Move region to origin server
+      hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(),
+          Bytes.toBytes(originServer.getServerName().getServerName()));
+      do {
+        Thread.sleep(1);
+      } while (cluster.getServerWith(regionInfo.getRegionName()) == targetServerNum);
+
+      // Put data: r3->v3
+      putDataAndVerify(table, "r3", FAMILY, "v3", 3);
+
+      // Kill target server
+      targetServer.kill();
+      cluster.getRegionServerThreads().get(targetServerNum).join();
+      // Wait until finish processing of shutdown
+      while (master.getServerManager().areDeadServersInProgress()) {
+        Thread.sleep(5);
+      }
+      // Kill origin server
+      originServer.kill();
+      cluster.getRegionServerThreads().get(originServerNum).join();
+
+      // Put data: r4->v4
+      putDataAndVerify(table, "r4", FAMILY, "v4", 4);
+
+    } finally {
+      TEST_UTIL.shutdownMiniCluster();
+    }
+  }
+
+  private void putDataAndVerify(HTable table, String row, byte[] family,
+      String value, int verifyNum) throws IOException {
+    System.out.println("=========Putting data :" + row);
+    Put put = new Put(Bytes.toBytes(row));
+    put.add(family, Bytes.toBytes("q1"), Bytes.toBytes(value));
+    table.put(put);
+    ResultScanner resultScanner = table.getScanner(new Scan());
+    List<Result> results = new ArrayList<Result>();
+    while (true) {
+      Result r = resultScanner.next();
+      if (r == null)
+        break;
+      results.add(r);
+    }
+    resultScanner.close();
+    if (results.size() != verifyNum) {
+      System.out.println(results);
+    }
+    assertEquals(verifyNum, results.size());
+  }
+
+  @org.junit.Rule
+  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
+    new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
+}
@@ -52,40 +52,45 @@ public class TestMinVersions extends HBaseTestCase {
   public void testGetClosestBefore() throws Exception {
     HTableDescriptor htd = createTableDescriptor(getName(), 1, 1000, 1, false);
     HRegion region = createNewHRegion(htd, null, null);
+    try {
 
-    // 2s in the past
-    long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000;
+      // 2s in the past
+      long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000;
 
-    Put p = new Put(T1, ts);
-    p.add(c0, c0, T1);
-    region.put(p);
+      Put p = new Put(T1, ts);
+      p.add(c0, c0, T1);
+      region.put(p);
 
-    p = new Put(T1, ts+1);
-    p.add(c0, c0, T4);
-    region.put(p);
+      p = new Put(T1, ts+1);
+      p.add(c0, c0, T4);
+      region.put(p);
 
-    p = new Put(T3, ts);
-    p.add(c0, c0, T3);
-    region.put(p);
+      p = new Put(T3, ts);
+      p.add(c0, c0, T3);
+      region.put(p);
 
-    // now make sure that getClosestBefore(...) get can
-    // rows that would be expired without minVersion.
-    // also make sure it gets the latest version
-    Result r = region.getClosestRowBefore(T1, c0);
-    checkResult(r, c0, T4);
+      // now make sure that getClosestBefore(...) get can
+      // rows that would be expired without minVersion.
+      // also make sure it gets the latest version
+      Result r = region.getClosestRowBefore(T1, c0);
+      checkResult(r, c0, T4);
 
-    r = region.getClosestRowBefore(T2, c0);
-    checkResult(r, c0, T4);
+      r = region.getClosestRowBefore(T2, c0);
+      checkResult(r, c0, T4);
 
-    // now flush/compact
-    region.flushcache();
-    region.compactStores(true);
+      // now flush/compact
+      region.flushcache();
+      region.compactStores(true);
 
-    r = region.getClosestRowBefore(T1, c0);
-    checkResult(r, c0, T4);
+      r = region.getClosestRowBefore(T1, c0);
+      checkResult(r, c0, T4);
 
-    r = region.getClosestRowBefore(T2, c0);
-    checkResult(r, c0, T4);
+      r = region.getClosestRowBefore(T2, c0);
+      checkResult(r, c0, T4);
+    } finally {
+      region.close();
+      region.getLog().closeAndDelete();
+    }
   }
 
   /**
@ -96,48 +101,52 @@ public class TestMinVersions extends HBaseTestCase {
// keep 3 versions minimum
HTableDescriptor htd = createTableDescriptor(getName(), 3, 1000, 1, false);
HRegion region = createNewHRegion(htd, null, null);

// 2s in the past
long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000;

try {
  Put p = new Put(T1, ts-1);
  p.add(c0, c0, T2);
  region.put(p);

  p = new Put(T1, ts-3);
  p.add(c0, c0, T0);
  region.put(p);

  // now flush/compact
  region.flushcache();
  region.compactStores(true);

  p = new Put(T1, ts);
  p.add(c0, c0, T3);
  region.put(p);

  p = new Put(T1, ts-2);
  p.add(c0, c0, T1);
  region.put(p);

  p = new Put(T1, ts-3);
  p.add(c0, c0, T0);
  region.put(p);

  // newest version in the memstore
  // the 2nd oldest in the store file
  // and the 3rd, 4th oldest also in the memstore

  Get g = new Get(T1);
  g.setMaxVersions();
  Result r = region.get(g, null); // this'll use ScanWildcardColumnTracker
  checkResult(r, c0, T3,T2,T1);

  g = new Get(T1);
  g.setMaxVersions();
  g.addColumn(c0, c0);
  r = region.get(g, null); // this'll use ExplicitColumnTracker
  checkResult(r, c0, T3,T2,T1);
} finally {
  region.close();
  region.getLog().closeAndDelete();
}
}

/**
@ -150,47 +159,52 @@ public class TestMinVersions extends HBaseTestCase {
// 2s in the past
long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000;

try {
  Put p = new Put(T1, ts-2);
  p.add(c0, c0, T1);
  region.put(p);

  p = new Put(T1, ts-1);
  p.add(c0, c0, T2);
  region.put(p);

  p = new Put(T1, ts);
  p.add(c0, c0, T3);
  region.put(p);

  Delete d = new Delete(T1, ts-1, null);
  region.delete(d, null, true);

  Get g = new Get(T1);
  g.setMaxVersions();
  Result r = region.get(g, null); // this'll use ScanWildcardColumnTracker
  checkResult(r, c0, T3);

  g = new Get(T1);
  g.setMaxVersions();
  g.addColumn(c0, c0);
  r = region.get(g, null); // this'll use ExplicitColumnTracker
  checkResult(r, c0, T3);

  // now flush/compact
  region.flushcache();
  region.compactStores(true);

  // try again
  g = new Get(T1);
  g.setMaxVersions();
  r = region.get(g, null); // this'll use ScanWildcardColumnTracker
  checkResult(r, c0, T3);

  g = new Get(T1);
  g.setMaxVersions();
  g.addColumn(c0, c0);
  r = region.get(g, null); // this'll use ExplicitColumnTracker
  checkResult(r, c0, T3);
} finally {
  region.close();
  region.getLog().closeAndDelete();
}
}

/**
@ -203,63 +217,68 @@ public class TestMinVersions extends HBaseTestCase {
// 2s in the past
long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000;

try {
  // 2nd version
  Put p = new Put(T1, ts-2);
  p.add(c0, c0, T2);
  region.put(p);

  // 3rd version
  p = new Put(T1, ts-1);
  p.add(c0, c0, T3);
  region.put(p);

  // 4th version
  p = new Put(T1, ts);
  p.add(c0, c0, T4);
  region.put(p);

  // now flush/compact
  region.flushcache();
  region.compactStores(true);

  // now put the first version (backdated)
  p = new Put(T1, ts-3);
  p.add(c0, c0, T1);
  region.put(p);

  // now the latest change is in the memstore,
  // but it is not the latest version

  Result r = region.get(new Get(T1), null);
  checkResult(r, c0, T4);

  Get g = new Get(T1);
  g.setMaxVersions();
  r = region.get(g, null); // this'll use ScanWildcardColumnTracker
  checkResult(r, c0, T4,T3);

  g = new Get(T1);
  g.setMaxVersions();
  g.addColumn(c0, c0);
  r = region.get(g, null); // this'll use ExplicitColumnTracker
  checkResult(r, c0, T4,T3);

  p = new Put(T1, ts+1);
  p.add(c0, c0, T5);
  region.put(p);

  // now the latest version is in the memstore

  g = new Get(T1);
  g.setMaxVersions();
  r = region.get(g, null); // this'll use ScanWildcardColumnTracker
  checkResult(r, c0, T5,T4);

  g = new Get(T1);
  g.setMaxVersions();
  g.addColumn(c0, c0);
  r = region.get(g, null); // this'll use ExplicitColumnTracker
  checkResult(r, c0, T5,T4);
} finally {
  region.close();
  region.getLog().closeAndDelete();
}
}

/**
@ -269,83 +288,88 @@ public class TestMinVersions extends HBaseTestCase {
// 2 versions minimum, 1000 versions maximum, ttl = 1s
HTableDescriptor htd = createTableDescriptor(getName(), 2, 1000, 1, false);
HRegion region = createNewHRegion(htd, null, null);
try {

  // 2s in the past
  long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000;

  // 1st version
  Put p = new Put(T1, ts-3);
  p.add(c0, c0, T1);
  region.put(p);

  // 2nd version
  p = new Put(T1, ts-2);
  p.add(c0, c0, T2);
  region.put(p);

  // 3rd version
  p = new Put(T1, ts-1);
  p.add(c0, c0, T3);
  region.put(p);

  // 4th version
  p = new Put(T1, ts);
  p.add(c0, c0, T4);
  region.put(p);

  Result r = region.get(new Get(T1), null);
  checkResult(r, c0, T4);

  Get g = new Get(T1);
  g.setTimeRange(0L, ts+1);
  r = region.get(g, null);
  checkResult(r, c0, T4);

  // oldest version still exists
  g.setTimeRange(0L, ts-2);
  r = region.get(g, null);
  checkResult(r, c0, T1);

  // gets see only available versions
  // even before compactions
  g = new Get(T1);
  g.setMaxVersions();
  r = region.get(g, null); // this'll use ScanWildcardColumnTracker
  checkResult(r, c0, T4,T3);

  g = new Get(T1);
  g.setMaxVersions();
  g.addColumn(c0, c0);
  r = region.get(g, null); // this'll use ExplicitColumnTracker
  checkResult(r, c0, T4,T3);

  // now flush
  region.flushcache();

  // with HBASE-4241 a flush will eliminate the expired rows
  g = new Get(T1);
  g.setTimeRange(0L, ts-2);
  r = region.get(g, null);
  assertTrue(r.isEmpty());

  // major compaction
  region.compactStores(true);

  // after compaction the 4th version is still available
  g = new Get(T1);
  g.setTimeRange(0L, ts+1);
  r = region.get(g, null);
  checkResult(r, c0, T4);

  // so is the 3rd
  g.setTimeRange(0L, ts);
  r = region.get(g, null);
  checkResult(r, c0, T3);

  // but the 2nd and earlier versions are gone
  g.setTimeRange(0L, ts-1);
  r = region.get(g, null);
  assertTrue(r.isEmpty());
} finally {
  region.close();
  region.getLog().closeAndDelete();
}
}

/**
@ -359,62 +383,67 @@ public class TestMinVersions extends HBaseTestCase {

// 2s in the past
long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000;
try {

  Put p = new Put(T1, ts-3);
  p.add(c0, c0, T0);
  p.add(c1, c1, T0);
  region.put(p);

  p = new Put(T1, ts-2);
  p.add(c0, c0, T1);
  p.add(c1, c1, T1);
  region.put(p);

  p = new Put(T1, ts-1);
  p.add(c0, c0, T2);
  p.add(c1, c1, T2);
  region.put(p);

  p = new Put(T1, ts);
  p.add(c0, c0, T3);
  p.add(c1, c1, T3);
  region.put(p);

  List<Long> tss = new ArrayList<Long>();
  tss.add(ts-1);
  tss.add(ts-2);

  Get g = new Get(T1);
  g.addColumn(c1,c1);
  g.setFilter(new TimestampsFilter(tss));
  g.setMaxVersions();
  Result r = region.get(g, null);
  checkResult(r, c1, T2,T1);

  g = new Get(T1);
  g.addColumn(c0,c0);
  g.setFilter(new TimestampsFilter(tss));
  g.setMaxVersions();
  r = region.get(g, null);
  checkResult(r, c0, T2,T1);

  // now flush/compact
  region.flushcache();
  region.compactStores(true);

  g = new Get(T1);
  g.addColumn(c1,c1);
  g.setFilter(new TimestampsFilter(tss));
  g.setMaxVersions();
  r = region.get(g, null);
  checkResult(r, c1, T2);

  g = new Get(T1);
  g.addColumn(c0,c0);
  g.setFilter(new TimestampsFilter(tss));
  g.setMaxVersions();
  r = region.get(g, null);
  checkResult(r, c0, T2);
} finally {
  region.close();
  region.getLog().closeAndDelete();
}
}

private void checkResult(Result r, byte[] col, byte[] ... vals) {
@ -68,31 +68,34 @@ public class TestResettingCounters {
  }
}
HRegion region = HRegion.createHRegion(hri, path, conf, htd);
try {
  Increment odd = new Increment(rows[0]);
  Increment even = new Increment(rows[0]);
  Increment all = new Increment(rows[0]);
  for (int i=0;i<numQualifiers;i++) {
    if (i % 2 == 0) even.addColumn(families[0], qualifiers[i], 1);
    else odd.addColumn(families[0], qualifiers[i], 1);
    all.addColumn(families[0], qualifiers[i], 1);
  }

  // increment odd qualifiers 5 times and flush
  for (int i=0;i<5;i++) region.increment(odd, null, false);
  region.flushcache();

  // increment even qualifiers 5 times
  for (int i=0;i<5;i++) region.increment(even, null, false);

  // increment all qualifiers, should have value=6 for all
  Result result = region.increment(all, null, false);
  assertEquals(numQualifiers, result.size());
  KeyValue [] kvs = result.raw();
  for (int i=0;i<kvs.length;i++) {
    System.out.println(kvs[i].toString());
    assertTrue(Bytes.equals(kvs[i].getQualifier(), qualifiers[i]));
    assertEquals(6, Bytes.toLong(kvs[i].getValue()));
  }
} finally {
  HRegion.closeHRegion(region);
}
region.close();
region.getLog().closeAndDelete();
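Here the removed two-step cleanup at the end (region.close() plus region.getLog().closeAndDelete()) is collapsed into HRegion.closeHRegion(...), which closes the region and shuts down the WAL it was created with. A sketch of the shape used above, with hri/path/conf/htd standing in for the test fixtures:

  HRegion region = HRegion.createHRegion(hri, path, conf, htd);
  try {
    // ... increments and assertions ...
  } finally {
    HRegion.closeHRegion(region);  // closes the region and its HLog in one call
  }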
@ -319,7 +319,9 @@ public class TestSplitTransaction {
HColumnDescriptor hcd = new HColumnDescriptor(CF);
htd.addFamily(hcd);
HRegionInfo hri = new HRegionInfo(htd.getName(), STARTROW, ENDROW);
HRegion.createHRegion(hri, testdir, TEST_UTIL.getConfiguration(), htd);
HRegion r = HRegion.createHRegion(hri, testdir, TEST_UTIL.getConfiguration(), htd);
r.close();
r.getLog().closeAndDelete();
return HRegion.openHRegion(testdir, hri, htd, wal,
    TEST_UTIL.getConfiguration());
}
@ -99,28 +99,32 @@ public class TestCloseRegionHandler {
HRegion region =
    HRegion.createHRegion(hri, HTU.getDataTestDir(),
        HTU.getConfiguration(), htd);
try {
  assertNotNull(region);
  // Spy on the region so can throw exception when close is called.
  HRegion spy = Mockito.spy(region);
  final boolean abort = false;
  Mockito.when(spy.close(abort)).
      thenThrow(new RuntimeException("Mocked failed close!"));
  // The CloseRegionHandler will try to get an HRegion that corresponds
  // to the passed hri -- so insert the region into the online region Set.
  rss.addToOnlineRegions(spy);
  // Assert the Server is NOT stopped before we call close region.
  assertFalse(server.isStopped());
  CloseRegionHandler handler =
      new CloseRegionHandler(server, rss, hri, false, false, -1);
  boolean throwable = false;
  try {
    handler.process();
  } catch (Throwable t) {
    throwable = true;
  } finally {
    assertTrue(throwable);
    // Abort calls stop so stopped flag should be set.
    assertTrue(server.isStopped());
  }
} finally {
  HRegion.closeHRegion(region);
}
}
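The shape of the fix above is worth noting: the existing assertions stay in the inner finally, while the added outer finally guarantees the real (non-spy) region is released even though handler.process() is expected to throw on the mocked close. A condensed sketch, reusing the names from the test:

  HRegion spy = Mockito.spy(region);
  Mockito.when(spy.close(false)).thenThrow(new RuntimeException("Mocked failed close!"));
  try {
    // drive the CloseRegionHandler against the spy; it should throw
  } finally {
    HRegion.closeHRegion(region);  // close the real region so its WAL thread does not leak
  }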
@ -98,30 +98,34 @@ public class TestOpenRegionHandler {
HRegion.createHRegion(hri, HTU.getDataTestDir(), HTU
    .getConfiguration(), htd);
assertNotNull(region);
try {
  OpenRegionHandler handler = new OpenRegionHandler(server, rss, hri, htd) {
    HRegion openRegion() {
      // Open region first, then remove znode as though it'd been hijacked.
      HRegion region = super.openRegion();

      // Don't actually open region BUT remove the znode as though it'd
      // been hijacked on us.
      ZooKeeperWatcher zkw = this.server.getZooKeeper();
      String node = ZKAssign.getNodeName(zkw, hri.getEncodedName());
      try {
        ZKUtil.deleteNodeFailSilent(zkw, node);
      } catch (KeeperException e) {
        throw new RuntimeException("Ugh failed delete of " + node, e);
      }
      return region;
    }
  };
  // Call process without first creating OFFLINE region in zk, see if
  // exception or just quiet return (expected).
  handler.process();
  ZKAssign.createNodeOffline(server.getZooKeeper(), hri, server.getServerName());
  // Call process again but this time yank the zk znode out from under it
  // post OPENING; again will expect it to come back w/o NPE or exception.
  handler.process();
} finally {
  HRegion.closeHRegion(region);
}
}

@Test
@ -133,7 +133,8 @@ public class TestWALReplay {
HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
HRegion region2 = HRegion.createHRegion(hri,
    hbaseRootDir, this.conf, htd);

region2.close();
region2.getLog().closeAndDelete();
final byte [] tableName = Bytes.toBytes(tableNameStr);
final byte [] rowName = tableName;

@ -193,6 +194,8 @@ public class TestWALReplay {
final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
HRegion region2 = HRegion.createHRegion(hri,
    hbaseRootDir, this.conf, htd);
region2.close();
region2.getLog().closeAndDelete();
HLog wal = createWAL(this.conf);
HRegion region = HRegion.openHRegion(hri, htd, wal, this.conf);
Path f = new Path(basedir, "hfile");

@ -252,7 +255,8 @@ public class TestWALReplay {
final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
HRegion region3 = HRegion.createHRegion(hri,
    hbaseRootDir, this.conf, htd);

region3.close();
region3.getLog().closeAndDelete();
// Write countPerFamily edits into the three families. Do a flush on one
// of the families during the load of edits so its seqid is not same as
// others to test we do right thing when different seqids.

@ -369,7 +373,8 @@ public class TestWALReplay {
final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
HRegion region3 = HRegion.createHRegion(hri,
    hbaseRootDir, this.conf, htd);

region3.close();
region3.getLog().closeAndDelete();
// Write countPerFamily edits into the three families. Do a flush on one
// of the families during the load of edits so its seqid is not same as
// others to test we do right thing when different seqids.

@ -435,7 +440,8 @@ public class TestWALReplay {
final HTableDescriptor htd = createBasic3FamilyHTD(tableNameStr);
HRegion region2 = HRegion.createHRegion(hri,
    hbaseRootDir, this.conf, htd);

region2.close();
region2.getLog().closeAndDelete();
final HLog wal = createWAL(this.conf);
final byte[] tableName = Bytes.toBytes(tableNameStr);
final byte[] rowName = tableName;
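All five TestWALReplay hunks are the same one-line leak fix repeated: a region created only to bootstrap the region directory is closed, and the throwaway HLog it opened is deleted, before the test opens the region on the WAL it actually wants to replay. A sketch, where the bootstrap variable name is illustrative and hri/htd/conf are the test fixtures:

  HRegion bootstrap = HRegion.createHRegion(hri, hbaseRootDir, conf, htd);
  bootstrap.close();
  bootstrap.getLog().closeAndDelete();  // drop the bootstrap WAL createHRegion opened
  HLog wal = createWAL(conf);           // the WAL the test replays
  HRegion region = HRegion.openHRegion(hri, htd, wal, conf);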
@ -169,6 +169,13 @@ public class TestMergeTool extends HBaseTestCase {
@Override
public void tearDown() throws Exception {
  super.tearDown();
  for (int i = 0; i < sourceRegions.length; i++) {
    HRegion r = regions[i];
    if (r != null) {
      r.close();
      r.getLog().closeAndDelete();
    }
  }
  TEST_UTIL.shutdownMiniCluster();
}