HBASE-6832. [WINDOWS] Tests should use explicit timestamp for Puts, and not rely on implicit RS timing

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1437643 13f79535-47bb-0310-9956-ffa450edef68
Enis Soztutar 2013-01-23 19:32:06 +00:00
parent 731d288f19
commit 73d6cbb4cf
8 changed files with 123 additions and 34 deletions
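The pattern applied throughout this commit: on Windows, System.currentTimeMillis() advances in coarse ticks (roughly 10-16 ms), so back-to-back operations can be stamped with identical timestamps, and a delete can then eclipse a later put. The tests below either pass explicit timestamps to each Put/Delete or inject a deterministic clock. A minimal sketch of the injection lifecycle, using the helpers shown in the diffs (the exact return values assume nothing else reads the clock):

    import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
    import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;

    // setUp(): every EnvironmentEdgeManager.currentTimeMillis() call now
    // returns a strictly increasing value, so no two operations can
    // observe the same timestamp.
    EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge(1));

    long t1 = EnvironmentEdgeManager.currentTimeMillis(); // 1
    long t2 = EnvironmentEdgeManager.currentTimeMillis(); // 2

    // tearDown(): restore the system clock so later tests are unaffected.
    EnvironmentEdgeManager.reset();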

IncrementingEnvironmentEdge.java

@@ -26,7 +26,22 @@ import org.apache.hadoop.classification.InterfaceAudience;
@InterfaceAudience.Private
public class IncrementingEnvironmentEdge implements EnvironmentEdge {
private long timeIncrement = 1;
private long timeIncrement;
/**
* Construct an incremental edge starting from currentTimeMillis
*/
public IncrementingEnvironmentEdge() {
this(System.currentTimeMillis());
}
/**
* Construct an incremental edge with an initial amount
* @param initialAmount the initial value to start with
*/
public IncrementingEnvironmentEdge(long initialAmount) {
this.timeIncrement = initialAmount;
}
/**
* {@inheritDoc}
@@ -38,4 +53,12 @@ public class IncrementingEnvironmentEdge implements EnvironmentEdge {
public synchronized long currentTimeMillis() {
return timeIncrement++;
}
/**
* Increment the time by the given amount
*/
public synchronized long incrementTime(long amount) {
timeIncrement += amount;
return timeIncrement;
}
}
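A quick usage sketch of the revised edge (the values follow from the implementation above): the no-arg constructor now seeds the counter from the wall clock so injected timestamps stay realistic, while the long constructor keeps a fixed starting point for tests that assert exact values, and incrementTime() stands in for real sleeps:

    // Seeded from System.currentTimeMillis(); each read advances it by 1 ms.
    IncrementingEnvironmentEdge wallClock = new IncrementingEnvironmentEdge();

    // Seeded from a fixed value, for deterministic assertions.
    IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge(1);
    edge.currentTimeMillis(); // returns 1
    edge.currentTimeMillis(); // returns 2
    edge.incrementTime(4000); // jump the clock 4 s, e.g. past a TTL
    edge.currentTimeMillis(); // returns 4003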

TestRegionObserverBypass.java

@@ -18,31 +18,35 @@
*/
package org.apache.hadoop.hbase.coprocessor;
import static junit.framework.Assert.assertEquals;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import static org.junit.Assert.assertEquals;
@Category(MediumTests.class)
public class TestRegionObserverBypass {
private static HBaseTestingUtility util;
@@ -60,7 +64,6 @@ public class TestRegionObserverBypass {
TestCoprocessor.class.getName());
util = new HBaseTestingUtility(conf);
util.startMiniCluster();
util.createTable(tableName, new byte[][] {dummy, test});
}
@AfterClass
@@ -68,6 +71,18 @@ public class TestRegionObserverBypass {
util.shutdownMiniCluster();
}
@Before
public void setUp() throws Exception {
HBaseAdmin admin = util.getHBaseAdmin();
if (admin.tableExists(tableName)) {
if (admin.isTableEnabled(tableName)) {
admin.disableTable(tableName);
}
admin.deleteTable(tableName);
}
util.createTable(tableName, new byte[][] {dummy, test});
}
/**
* do a single put that is bypassed by a RegionObserver
* @throws Exception
@@ -89,6 +104,10 @@
*/
@Test
public void testMulti() throws Exception {
//ensure that server time increments every time we do an operation, otherwise
//previous deletes will eclipse successive puts having the same timestamp
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
HTable t = new HTable(util.getConfiguration(), tableName);
List<Put> puts = new ArrayList<Put>();
Put p = new Put(row1);
@@ -170,6 +189,8 @@
checkRowAndDelete(t,row2,1);
checkRowAndDelete(t,row3,0);
t.close();
EnvironmentEdgeManager.reset();
}
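The comment at the top of testMulti() is the heart of the Windows failure: HBase resolves a Delete and a Put that carry the same timestamp in favor of the Delete until the marker is collected, regardless of arrival order. A hedged sketch of that failure mode, reusing the test's fields (the fixed ts stands in for two wall-clock reads landing in the same millisecond):

    long ts = 100L;                  // both mutations share one clock tick
    Put p = new Put(row1);
    p.add(test, dummy, ts, dummy);
    t.put(p);

    Delete d = new Delete(row1, ts); // marker covers everything at <= ts
    t.delete(d);

    Put p2 = new Put(row1);          // written later, but same timestamp...
    p2.add(test, dummy, ts, dummy);
    t.put(p2);
    // ...so the marker eclipses it: a Get on row1 comes back empty.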
private void checkRowAndDelete(HTable t, byte[] row, int count) throws IOException {

TestScannerSelectionUsingTTL.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -109,20 +110,24 @@ public class TestScannerSelectionUsingTTL {
HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(info.getEncodedName()),
conf, htd);
long ts = EnvironmentEdgeManager.currentTimeMillis();
long version = 0; //make sure each new set of Puts has a new ts
for (int iFile = 0; iFile < totalNumFiles; ++iFile) {
if (iFile == NUM_EXPIRED_FILES) {
Threads.sleepWithoutInterrupt(TTL_MS);
version += TTL_MS;
}
for (int iRow = 0; iRow < NUM_ROWS; ++iRow) {
Put put = new Put(Bytes.toBytes("row" + iRow));
for (int iCol = 0; iCol < NUM_COLS_PER_ROW; ++iCol) {
put.add(FAMILY_BYTES, Bytes.toBytes("col" + iCol),
Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
ts + version, Bytes.toBytes("value" + iFile + "_" + iRow + "_" + iCol));
}
region.put(put);
}
region.flushcache();
version++;
}
Scan scan = new Scan();
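The version counter above gives every flushed file a distinct, explicit timestamp instead of trusting the RegionServer clock, and the += TTL_MS bump keeps post-expiry files a full TTL ahead of the expired ones. A worked trace under assumed values (NUM_EXPIRED_FILES = 2, TTL_MS = 3000, four files; the constants are illustrative, not taken from the test):

    long version = 0;
    for (int iFile = 0; iFile < 4; iFile++) {
      if (iFile == 2) {
        version += 3000;  // files from here on are a whole TTL newer
      }
      System.out.println("file " + iFile + " stamped at ts+" + version);
      version++;          // every file gets a distinct timestamp
    }
    // Prints ts+0, ts+1, ts+3002, ts+3003: the first two files age out of
    // the TTL window while the last two remain selectable.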

TestLogsCleaner.java

@@ -63,7 +63,7 @@ public class TestLogsCleaner {
public void testLogCleaning() throws Exception{
Configuration conf = TEST_UTIL.getConfiguration();
// set TTL
long ttl = 2000;
long ttl = 10000;
conf.setLong("hbase.master.logcleaner.ttl", ttl);
conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
Replication.decorateMasterConfiguration(conf);

TestKeepDeletes.java

@@ -21,13 +21,21 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.*;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
import org.junit.experimental.categories.Category;
@Category(SmallTests.class)
@@ -43,6 +51,28 @@ public class TestKeepDeletes extends HBaseTestCase {
private final byte[] c0 = COLUMNS[0];
private final byte[] c1 = COLUMNS[1];
@Override
protected void setUp() throws Exception {
super.setUp();
/* HBASE-6832: [WINDOWS] Tests should use explicit timestamp for Puts, and not rely on
* implicit RS timing.
* Use an explicit timer (IncrementingEnvironmentEdge) so that the put, delete, and
* compact timestamps are all distinct. Otherwise, a forced major compaction will not
* purge Deletes whose timestamp matches the compaction time. See ScanQueryMatcher.match():
* if (retainDeletesInOutput
* || (!isUserScan && (EnvironmentEdgeManager.currentTimeMillis() - timestamp)
* <= timeToPurgeDeletes) ... )
*
*/
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
@Override
protected void tearDown() throws Exception {
super.tearDown();
EnvironmentEdgeManager.reset();
}
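To make the quoted condition concrete, a hedged walk-through (timeToPurgeDeletes comes from hbase.hstore.time.to.purge.deletes and defaults to 0): under the raw system clock a delete marker stamped in the same millisecond the compaction runs is retained, while under the incrementing edge "now" has already ticked past the marker, so it can be purged:

    long timeToPurgeDeletes = 0; // default config value
    long deleteTs = 1000L;       // timestamp carried by the delete marker

    // Raw clock: compaction runs within the same millisecond as the delete.
    long now = 1000L;
    boolean retained = (now - deleteTs) <= timeToPurgeDeletes; // 0 <= 0, kept

    // Incrementing edge: intervening currentTimeMillis() calls advanced the
    // clock, so the compaction's "now" is strictly greater than deleteTs.
    now = 1001L;
    retained = (now - deleteTs) <= timeToPurgeDeletes;         // 1 <= 0, purged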
/**
* Make sure that deleted rows are retained.
* Family delete markers are deleted.
@@ -55,7 +85,7 @@ public class TestKeepDeletes extends HBaseTestCase {
HConstants.FOREVER, true);
HRegion region = createNewHRegion(htd, null, null);
long ts = System.currentTimeMillis();
long ts = EnvironmentEdgeManager.currentTimeMillis();
Put p = new Put(T1, ts);
p.add(c0, c0, T1);
region.put(p);
@@ -138,7 +168,7 @@ public class TestKeepDeletes extends HBaseTestCase {
}
/**
* Even when the store does not keep deletes a "raw" scan will
* return everything it can find (unless discarding cells is guaranteed
* to have no effect).
* Assuming this is the desired behavior. Could also disallow "raw" scanning
@@ -151,7 +181,7 @@ public class TestKeepDeletes extends HBaseTestCase {
HConstants.FOREVER, false);
HRegion region = createNewHRegion(htd, null, null);
long ts = System.currentTimeMillis();
long ts = EnvironmentEdgeManager.currentTimeMillis();
Put p = new Put(T1, ts);
p.add(c0, c0, T1);
region.put(p);
@@ -195,7 +225,7 @@ public class TestKeepDeletes extends HBaseTestCase {
HConstants.FOREVER, false);
HRegion region = createNewHRegion(htd, null, null);
long ts = System.currentTimeMillis();
long ts = EnvironmentEdgeManager.currentTimeMillis();
Put p = new Put(T1, ts);
p.add(c0, c0, T1);
region.put(p);
@@ -242,7 +272,7 @@ public class TestKeepDeletes extends HBaseTestCase {
s.setRaw(true);
s.setMaxVersions();
s.addColumn(c0, c0);
try {
InternalScanner scan = region.getScanner(s);
fail("raw scanner with columns should have failed");
@@ -261,7 +291,7 @@ public class TestKeepDeletes extends HBaseTestCase {
HConstants.FOREVER, true);
HRegion region = createNewHRegion(htd, null, null);
long ts = System.currentTimeMillis();
long ts = EnvironmentEdgeManager.currentTimeMillis();
Put p = new Put(T1, ts);
p.add(c0, c0, T1);
region.put(p);
@@ -307,7 +337,7 @@ public class TestKeepDeletes extends HBaseTestCase {
HConstants.FOREVER, true);
HRegion region = createNewHRegion(htd, null, null);
long ts = System.currentTimeMillis();
long ts = EnvironmentEdgeManager.currentTimeMillis();
Delete d = new Delete(T1, ts);
d.deleteColumns(c0, c0, ts);
@@ -320,7 +350,7 @@ public class TestKeepDeletes extends HBaseTestCase {
d = new Delete(T1, ts);
d.deleteColumn(c0, c0, ts+1);
region.delete(d, true);
d = new Delete(T1, ts);
d.deleteColumn(c0, c0, ts+2);
region.delete(d, true);
@@ -349,7 +379,7 @@ public class TestKeepDeletes extends HBaseTestCase {
HConstants.FOREVER, true);
HRegion region = createNewHRegion(htd, null, null);
long ts = System.currentTimeMillis();
long ts = EnvironmentEdgeManager.currentTimeMillis();
Put p = new Put(T1, ts);
p.add(c0, c0, T1);
@@ -372,7 +402,7 @@ public class TestKeepDeletes extends HBaseTestCase {
d = new Delete(T1, ts);
d.deleteColumn(c0, c0, ts+1);
region.delete(d, true);
d = new Delete(T1, ts);
d.deleteColumn(c0, c0, ts+2);
region.delete(d, true);
@@ -411,7 +441,7 @@ public class TestKeepDeletes extends HBaseTestCase {
HConstants.FOREVER, true);
HRegion region = createNewHRegion(htd, null, null);
long ts = System.currentTimeMillis();
long ts = EnvironmentEdgeManager.currentTimeMillis();
Put p = new Put(T1, ts);
p.add(c0, c0, T1);
p.add(c0, c1, T1);
@@ -492,7 +522,7 @@ public class TestKeepDeletes extends HBaseTestCase {
HConstants.FOREVER, true);
HRegion region = createNewHRegion(htd, null, null);
long ts = System.currentTimeMillis();
long ts = EnvironmentEdgeManager.currentTimeMillis();
Put p = new Put(T1, ts);
p.add(c0, c0, T1);
region.put(p);
@@ -502,7 +532,7 @@ public class TestKeepDeletes extends HBaseTestCase {
p = new Put(T1, ts-10);
p.add(c0, c1, T1);
region.put(p);
Delete d = new Delete(T1, ts);
// test corner case (Put and Delete have same TS)
d.deleteColumns(c0, c0, ts);
@@ -511,7 +541,7 @@ public class TestKeepDeletes extends HBaseTestCase {
d = new Delete(T1, ts+1);
d.deleteColumn(c0, c0, ts+1);
region.delete(d, true);
d = new Delete(T1, ts+3);
d.deleteColumn(c0, c0, ts+3);
region.delete(d, true);
@@ -527,7 +557,7 @@ public class TestKeepDeletes extends HBaseTestCase {
p = new Put(T1, ts+2);
p.add(c0, c0, T2);
region.put(p);
// delete, put, delete, delete, put
assertEquals(3, countDeleteMarkers(region));
@@ -584,7 +614,7 @@ public class TestKeepDeletes extends HBaseTestCase {
HConstants.FOREVER, true);
HRegion region = createNewHRegion(htd, null, null);
long ts = System.currentTimeMillis();
long ts = EnvironmentEdgeManager.currentTimeMillis();
Put p = new Put(T1, ts);
p.add(c0, c0, T1);
@@ -634,7 +664,7 @@ public class TestKeepDeletes extends HBaseTestCase {
HTableDescriptor htd = createTableDescriptor(getName(), 3, 1000, 1, true);
HRegion region = createNewHRegion(htd, null, null);
long ts = System.currentTimeMillis() - 2000; // 2s in the past
long ts = EnvironmentEdgeManager.currentTimeMillis() - 2000; // 2s in the past
Put p = new Put(T1, ts);
p.add(c0, c0, T3);

TestStore.java

@@ -63,6 +63,7 @@ import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
import org.apache.hadoop.util.Progressable;
import org.junit.experimental.categories.Category;
@@ -186,6 +187,8 @@ public class TestStore extends TestCase {
public void testDeleteExpiredStoreFiles() throws Exception {
int storeFileNum = 4;
int ttl = 4;
IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
EnvironmentEdgeManagerTestHelper.injectEdge(edge);
Configuration conf = HBaseConfiguration.create();
// Enable the expired store file deletion
@@ -205,7 +208,7 @@
this.store.add(new KeyValue(row, family, qf2, timeStamp, (byte[]) null));
this.store.add(new KeyValue(row, family, qf3, timeStamp, (byte[]) null));
flush(i);
Thread.sleep(sleepTime);
edge.incrementTime(sleepTime);
}
// Verify the total number of store files
@@ -220,8 +223,8 @@
// If not the first compaction, there is another empty store file,
assertEquals(Math.min(i, 2), cr.getFiles().size());
for (int j = 0; j < cr.getFiles().size(); j++) {
assertTrue(cr.getFiles().get(j).getReader().getMaxTimestamp() < (System
.currentTimeMillis() - this.store.scanInfo.getTtl()));
assertTrue(cr.getFiles().get(j).getReader().getMaxTimestamp() <
(EnvironmentEdgeManager.currentTimeMillis() - this.store.scanInfo.getTtl()));
}
// Verify that the expired store file is compacted to an empty store file.
StoreFile compactedFile = this.store.compact(cr);
@@ -229,7 +232,7 @@
assertEquals(0, compactedFile.getReader().getEntries());
// Let the next store file expired.
Thread.sleep(sleepTime);
edge.incrementTime(sleepTime);
}
}
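The Thread.sleep() to edge.incrementTime() swap above is the core of this file's change: rather than waiting for wall-clock time to pass (slow, and imprecise under coarse Windows timers), the test advances the injected clock instantly. A minimal sketch of the idea, assuming only the classes already imported in this diff:

    IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
    EnvironmentEdgeManagerTestHelper.injectEdge(edge);
    try {
      long ttlMs = 4 * 1000;
      long before = EnvironmentEdgeManager.currentTimeMillis();
      edge.incrementTime(ttlMs); // "sleep" past the TTL without sleeping
      long after = EnvironmentEdgeManager.currentTimeMillis();
      // after - before > ttlMs, so data written before now reads as expired.
    } finally {
      EnvironmentEdgeManager.reset(); // always restore the system clock
    }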

TestThriftServerCmdLine.java

@@ -32,6 +32,9 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.LargeTests;
import org.apache.hadoop.hbase.thrift.ThriftServerRunner.ImplType;
import org.apache.hadoop.hbase.thrift.generated.Hbase;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.protocol.TCompactProtocol;
@@ -115,11 +118,15 @@ public class TestThriftServerCmdLine {
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.startMiniCluster();
//ensure that server time increments every time we do an operation, otherwise
//successive puts having the same timestamp will override each other
EnvironmentEdgeManagerTestHelper.injectEdge(new IncrementingEnvironmentEdge());
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
EnvironmentEdgeManager.reset();
}
private void startCmdLineThread(final String[] args) {

TestIncrementingEnvironmentEdge.java

@@ -34,7 +34,7 @@ public class TestIncrementingEnvironmentEdge {
@Test
public void testGetCurrentTimeUsesSystemClock() {
IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge();
IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge(1);
assertEquals(1, edge.currentTimeMillis());
assertEquals(2, edge.currentTimeMillis());
assertEquals(3, edge.currentTimeMillis());