HBASE-10118 Major compact keeps deletes with future timestamps (Liu Shaohui)

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1584383 13f79535-47bb-0310-9956-ffa450edef68
sershe 2014-04-03 20:29:30 +00:00
parent 6892ae258d
commit 60079f0cd0
3 changed files with 76 additions and 3 deletions


@@ -336,9 +336,12 @@ public class ScanQueryMatcher {
         }
         // Can't early out now, because DelFam come before any other keys
       }
-      if (retainDeletesInOutput
-          || (!isUserScan && (EnvironmentEdgeManager.currentTimeMillis() - timestamp) <= timeToPurgeDeletes)
-          || kv.getMvccVersion() > maxReadPointToTrackVersions) {
+      if ((!isUserScan)
+          && timeToPurgeDeletes > 0
+          && (EnvironmentEdgeManager.currentTimeMillis() - timestamp)
+            <= timeToPurgeDeletes) {
+        return MatchCode.INCLUDE;
+      } else if (retainDeletesInOutput || kv.getMvccVersion() > maxReadPointToTrackVersions) {
        // always include or it is not time yet to check whether it is OK
        // to purge deltes or not
        if (!isUserScan) {
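
Why the restructuring matters: under the old condition, a delete marker carrying a future timestamp made (currentTimeMillis - timestamp) negative, which is always <= timeToPurgeDeletes, so the guard held even with the default TTL of 0 and the marker survived every major compaction. A standalone restatement of the before/after guard follows; the class and method names are illustrative, not code from this commit:

    // Hypothetical sketch (not from the patch): isolates the guard that
    // decides whether a delete marker is still within its purge TTL.
    final class DeleteTtlGuard {
      // Old guard: a future timestamp makes (now - ts) negative, which is
      // always <= ttl, so the marker was retained even when ttl == 0.
      static boolean keepMarkerOld(boolean isUserScan, long now, long ts, long ttl) {
        return !isUserScan && (now - ts) <= ttl;
      }

      // New guard: the TTL branch applies only when a positive TTL is
      // configured; with ttl == 0 the marker falls through, and major
      // compaction can purge it, future timestamp or not.
      static boolean keepMarkerNew(boolean isUserScan, long now, long ts, long ttl) {
        return !isUserScan && ttl > 0 && (now - ts) <= ttl;
      }
    }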


@@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint;
@@ -83,6 +84,7 @@ import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
@@ -227,6 +229,65 @@ public class TestFromClientSide {
     h.close();
   }
 
+  /**
+   * Basic client side validation of HBASE-10118
+   */
+  @Test
+  public void testPurgeFutureDeletes() throws Exception {
+    final byte[] TABLENAME = Bytes.toBytes("testPurgeFutureDeletes");
+    final byte[] ROW = Bytes.toBytes("row");
+    final byte[] FAMILY = Bytes.toBytes("family");
+    final byte[] COLUMN = Bytes.toBytes("column");
+    final byte[] VALUE = Bytes.toBytes("value");
+
+    HTable table = TEST_UTIL.createTable(TABLENAME, FAMILY);
+
+    // future timestamp
+    long ts = System.currentTimeMillis() * 2;
+    Put put = new Put(ROW, ts);
+    put.add(FAMILY, COLUMN, VALUE);
+    table.put(put);
+
+    Get get = new Get(ROW);
+    Result result = table.get(get);
+    assertArrayEquals(VALUE, result.getValue(FAMILY, COLUMN));
+
+    Delete del = new Delete(ROW);
+    del.deleteColumn(FAMILY, COLUMN, ts);
+    table.delete(del);
+
+    get = new Get(ROW);
+    result = table.get(get);
+    assertNull(result.getValue(FAMILY, COLUMN));
+
+    // major compaction, purged future deletes
+    TEST_UTIL.getHBaseAdmin().flush(TABLENAME);
+    TEST_UTIL.getHBaseAdmin().majorCompact(TABLENAME);
+
+    // waiting for the major compaction to complete
+    TEST_UTIL.waitFor(6000, new Waiter.Predicate<IOException>() {
+      @Override
+      public boolean evaluate() throws IOException {
+        try {
+          return TEST_UTIL.getHBaseAdmin().getCompactionState(TABLENAME) ==
+              AdminProtos.GetRegionInfoResponse.CompactionState.NONE;
+        } catch (InterruptedException e) {
+          throw new IOException(e);
+        }
+      }
+    });
+
+    put = new Put(ROW, ts);
+    put.add(FAMILY, COLUMN, VALUE);
+    table.put(put);
+
+    get = new Get(ROW);
+    result = table.get(get);
+    assertArrayEquals(VALUE, result.getValue(FAMILY, COLUMN));
+
+    table.close();
+  }
+
   @Test
   public void testSharedZooKeeper() throws Exception {
     Configuration newConfig = new Configuration(TEST_UTIL.getConfiguration());


@@ -568,6 +568,15 @@ htable.put(put);
 up on the user mailing list.</para>
 <para>Also see <xref linkend="keyvalue"/> for more information on the internal KeyValue format.
 </para>
+<para>Delete markers are purged during the next major compaction of the store,
+unless KEEP_DELETED_CELLS is set on the column family. In some scenarios,
+users want delete markers to survive for a while; this can be arranged by
+setting the delete TTL, hbase.hstore.time.to.purge.deletes, in the
+configuration. If this property is unset, or set to 0, all delete markers,
+including those with future timestamps, are purged during the next major
+compaction. Otherwise, a delete marker is kept until the first major
+compaction after the marker's timestamp plus the delete TTL.
+</para>
 </section>
 </section>
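
For completeness, a minimal client-side sketch of the two retention knobs the new paragraph describes, written against the client API of this era; the family name and the one-day TTL are illustrative values, not defaults:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;

    public class DeleteRetentionExample {
      public static void main(String[] args) {
        // Cluster-wide delete TTL: keep delete markers for one day past
        // their timestamp. Left unset or 0 (the default), all markers,
        // including future-dated ones, go away at the next major compaction.
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.time.to.purge.deletes", 24L * 60 * 60 * 1000);

        // Per-family alternative: retain deleted cells regardless of the TTL.
        HColumnDescriptor family = new HColumnDescriptor("family");
        family.setKeepDeletedCells(true);
      }
    }

In practice the property belongs in hbase-site.xml on the region servers; setting it on a client-side Configuration as above only illustrates the key and its millisecond unit.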