HBASE-1003 If cell exceeds TTL but not VERSIONs, will not be removed during major compaction

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@718370 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2008-11-17 20:37:34 +00:00
parent fc56f9ca0d
commit 6aec3c3e37
5 changed files with 40 additions and 26 deletions

CHANGES.txt

@@ -74,6 +74,8 @@ Release 0.19.0 - Unreleased
    HBASE-996  Migration script to up the versions in catalog tables
    HBASE-991  Update the mapred package document examples so they work with
               TRUNK/0.19.0.
+   HBASE-1003 If cell exceeds TTL but not VERSIONs, will not be removed during
+              major compaction

  IMPROVEMENTS
    HBASE-901  Add a limit to key length, check key and value length on client side

HStore.java

@@ -1109,21 +1109,19 @@ public class HStore implements HConstants {
         timesSeen = 1;
       }

-      // Added majorCompaction here to make sure all versions make it to
-      // the major compaction so we do not remove the wrong last versions
-      // this effected HBASE-826
-      if (timesSeen <= family.getMaxVersions() || !majorCompaction) {
-        // Keep old versions until we have maxVersions worth.
-        // Then just skip them.
-        if (sk.getRow().length != 0 && sk.getColumn().length != 0) {
-          // Only write out objects with non-zero length key and value
-          if (!isExpired(sk, ttl, now)) {
+      // Don't write empty rows or columns. Only remove cells on major
+      // compaction. Remove if expired or > VERSIONS
+      if (sk.getRow().length != 0 && sk.getColumn().length != 0) {
+        boolean expired = false;
+        if (!majorCompaction ||
+            (timesSeen <= family.getMaxVersions() &&
+              !(expired = isExpired(sk, ttl, now)))) {
           compactedOut.append(sk, vals[smallestKey]);
-          } else {
-            // HBASE-855 remove one from timesSeen because it did not make it
-            // past expired check -- don't count against max versions.
-            timesSeen--;
-          }
         }
+        if (expired) {
+          // HBASE-855 remove one from timesSeen because it did not make it
+          // past expired check -- don't count against max versions.
+          timesSeen--;
+        }
       }

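Read as a predicate, the new logic keeps a cell unless the compaction is major and the cell is either past its TTL or beyond the family's maximum versions. A minimal standalone sketch of that rule, with illustrative names and a stand-in FOREVER sentinel rather than the committed HStore code:

public final class RetentionRuleSketch {
  // Stand-in for HConstants.FOREVER, meaning "no TTL configured";
  // the exact sentinel value here is illustrative.
  static final long FOREVER = Long.MAX_VALUE;

  // A cell survives compaction unless this is a major compaction and
  // the cell is expired or over the version limit (HBASE-1003).
  static boolean keep(final boolean majorCompaction, final int timesSeen,
      final int maxVersions, final long timestamp, final long ttl,
      final long now) {
    final boolean expired = ttl != FOREVER && now > timestamp + ttl;
    return !majorCompaction || (timesSeen <= maxVersions && !expired);
  }
}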
@@ -1144,7 +1142,7 @@ public class HStore implements HConstants {
       closeCompactionReaders(Arrays.asList(rdrs));
     }
   }
-
+
   private void closeCompactionReaders(final List<MapFile.Reader> rdrs) {
     for (MapFile.Reader r: rdrs) {
       try {
@@ -1712,12 +1710,7 @@ public class HStore implements HConstants {
   static boolean isExpired(final HStoreKey hsk, final long ttl,
       final long now) {
-    boolean result = ttl != HConstants.FOREVER && now > hsk.getTimestamp() + ttl;
-    if (result && LOG.isDebugEnabled()) {
-      LOG.debug("rowAtOrBeforeCandidate 1:" + hsk +
-        ": expired, skipped");
-    }
-    return result;
+    return ttl != HConstants.FOREVER && now > hsk.getTimestamp() + ttl;
   }

   /* Find a candidate for row that is at or before passed key, sk, in mapfile.

Hash.java

@@ -24,6 +24,9 @@ import org.apache.hadoop.conf.Configuration;
  * This class represents a common API for hashing functions.
  */
 public abstract class Hash {
+  // TODO: Fix the design tangle that has classes over in org.onelab.filter
+  // referring to this class. Would need to also move the Jenkins and Murmur
+  // hashing functions too.
   /** Constant to denote invalid hash type. */
   public static final int INVALID_HASH = -1;
   /** Constant to denote {@link JenkinsHash}. */

HashFunction.java

@@ -50,8 +50,6 @@
 package org.onelab.filter;

 import org.apache.hadoop.hbase.util.Hash;
-import org.apache.hadoop.hbase.util.JenkinsHash;
-import org.apache.hadoop.hbase.util.MurmurHash;

 /**
  * Implements a hash object that returns a certain number of hashed values.
@@ -102,15 +100,14 @@ public final class HashFunction {
   }//end constructor

   /** Clears <i>this</i> hash function. A NOOP */
-  public void clear(){
-  }//end clear()
+  public void clear() {
+  }

   /**
    * Hashes a specified key into several integers.
    * @param k The specified key.
    * @return The array of hashed values.
    */
-  @SuppressWarnings("unchecked")
   public int[] hash(Key k){
     byte[] b = k.getBytes();
     if(b == null) {

TestCompaction.java

@@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.io.MapFile;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HStoreKey;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -147,6 +148,24 @@ public class TestCompaction extends HBaseTestCase {
       }
     }
     assertTrue(containsStartRow);
+    // Do a simple TTL test.
+    final int ttlInSeconds = 1;
+    for (HStore store: this.r.stores.values()) {
+      store.ttl = ttlInSeconds * 1000;
+    }
+    Thread.sleep(ttlInSeconds * 1000);
+    r.compactStores(true);
+    int count = 0;
+    for (MapFile.Reader reader: this.r.stores.
+        get(Bytes.mapKey(COLUMN_FAMILY_TEXT_MINUS_COLON)).getReaders()) {
+      reader.reset();
+      HStoreKey key = new HStoreKey();
+      ImmutableBytesWritable val = new ImmutableBytesWritable();
+      while(reader.next(key, val)) {
+        count++;
+      }
+    }
+    assertTrue(count == 0);
   }

   private void createStoreFile(final HRegion region) throws IOException {
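The assertion holds by simple expiry arithmetic: every cell was written before the one-second sleep, so by the time of the major compaction now > timestamp + ttl is true for all of them and no cell survives. A self-contained illustration of that check, with a hypothetical class name, mirroring the simplified isExpired above:

public final class TtlArithmeticSketch {
  // Same comparison the simplified isExpired performs once a TTL is set.
  static boolean isExpired(final long timestamp, final long ttl, final long now) {
    return now > timestamp + ttl;
  }

  public static void main(final String[] args) throws InterruptedException {
    final long t0 = System.currentTimeMillis();
    final long ttlMillis = 1000L; // the 1-second TTL the test sets on each store
    Thread.sleep(ttlMillis + 1);
    // Prints "true": a cell written at t0 is now past its TTL and would
    // be dropped by a major compaction.
    System.out.println(isExpired(t0, ttlMillis, System.currentTimeMillis()));
  }
}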