HBASE-17237 Override the correct compact method in HMobStore

This commit is contained in:
Jingcheng Du 2016-12-12 11:54:21 +08:00
parent c24a055db8
commit 1615f45b31
3 changed files with 16 additions and 19 deletions

View File

@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.mob.MobStoreEngine;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
@ -463,28 +464,24 @@ public class HMobStore extends HStore {
}
/**
* The compaction in the store of mob. * The compaction in the mob store.
* The cells in this store contains the path of the mob files. There might be race * The cells in this store contains the path of the mob files. There might be race
* condition between the major compaction and the sweeping in mob files. * condition between the major compaction and the mob major compaction.
* In order to avoid this, we need mutually exclude the running of the major compaction and * In order to avoid this, we need mutually exclude the running of the major compaction
* sweeping in mob files. * and the mob major compaction.
* The minor compaction is not affected. * The minor compaction is not affected.
* The major compaction is marked as retainDeleteMarkers when a sweeping is in progress. * The major compaction is marked as retainDeleteMarkers when a mob major
* compaction is in progress.
*/
@Override
public List<StoreFile> compact(CompactionContext compaction, public List<StoreFile> compact(CompactionContext compaction,
ThroughputController throughputController) throws IOException { ThroughputController throughputController, User user) throws IOException {
// If it's major compaction, try to find whether there's a sweeper is running // If it's major compaction, try to find whether there's a mob major compaction is running
// If yes, mark the major compaction as retainDeleteMarkers // If yes, mark the major compaction as retainDeleteMarkers
if (compaction.getRequest().isAllFiles()) {
// Use the ZooKeeper to coordinate. // Acquire a table lock to coordinate.
// 1. Acquire a operation lock. // 1. If no, mark the major compaction as retainDeleteMarkers and continue the compaction.
// 1.1. If no, mark the major compaction as retainDeleteMarkers and continue the compaction. // 2. If the lock is obtained, run the compaction directly.
// 1.2. If the lock is obtained, search the node of sweeping.
// 1.2.1. If the node is there, the sweeping is in progress, mark the major
// compaction as retainDeleteMarkers and continue the compaction.
// 1.2.2. If the node is not there, add a child to the major compaction node, and
// run the compaction directly.
TableLock lock = null;
if (tableLockManager != null) {
lock = tableLockManager.readLock(tableLockName, "Major compaction in HMobStore");
@ -510,7 +507,7 @@ public class HMobStore extends HStore {
+ tableName + "], forcing the delete markers to be retained");
compaction.getRequest().forceRetainDeleteMarkers();
}
return super.compact(compaction, throughputController); return super.compact(compaction, throughputController, user);
} finally {
if (tableLocked && lock != null) {
try {
@ -522,7 +519,7 @@ public class HMobStore extends HStore {
}
} else {
// If it's not a major compaction, continue the compaction.
return super.compact(compaction, throughputController); return super.compact(compaction, throughputController, user);
}
}

View File

@ -537,7 +537,7 @@ public class TestHMobStore {
// Trigger major compaction
this.store.triggerMajorCompaction();
CompactionContext requestCompaction = this.store.requestCompaction(1, null);
this.store.compact(requestCompaction, NoLimitThroughputController.INSTANCE); this.store.compact(requestCompaction, NoLimitThroughputController.INSTANCE, null);
Assert.assertEquals(1, this.store.getStorefiles().size());
//Check encryption after compaction

View File

@ -381,7 +381,7 @@ public class TestStore {
Assert.assertEquals(lowestTimeStampFromManager,lowestTimeStampFromFS);
// after compact; check the lowest time stamp
store.compact(store.requestCompaction(), NoLimitThroughputController.INSTANCE); store.compact(store.requestCompaction(), NoLimitThroughputController.INSTANCE, null);
lowestTimeStampFromManager = StoreUtils.getLowestTimestamp(store.getStorefiles());
lowestTimeStampFromFS = getLowestTimeStampFromFS(fs, store.getStorefiles());
Assert.assertEquals(lowestTimeStampFromManager, lowestTimeStampFromFS);