HBASE-22163 Should not archive the compacted store files when region warmup

Guanghao 2019-04-08 22:11:07 +08:00 committed by Guanghao Zhang
parent a10ac6f4a1
commit 66fffaa6d0
17 changed files with 228 additions and 66 deletions
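
The diffs below thread a new boolean warmup flag from the region-warmup path (the HMaster move flow and HRegion.warmupHRegion) through HRegion.initializeStores and instantiateHStore into the HStore constructor and openStoreFiles, and skip archiving already-compacted store files while that flag is set. The following is a minimal, self-contained sketch of that pattern, not the actual HBase API; the class name and the string-based openStoreFiles signature are hypothetical stand-ins.

// WarmupArchiveSketch.java -- hypothetical illustration of the HBASE-22163 pattern.
import java.util.ArrayList;
import java.util.List;

public class WarmupArchiveSketch {
  // Hypothetical stand-in for HStore#openStoreFiles(Collection, boolean).
  static List<String> openStoreFiles(List<String> files, List<String> compactedFiles,
      boolean warmup) {
    List<String> results = new ArrayList<>(files);
    // Should not archive the compacted store files when warming up a region (HBASE-22163):
    // the source region may still be serving those files, e.g. to an open scanner.
    if (!warmup) {
      List<String> filesToRemove = new ArrayList<>();
      for (String storeFile : results) {
        if (compactedFiles.contains(storeFile)) {
          filesToRemove.add(storeFile);
        }
      }
      // In the real code the removed files are also moved to the archive directory here.
      results.removeAll(filesToRemove);
    }
    return results;
  }

  public static void main(String[] args) {
    List<String> files = List.of("hfile-a", "hfile-b", "hfile-c", "hfile-compacted-result");
    List<String> compacted = List.of("hfile-a", "hfile-b", "hfile-c");
    System.out.println(openStoreFiles(files, compacted, true).size());  // 4: nothing archived on warmup
    System.out.println(openStoreFiles(files, compacted, false).size()); // 1: compacted files dropped on real open
  }
}

Running the sketch prints 4 and then 1: with warmup=true every file stays in place, while a real open with warmup=false drops the compacted files. This mirrors the storefile counts (4, then 1) that the new TestNotCleanupCompactedFileWhenRegionWarmup test at the end of this commit asserts against the real HStore.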

View File

@@ -109,9 +109,9 @@ public class TestRefreshHFilesEndpoint extends TestRefreshHFilesBase {
   }
   public static class HStoreWithFaultyRefreshHFilesAPI extends HStore {
-    public HStoreWithFaultyRefreshHFilesAPI(final HRegion region, final ColumnFamilyDescriptor family,
-        final Configuration confParam) throws IOException {
-      super(region, family, confParam);
+    public HStoreWithFaultyRefreshHFilesAPI(final HRegion region,
+        final ColumnFamilyDescriptor family, final Configuration confParam) throws IOException {
+      super(region, family, confParam, false);
     }
     @Override

View File

@@ -191,7 +191,7 @@ public class CompactionTool extends Configured implements Tool {
       }
     };
     HRegion region = new HRegion(regionFs, null, conf, htd, null);
-    return new HStore(region, htd.getColumnFamily(Bytes.toBytes(familyName)), conf);
+    return new HStore(region, htd.getColumnFamily(Bytes.toBytes(familyName)), conf, false);
   }
 }

View File

@@ -134,6 +134,7 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
 import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
+import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;
 import org.apache.hadoop.hbase.master.procedure.RecoverMetaProcedure;
 import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
 import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
@@ -2004,13 +2005,15 @@ public class HMaster extends HRegionServer implements MasterServices {
     if (this.cpHost != null) {
       this.cpHost.preMove(hri, rp.getSource(), rp.getDestination());
     }
+    TransitRegionStateProcedure proc =
+      this.assignmentManager.createMoveRegionProcedure(rp.getRegionInfo(), rp.getDestination());
     // Warmup the region on the destination before initiating the move. this call
     // is synchronous and takes some time. doing it before the source region gets
     // closed
     serverManager.sendRegionWarmup(rp.getDestination(), hri);
     LOG.info(getClientIdAuditPrefix() + " move " + rp + ", running balancer");
-    Future<byte []> future = this.assignmentManager.moveAsync(rp);
+    Future<byte[]> future = ProcedureSyncWait.submitProcedure(this.procedureExecutor, proc);
     try {
       // Is this going to work? Will we throw exception on error?
       // TODO: CompletableFuture rather than this stunted Future.

View File

@@ -592,7 +592,7 @@ public class AssignmentManager {
     return proc.getProcId();
   }
-  private TransitRegionStateProcedure createMoveRegionProcedure(RegionInfo regionInfo,
+  public TransitRegionStateProcedure createMoveRegionProcedure(RegionInfo regionInfo,
       ServerName targetServer) throws HBaseIOException {
     RegionStateNode regionNode = this.regionStates.getRegionStateNode(regionInfo);
     if (regionNode == null) {

View File

@@ -104,8 +104,8 @@ public class HMobStore extends HStore {
   private final byte[] refCellTags;
   public HMobStore(final HRegion region, final ColumnFamilyDescriptor family,
-      final Configuration confParam) throws IOException {
-    super(region, family, confParam);
+      final Configuration confParam, boolean warmup) throws IOException {
+    super(region, family, confParam, warmup);
     this.family = family;
     this.mobFileCache = region.getMobFileCache();
     this.homePath = MobUtils.getMobHome(conf);

View File

@@ -1045,6 +1045,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    */
   private long initializeStores(CancelableProgressable reporter, MonitoredTask status)
       throws IOException {
+    return initializeStores(reporter, status, false);
+  }
+
+  private long initializeStores(CancelableProgressable reporter, MonitoredTask status,
+      boolean warmup) throws IOException {
     // Load in all the HStores.
     long maxSeqId = -1;
     // initialized to -1 so that we pick up MemstoreTS from column families
@@ -1062,7 +1067,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       completionService.submit(new Callable<HStore>() {
         @Override
         public HStore call() throws IOException {
-          return instantiateHStore(family);
+          return instantiateHStore(family, warmup);
         }
       });
     }
@@ -1122,7 +1127,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     // Initialize all the HStores
     status.setStatus("Warming up all the Stores");
     try {
-      initializeStores(reporter, status);
+      initializeStores(reporter, status, true);
     } finally {
       status.markComplete("Done warming up.");
     }
@@ -5760,17 +5765,17 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     return true;
   }
-  protected HStore instantiateHStore(final ColumnFamilyDescriptor family) throws IOException {
+  protected HStore instantiateHStore(final ColumnFamilyDescriptor family, boolean warmup)
+      throws IOException {
     if (family.isMobEnabled()) {
       if (HFile.getFormatVersion(this.conf) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
-        throw new IOException("A minimum HFile version of "
-            + HFile.MIN_FORMAT_VERSION_WITH_TAGS
-            + " is required for MOB feature. Consider setting " + HFile.FORMAT_VERSION_KEY
-            + " accordingly.");
+        throw new IOException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS +
+            " is required for MOB feature. Consider setting " + HFile.FORMAT_VERSION_KEY +
+            " accordingly.");
       }
-      return new HMobStore(this, family, this.conf);
+      return new HMobStore(this, family, this.conf, warmup);
     }
-    return new HStore(this, family, this.conf);
+    return new HStore(this, family, this.conf, warmup);
   }
   @Override

View File

@@ -238,7 +238,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
    * @throws IOException
    */
   protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
-      final Configuration confParam) throws IOException {
+      final Configuration confParam, boolean warmup) throws IOException {
     this.fs = region.getRegionFileSystem();
@@ -300,7 +300,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
     }
     this.storeEngine = createStoreEngine(this, this.conf, this.comparator);
-    List<HStoreFile> hStoreFiles = loadStoreFiles();
+    List<HStoreFile> hStoreFiles = loadStoreFiles(warmup);
     // Move the storeSize calculation out of loadStoreFiles() method, because the secondary read
     // replica's refreshStoreFiles() will also use loadStoreFiles() to refresh its store files and
     // update the storeSize in the completeCompaction(..) finally (just like compaction) , so
@@ -552,12 +552,13 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
    * from the given directory.
    * @throws IOException
    */
-  private List<HStoreFile> loadStoreFiles() throws IOException {
+  private List<HStoreFile> loadStoreFiles(boolean warmup) throws IOException {
     Collection<StoreFileInfo> files = fs.getStoreFiles(getColumnFamilyName());
-    return openStoreFiles(files);
+    return openStoreFiles(files, warmup);
   }
-  private List<HStoreFile> openStoreFiles(Collection<StoreFileInfo> files) throws IOException {
+  private List<HStoreFile> openStoreFiles(Collection<StoreFileInfo> files, boolean warmup)
+      throws IOException {
     if (CollectionUtils.isEmpty(files)) {
       return Collections.emptyList();
     }
@@ -611,6 +612,8 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
       throw ioe;
     }
+    // Should not archive the compacted store files when region warmup. See HBASE-22163.
+    if (!warmup) {
     // Remove the compacted files from result
     List<HStoreFile> filesToRemove = new ArrayList<>(compactedStoreFiles.size());
     for (HStoreFile storeFile : results) {
@@ -625,6 +628,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
       LOG.debug("Moving the files {} to archive", filesToRemove);
       this.fs.removeStoreFiles(this.getColumnFamilyDescriptor().getNameAsString(), filesToRemove);
     }
+    }
     return results;
   }
@@ -691,7 +695,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat
     }
     // try to open the files
-    List<HStoreFile> openedFiles = openStoreFiles(toBeAddedFiles);
+    List<HStoreFile> openedFiles = openStoreFiles(toBeAddedFiles, false);
     // propogate the file changes to the underlying store file manager
     replaceStoreFiles(toBeRemovedStoreFiles, openedFiles); //won't throw an exception

View File

@@ -2139,10 +2139,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       return response;
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Warming up Region " + region.getRegionNameAsString());
-    }
     htd = regionServer.tableDescriptors.get(region.getTable());
     if (regionServer.getRegionsInTransitionInRS().containsKey(encodedNameBytes)) {
@@ -2150,6 +2146,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       return response;
     }
+    LOG.info("Warming up region " + region.getRegionNameAsString());
     HRegion.warmupHRegion(region, htd, regionServer.getWAL(region),
       regionServer.getConfiguration(), regionServer, null);

View File

@@ -194,17 +194,19 @@ public class TestIOFencing {
         TableDescriptor htd, RegionServerServices rsServices) {
       super(tableDir, log, fs, confParam, info, htd, rsServices);
     }
     @Override
-    protected HStore instantiateHStore(final ColumnFamilyDescriptor family) throws IOException {
-      return new BlockCompactionsInCompletionHStore(this, family, this.conf);
+    protected HStore instantiateHStore(final ColumnFamilyDescriptor family, boolean warmup)
+        throws IOException {
+      return new BlockCompactionsInCompletionHStore(this, family, this.conf, warmup);
     }
   }
   public static class BlockCompactionsInCompletionHStore extends HStore {
     CompactionBlockerRegion r;
     protected BlockCompactionsInCompletionHStore(HRegion region, ColumnFamilyDescriptor family,
-        Configuration confParam) throws IOException {
-      super(region, family, confParam);
+        Configuration confParam, boolean warmup) throws IOException {
+      super(region, family, confParam, warmup);
       r = (CompactionBlockerRegion) region;
     }

View File

@@ -116,16 +116,17 @@ public class TestFromClientSideScanExcpetion {
     }
     @Override
-    protected HStore instantiateHStore(ColumnFamilyDescriptor family) throws IOException {
-      return new MyHStore(this, family, conf);
+    protected HStore instantiateHStore(ColumnFamilyDescriptor family, boolean warmup)
+        throws IOException {
+      return new MyHStore(this, family, conf, warmup);
     }
   }
   public static final class MyHStore extends HStore {
-    public MyHStore(HRegion region, ColumnFamilyDescriptor family, Configuration confParam)
-        throws IOException {
-      super(region, family, confParam);
+    public MyHStore(HRegion region, ColumnFamilyDescriptor family, Configuration confParam,
+        boolean warmup) throws IOException {
+      super(region, family, confParam, warmup);
     }
     @Override

View File

@@ -182,7 +182,7 @@ public class TestCacheOnWriteInSchema {
     region = TEST_UTIL.createLocalHRegion(info, htd, walFactory.getWAL(info));
     region.setBlockCache(BlockCacheFactory.createBlockCache(conf));
-    store = new HStore(region, hcd, conf);
+    store = new HStore(region, hcd, conf, false);
   }
   @After

View File

@@ -118,7 +118,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     this.regionServicesForStores = Mockito.spy(region.getRegionServicesForStores());
     ThreadPoolExecutor pool = (ThreadPoolExecutor) Executors.newFixedThreadPool(1);
     Mockito.when(regionServicesForStores.getInMemoryCompactionPool()).thenReturn(pool);
-    this.store = new HStore(region, hcd, conf);
+    this.store = new HStore(region, hcd, conf, false);
     long globalMemStoreLimit = (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage()
         .getMax() * MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false));

View File

@@ -106,7 +106,7 @@ public class TestCompactionPolicy {
     Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
     region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
-    store = new HStore(region, hcd, conf);
+    store = new HStore(region, hcd, conf, false);
     TEST_FILE = region.getRegionFileSystem().createTempName();
     fs.createNewFile(TEST_FILE);

View File

@@ -166,7 +166,7 @@ public class TestHMobStore {
     final WALFactory wals = new WALFactory(walConf, methodName);
     region = new HRegion(tableDir, wals.getWAL(info), fs, conf, info, td, null);
     region.setMobFileCache(new MobFileCache(conf));
-    store = new HMobStore(region, cfd, conf);
+    store = new HMobStore(region, cfd, conf, false);
     if (testStore) {
       init(conf, cfd);
     }

View File

@@ -6302,17 +6302,17 @@ public class TestHRegion {
      * @return If Mob is enabled, return HMobStore, otherwise return HStoreForTesting.
      */
     @Override
-    protected HStore instantiateHStore(final ColumnFamilyDescriptor family) throws IOException {
+    protected HStore instantiateHStore(final ColumnFamilyDescriptor family, boolean warmup)
+        throws IOException {
       if (family.isMobEnabled()) {
         if (HFile.getFormatVersion(this.conf) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
-          throw new IOException("A minimum HFile version of "
-              + HFile.MIN_FORMAT_VERSION_WITH_TAGS
-              + " is required for MOB feature. Consider setting " + HFile.FORMAT_VERSION_KEY
-              + " accordingly.");
+          throw new IOException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS +
+              " is required for MOB feature. Consider setting " + HFile.FORMAT_VERSION_KEY +
+              " accordingly.");
         }
-        return new HMobStore(this, family, this.conf);
+        return new HMobStore(this, family, this.conf, warmup);
       }
-      return new HStoreForTesting(this, family, this.conf);
+      return new HStoreForTesting(this, family, this.conf, warmup);
     }
   }
@@ -6328,8 +6328,8 @@ public class TestHRegion {
     protected HStoreForTesting(final HRegion region,
         final ColumnFamilyDescriptor family,
-        final Configuration confParam) throws IOException {
-      super(region, family, confParam);
+        final Configuration confParam, boolean warmup) throws IOException {
+      super(region, family, confParam, warmup);
     }
     @Override

View File

@@ -229,7 +229,7 @@ public class TestHStore {
       ColumnFamilyDescriptor hcd, MyStoreHook hook, boolean switchToPread) throws IOException {
     initHRegion(methodName, conf, builder, hcd, hook, switchToPread);
     if (hook == null) {
-      store = new HStore(region, hcd, conf);
+      store = new HStore(region, hcd, conf, false);
     } else {
       store = new MyStore(region, hcd, conf, hook, switchToPread);
     }
@@ -494,7 +494,8 @@ public class TestHStore {
     w.close();
     this.store.close();
     // Reopen it... should pick up two files
-    this.store = new HStore(this.store.getHRegion(), this.store.getColumnFamilyDescriptor(), c);
+    this.store =
+        new HStore(this.store.getHRegion(), this.store.getColumnFamilyDescriptor(), c, false);
     assertEquals(2, this.store.getStorefilesCount());
     result = HBaseTestingUtility.getFromStoreFile(store,
@@ -1524,7 +1525,7 @@ public class TestHStore {
     ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.of(family);
     initHRegion(name.getMethodName(), conf,
         TableDescriptorBuilder.newBuilder(TableName.valueOf(table)), hcd, null, false);
-    HStore store = new HStore(region, hcd, conf) {
+    HStore store = new HStore(region, hcd, conf, false) {
       @Override
       protected StoreEngine<?, ?, ?, ?> createStoreEngine(HStore store, Configuration conf,
@@ -1566,7 +1567,7 @@ public class TestHStore {
     MyStore(final HRegion region, final ColumnFamilyDescriptor family, final Configuration
         confParam, MyStoreHook hook, boolean switchToPread) throws IOException {
-      super(region, family, confParam);
+      super(region, family, confParam, false);
       this.hook = hook;
     }

View File

@@ -0,0 +1,149 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;

import static org.junit.Assert.assertEquals;

@Category({ LargeTests.class, RegionServerTests.class })
public class TestNotCleanupCompactedFileWhenRegionWarmup {
  private static final Logger LOG =
      LoggerFactory.getLogger(TestNotCleanupCompactedFileWhenRegionWarmup.class);

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestNotCleanupCompactedFileWhenRegionWarmup.class);

  private static HBaseTestingUtility TEST_UTIL;
  private static Admin admin;
  private static Table table;

  private static TableName TABLE_NAME = TableName.valueOf("TestCleanupCompactedFileAfterFailover");
  private static byte[] ROW = Bytes.toBytes("row");
  private static byte[] FAMILY = Bytes.toBytes("cf");
  private static byte[] QUALIFIER = Bytes.toBytes("cq");
  private static byte[] VALUE = Bytes.toBytes("value");

  @BeforeClass
  public static void beforeClass() throws Exception {
    TEST_UTIL = new HBaseTestingUtility();
    // Set the scanner lease to 20min, so the scanner can't be closed by RegionServer
    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 1200000);
    TEST_UTIL.getConfiguration()
        .setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 100);
    TEST_UTIL.getConfiguration().set("dfs.blocksize", "64000");
    TEST_UTIL.getConfiguration().set("dfs.namenode.fs-limits.min-block-size", "1024");
    TEST_UTIL.getConfiguration().set(TimeToLiveHFileCleaner.TTL_CONF_KEY, "0");
    TEST_UTIL.startMiniCluster(1);
    admin = TEST_UTIL.getAdmin();
  }

  @AfterClass
  public static void afterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }

  @Before
  public void before() throws Exception {
    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TABLE_NAME);
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY));
    admin.createTable(builder.build());
    TEST_UTIL.waitTableAvailable(TABLE_NAME);
    table = TEST_UTIL.getConnection().getTable(TABLE_NAME);
  }

  @After
  public void after() throws Exception {
    admin.disableTable(TABLE_NAME);
    admin.deleteTable(TABLE_NAME);
  }

  @Test
  public void testRegionWarmup() throws Exception {
    List<HRegion> regions = new ArrayList<>();
    for (JVMClusterUtil.RegionServerThread rsThread : TEST_UTIL.getHBaseCluster()
        .getLiveRegionServerThreads()) {
      HRegionServer rs = rsThread.getRegionServer();
      if (rs.getOnlineTables().contains(TABLE_NAME)) {
        regions.addAll(rs.getRegions(TABLE_NAME));
      }
    }
    assertEquals("Table should only have one region", 1, regions.size());
    HRegion region = regions.get(0);
    HStore store = region.getStore(FAMILY);
    writeDataAndFlush(3, region);
    assertEquals(3, store.getStorefilesCount());
    // Open a scanner and not close, then the storefile will be referenced
    store.getScanner(new Scan(), null, 0);
    region.compact(true);
    assertEquals(1, store.getStorefilesCount());
    // The compacted file should not be archived as there are references by user scanner
    assertEquals(3, store.getStoreEngine().getStoreFileManager().getCompactedfiles().size());
    HStore newStore = region.instantiateHStore(ColumnFamilyDescriptorBuilder.of(FAMILY), true);
    // Should not archive the compacted storefiles when region warmup
    assertEquals(4, newStore.getStorefilesCount());
    newStore = region.instantiateHStore(ColumnFamilyDescriptorBuilder.of(FAMILY), false);
    // Archived the compacted storefiles when region real open
    assertEquals(1, newStore.getStorefilesCount());
  }

  private void writeDataAndFlush(int fileNum, HRegion region) throws Exception {
    for (int i = 0; i < fileNum; i++) {
      for (int j = 0; j < 100; j++) {
        table.put(new Put(concat(ROW, j)).addColumn(FAMILY, QUALIFIER, concat(VALUE, j)));
      }
      region.flush(true);
    }
  }

  private byte[] concat(byte[] base, int index) {
    return Bytes.toBytes(Bytes.toString(base) + "-" + index);
  }
}