Merge branch 'master' into HBASE-26913-replication-observability-framework
commit 7229b2a1b1
@@ -1709,7 +1709,7 @@ public class HFileBlock implements Cacheable {
       if (!fileContext.isCompressedOrEncrypted()) {
         hFileBlock.sanityCheckUncompressed();
       }
-      LOG.trace("Read {} in {} ns", hFileBlock, duration);
+      LOG.trace("Read {} in {} ms", hFileBlock, duration);
       // Cache next block header if we read it for the next time through here.
       if (nextBlockOnDiskSize != -1) {
         cacheNextBlockHeader(offset + hFileBlock.getOnDiskSizeWithHeader(), onDiskBlock,
@@ -46,6 +46,9 @@ public final class MobConstants {
   public static final String MOB_CACHE_EVICT_REMAIN_RATIO = "hbase.mob.cache.evict.remain.ratio";
   public static final Tag MOB_REF_TAG =
     new ArrayBackedTag(TagType.MOB_REFERENCE_TAG_TYPE, HConstants.EMPTY_BYTE_ARRAY);
+  public static final String MOB_CLEANER_BATCH_SIZE_UPPER_BOUND =
+    "hbase.master.mob.cleaner.batch.size.upper.bound";
+  public static final int DEFAULT_MOB_CLEANER_BATCH_SIZE_UPPER_BOUND = 10000;
 
   public static final float DEFAULT_EVICT_REMAIN_RATIO = 0.5f;
   public static final long DEFAULT_MOB_CACHE_EVICT_PERIOD = 3600L;
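The hunk above only declares the new configuration key and its default of 10000. As a rough illustration (not part of this commit; the example class name is hypothetical), a caller could resolve the effective batch bound like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.mob.MobConstants;

// Illustrative sketch only: resolve the effective MOB cleaner batch bound,
// falling back to the default when the property is unset.
public class MobCleanerBatchBoundExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    int upperBound = conf.getInt(MobConstants.MOB_CLEANER_BATCH_SIZE_UPPER_BOUND,
      MobConstants.DEFAULT_MOB_CLEANER_BATCH_SIZE_UPPER_BOUND);
    System.out.println("MOB cleaner batch size upper bound: " + upperBound);
  }
}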
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hbase.mob;
 
+import static org.apache.hadoop.hbase.mob.MobConstants.DEFAULT_MOB_CLEANER_BATCH_SIZE_UPPER_BOUND;
+import static org.apache.hadoop.hbase.mob.MobConstants.MOB_CLEANER_BATCH_SIZE_UPPER_BOUND;
+
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.nio.ByteBuffer;
@@ -314,20 +317,30 @@ public final class MobUtils {
           }
           filesToClean
             .add(new HStoreFile(fs, file.getPath(), conf, cacheConfig, BloomType.NONE, true));
+          if (
+            filesToClean.size() >= conf.getInt(MOB_CLEANER_BATCH_SIZE_UPPER_BOUND,
+              DEFAULT_MOB_CLEANER_BATCH_SIZE_UPPER_BOUND)
+          ) {
+            if (
+              removeMobFiles(conf, fs, tableName, mobTableDir, columnDescriptor.getName(),
+                filesToClean)
+            ) {
+              deletedFileCount += filesToClean.size();
+            }
+            filesToClean.clear();
+          }
         }
       } catch (Exception e) {
         LOG.error("Cannot parse the fileName " + fileName, e);
       }
     }
-    if (!filesToClean.isEmpty()) {
-      try {
-        removeMobFiles(conf, fs, tableName, mobTableDir, columnDescriptor.getName(), filesToClean);
-        deletedFileCount = filesToClean.size();
-      } catch (IOException e) {
-        LOG.error("Failed to delete the mob files " + filesToClean, e);
-      }
+    if (
+      !filesToClean.isEmpty() && removeMobFiles(conf, fs, tableName, mobTableDir,
+        columnDescriptor.getName(), filesToClean)
+    ) {
+      deletedFileCount += filesToClean.size();
     }
-    LOG.info("{} expired mob files are deleted", deletedFileCount);
+    LOG.info("Table {} {} expired mob files in total are deleted", tableName, deletedFileCount);
   }
 
   /**
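The new cleaner loop follows an accumulate-and-flush shape: collect candidate files, delete a full batch once it reaches the configured bound, then delete whatever is left at the end. The sketch below is a hypothetical, self-contained rendering of that pattern with stand-in names; it is not the actual MobUtils code.

import java.util.ArrayList;
import java.util.List;

// Minimal sketch of the accumulate-and-flush deletion pattern.
public class BatchDeleteSketch {
  interface BatchDeleter {
    // Returns true if the whole batch was deleted successfully.
    boolean delete(List<String> batch);
  }

  static int cleanInBatches(List<String> candidates, int upperBound, BatchDeleter deleter) {
    List<String> batch = new ArrayList<>();
    int deleted = 0;
    for (String candidate : candidates) {
      batch.add(candidate);
      if (batch.size() >= upperBound) {
        if (deleter.delete(batch)) {
          deleted += batch.size();
        }
        batch.clear();
      }
    }
    // Flush the remainder that never filled a complete batch.
    if (!batch.isEmpty() && deleter.delete(batch)) {
      deleted += batch.size();
    }
    return deleted;
  }
}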
@@ -459,10 +472,17 @@ public final class MobUtils {
    * @param family The name of the column family.
    * @param storeFiles The files to be deleted.
    */
-  public static void removeMobFiles(Configuration conf, FileSystem fs, TableName tableName,
-    Path tableDir, byte[] family, Collection<HStoreFile> storeFiles) throws IOException {
-    HFileArchiver.archiveStoreFiles(conf, fs, getMobRegionInfo(tableName), tableDir, family,
-      storeFiles);
+  public static boolean removeMobFiles(Configuration conf, FileSystem fs, TableName tableName,
+    Path tableDir, byte[] family, Collection<HStoreFile> storeFiles) {
+    try {
+      HFileArchiver.archiveStoreFiles(conf, fs, getMobRegionInfo(tableName), tableDir, family,
+        storeFiles);
+      LOG.info("Table {} {} expired mob files are deleted", tableName, storeFiles.size());
+      return true;
+    } catch (IOException e) {
+      LOG.error("Failed to delete the mob files, table {}", tableName, e);
+    }
+    return false;
   }
 
   /**
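The signature change above trades a checked IOException for a boolean result, so callers branch on the return value instead of catching. A hypothetical caller sketch (stand-in names, not the real MobUtils API):

import java.util.Arrays;
import java.util.List;

// Hypothetical caller for the boolean contract; removeMobFiles here is a
// stand-in that simulates the real method's "log and return false on failure".
public class RemoveMobFilesCallerSketch {
  static boolean removeMobFiles(List<String> filesToClean) {
    // the real method archives the files and returns false if archiving throws
    return !filesToClean.isEmpty();
  }

  public static void main(String[] args) {
    List<String> filesToClean = Arrays.asList("mobfile-1", "mobfile-2");
    int deletedFileCount = 0;
    if (!filesToClean.isEmpty() && removeMobFiles(filesToClean)) {
      deletedFileCount += filesToClean.size();
    }
    System.out.println(deletedFileCount + " expired mob files are deleted");
  }
}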
@@ -432,14 +432,14 @@ public class MemStoreFlusher implements FlushRequester {
   }
 
   /**
-   * Return true if global memory usage is above the high watermark
+   * Return the FlushType if global memory usage is above the high watermark
    */
   private FlushType isAboveHighWaterMark() {
     return server.getRegionServerAccounting().isAboveHighWaterMark();
   }
 
   /**
-   * Return true if we're above the low watermark
+   * Return the FlushType if we're above the low watermark
    */
   private FlushType isAboveLowWaterMark() {
     return server.getRegionServerAccounting().isAboveLowWaterMark();
@@ -161,8 +161,8 @@ public class RegionServerAccounting {
   }
 
   /**
-   * Return true if we are above the memstore high water mark
-   * @return the flushtype
+   * Return the FlushType if we are above the memstore high water mark
+   * @return the FlushType
    */
   public FlushType isAboveHighWaterMark() {
     // for onheap memstore we check if the global memstore size and the
@@ -193,7 +193,8 @@ public class RegionServerAccounting {
   }
 
   /**
-   * Return true if we're above the low watermark
+   * Return the FlushType if we're above the low watermark
+   * @return the FlushType
    */
   public FlushType isAboveLowWaterMark() {
     // for onheap memstore we check if the global memstore size and the
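The javadoc fixes above reflect that these accessors return a FlushType rather than a boolean. A minimal sketch (hypothetical enum values and method, not the full HBase FlushType or RegionServerAccounting) of how such a return value is typically consumed:

// Callers compare against a NORMAL sentinel instead of testing a boolean.
public class FlushTypeCheckSketch {
  enum FlushType { NORMAL, ABOVE_LOWER_MARK, ABOVE_HIGHER_MARK }

  static FlushType isAboveLowWaterMark(long globalMemStoreSize, long lowerLimit) {
    return globalMemStoreSize >= lowerLimit ? FlushType.ABOVE_LOWER_MARK : FlushType.NORMAL;
  }

  public static void main(String[] args) {
    FlushType flushType = isAboveLowWaterMark(120L, 100L);
    if (flushType != FlushType.NORMAL) {
      System.out.println("Global memstore is above the low watermark: " + flushType);
    }
  }
}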
@@ -778,11 +778,11 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
   }
 
   protected void archive(final Pair<Path, Long> log) {
-    totalLogSize.addAndGet(-log.getSecond());
     int retry = 1;
     while (true) {
       try {
         archiveLogFile(log.getFirst());
+        totalLogSize.addAndGet(-log.getSecond());
         // successful
         break;
       } catch (Throwable e) {
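The hunk above moves the totalLogSize decrement so it happens only after archiveLogFile succeeds; since the surrounding loop retries on failure, decrementing up front could mis-account the total for an attempt that fails. A rough, self-contained sketch of that ordering (hypothetical names and retry bound, not the AbstractFSWAL API):

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

// Sketch of "account only after success" inside a bounded retry loop.
public class ArchiveAccountingSketch {
  private final AtomicLong totalLogSize = new AtomicLong(4096L);

  void archive(String logName, long logSize) {
    int retry = 1;
    final int maxRetries = 3;
    while (retry <= maxRetries) {
      try {
        archiveLogFile(logName);           // may fail transiently
        totalLogSize.addAndGet(-logSize);  // decrement only once the move succeeded
        return;
      } catch (IOException e) {
        retry++;
      }
    }
  }

  void archiveLogFile(String logName) throws IOException {
    // placeholder for the actual rename/move of the WAL file
  }

  public static void main(String[] args) {
    ArchiveAccountingSketch sketch = new ArchiveAccountingSketch();
    sketch.archive("wal-000001", 1024L);
    System.out.println("Remaining total log size: " + sketch.totalLogSize.get());
  }
}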
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.mob;
 
+import static org.apache.hadoop.hbase.mob.MobConstants.MOB_CLEANER_BATCH_SIZE_UPPER_BOUND;
 import static org.junit.Assert.assertEquals;
 
 import org.apache.hadoop.fs.FileStatus;
@@ -55,6 +56,7 @@ public class TestExpiredMobFileCleaner {
   private final static String family = "family";
   private final static byte[] row1 = Bytes.toBytes("row1");
   private final static byte[] row2 = Bytes.toBytes("row2");
+  private final static byte[] row3 = Bytes.toBytes("row3");
   private final static byte[] qf = Bytes.toBytes("qf");
 
   private static BufferedMutator table;
@@ -63,6 +65,7 @@ public class TestExpiredMobFileCleaner {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
+    TEST_UTIL.getConfiguration().setInt(MOB_CLEANER_BATCH_SIZE_UPPER_BOUND, 2);
   }
 
   @AfterClass
@@ -146,6 +149,14 @@ public class TestExpiredMobFileCleaner {
     String f2 = secondFiles[1].getPath().getName();
     String secondFile = f1.equals(firstFile) ? f2 : f1;
 
+    ts = EnvironmentEdgeManager.currentTime() - 4 * secondsOfDay() * 1000; // 4 days before
+    putKVAndFlush(table, row3, dummyData, ts);
+    ts = EnvironmentEdgeManager.currentTime() - 4 * secondsOfDay() * 1000; // 4 days before
+    putKVAndFlush(table, row3, dummyData, ts);
+    FileStatus[] thirdFiles = TEST_UTIL.getTestFileSystem().listStatus(mobDirPath);
+    // now there are 4 mob files
+    assertEquals("Before cleanup without delay 3", 4, thirdFiles.length);
+
     modifyColumnExpiryDays(2); // ttl = 2, make the first row expired
 
     // run the cleaner
@@ -156,7 +167,7 @@ public class TestExpiredMobFileCleaner {
 
     FileStatus[] filesAfterClean = TEST_UTIL.getTestFileSystem().listStatus(mobDirPath);
     String lastFile = filesAfterClean[0].getPath().getName();
-    // the first mob fie is removed
+    // there are 4 mob files in total, but only 3 need to be cleaned
     assertEquals("After cleanup without delay 1", 1, filesAfterClean.length);
     assertEquals("After cleanup without delay 2", secondFile, lastFile);
   }