HBASE-24581 Skip compaction request/check for replica regions at the early stage. (#1986)

Signed-off-by: Nick Dimiduk <ndimiduk@apache.org>
This commit is contained in:
huaxiangsun 2020-07-14 15:49:20 -07:00 committed by GitHub
parent 1360bee7f9
commit 2505c7760d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 20 additions and 7 deletions

View File

@@ -1781,9 +1781,11 @@ public class HRegionServer extends Thread implements
@Override
protected void chore() {
for (Region r : this.instance.onlineRegions.values()) {
if (r == null) {
// Skip compaction if region is read only
if (r == null || r.isReadOnly()) {
continue;
}
HRegion hr = (HRegion) r;
for (HStore s : hr.stores.values()) {
try {
@@ -2289,9 +2291,12 @@ public class HRegionServer extends Thread implements
LOG.info("Post open deploy tasks for {}, pid={}, masterSystemTime={}",
r.getRegionInfo().getRegionNameAsString(), openProcId, masterSystemTime);
// Do checks to see if we need to compact (references or too many files)
for (HStore s : r.stores.values()) {
if (s.hasReferences() || s.needsCompaction()) {
this.compactSplitThread.requestSystemCompaction(r, s, "Opening Region");
// Skip compaction check if region is read only
if (!r.isReadOnly()) {
for (HStore s : r.stores.values()) {
if (s.hasReferences() || s.needsCompaction()) {
this.compactSplitThread.requestSystemCompaction(r, s, "Opening Region");
}
}
}
long openSeqNum = r.getOpenSeqNum();

View File

@@ -19,6 +19,7 @@
package org.apache.hadoop.hbase.master.assignment;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
@@ -31,6 +32,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.client.RegionReplicaTestHelper;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -78,13 +80,14 @@ public class TestRegionReplicaSplit {
Table table = HTU.createTable(builder.build(), new byte[][] { f }, getSplits(2),
new Configuration(HTU.getConfiguration()));
HTU.loadTable(HTU.getConnection().getTable(tableName), f);
HTU.flush(tableName);
return table;
}
private static byte[][] getSplits(int numRegions) {
RegionSplitter.UniformSplit split = new RegionSplitter.UniformSplit();
split.setFirstRow(Bytes.toBytes(0L));
split.setLastRow(Bytes.toBytes(Long.MAX_VALUE));
split.setFirstRow(Bytes.toBytes("a"));
split.setLastRow(Bytes.toBytes("z"));
return split.split(numRegions);
}
@@ -109,11 +112,16 @@
}
}
// There are 6 regions before split, 9 regions after split.
HTU.getAdmin().split(table.getName(), Bytes.toBytes(1));
HTU.getAdmin().split(table.getName(), Bytes.toBytes("d"));
int count = 0;
while (true) {
for (RegionServerThread rs : HTU.getMiniHBaseCluster().getRegionServerThreads()) {
for (Region r : rs.getRegionServer().getRegions(table.getName())) {
// Make sure that every region has some data (even for split daughter regions).
if (RegionReplicaUtil.isDefaultReplica(r.getRegionInfo())) {
assertTrue(r.getStore(f).hasReferences() ||
r.getStore(f).getStorefiles().size() > 0);
}
count++;
}
}