HBASE-22796 [HBCK2] Add fix of overlaps to fixMeta hbck Service

Signed-off-by: Sakthi <sakthi@apache.org>
This commit is contained in:
stack 2019-09-06 14:56:05 +02:00
parent 63db1d69a1
commit e9e6202f9c
6 changed files with 348 additions and 50 deletions

View File

@ -794,6 +794,9 @@ public interface RegionInfo {
* @see #isDegenerate()
*/
default boolean isOverlap(RegionInfo other) {
if (other == null) {
return false;
}
if (!getTable().equals(other.getTable())) {
return false;
}

View File

@ -457,6 +457,12 @@ public class CatalogJanitor extends ScheduledChore {
return this.holes;
}
/**
 * @return Overlap pairs found as we scanned hbase:meta; ordered by hbase:meta
 *   table sort. Pairs of overlaps may have overlap with subsequent pairs.
 * @see MetaFixer#calculateMerges(int, List) where we aggregate overlaps
 *   for a single 'merge' call.
 */
public List<Pair<RegionInfo, RegionInfo>> getOverlaps() {
  // Direct reference to the internal list; callers should treat it read-only.
  return this.overlaps;
}
@ -479,20 +485,20 @@ public class CatalogJanitor extends ScheduledChore {
@Override
public String toString() {
StringBuffer sb = new StringBuffer();
StringBuilder sb = new StringBuilder();
for (Pair<RegionInfo, RegionInfo> p: this.holes) {
if (sb.length() > 0) {
sb.append(", ");
}
sb.append("hole=" + p.getFirst().getRegionNameAsString() + "/" +
p.getSecond().getRegionNameAsString());
sb.append("hole=").append(p.getFirst().getRegionNameAsString()).append("/").
append(p.getSecond().getRegionNameAsString());
}
for (Pair<RegionInfo, RegionInfo> p: this.overlaps) {
if (sb.length() > 0) {
sb.append(", ");
}
sb.append("overlap=" + p.getFirst().getRegionNameAsString() + "/" +
p.getSecond().getRegionNameAsString());
sb.append("overlap=").append(p.getFirst().getRegionNameAsString()).append("/").
append(p.getSecond().getRegionNameAsString());
}
for (byte [] r: this.emptyRegionInfo) {
if (sb.length() > 0) {
@ -534,6 +540,16 @@ public class CatalogJanitor extends ScheduledChore {
*/
private RegionInfo previous = null;
/**
* Keep account of the highest end key seen as we move through hbase:meta.
* Usually, the current RegionInfo has the highest end key but if an overlap,
* this may no longer hold. An overlap may be a region with startkey 'd' and
* endkey 'g'. The next region in meta may be 'e' to 'f' and then 'f' to 'g'.
* Looking at previous and current meta row, we won't know about the 'd' to 'g'
* overlap unless we keep a running 'highest-endpoint-seen'.
*/
private RegionInfo highestEndKeyRegionInfo = null;
/**
 * @param services May be null (checkServer skips its checks when services
 *   is absent — e.g. in tests run without a Master).
 */
ReportMakingVisitor(MasterServices services) {
  this.services = services;
}
@ -587,7 +603,7 @@ public class CatalogJanitor extends ScheduledChore {
MetaTableAccessor.getRegionInfoColumn());
} else {
ri = locations.getDefaultRegionLocation().getRegion();
checkServer(metaTableRow.getRow(), locations);
checkServer(locations);
}
if (ri == null) {
@ -615,13 +631,22 @@ public class CatalogJanitor extends ScheduledChore {
if (!this.previous.isNext(ri)) {
if (this.previous.isOverlap(ri)) {
addOverlap(this.previous, ri);
} else if (ri.isOverlap(this.highestEndKeyRegionInfo)) {
// We may have seen a region a few rows back that overlaps this one.
addOverlap(this.highestEndKeyRegionInfo, ri);
} else {
addHole(this.previous, ri);
}
} else if (ri.isOverlap(this.highestEndKeyRegionInfo)) {
// We may have seen a region a few rows back that overlaps this one
// even though it properly 'follows' the region just before.
addOverlap(this.highestEndKeyRegionInfo, ri);
}
}
}
this.previous = ri;
this.highestEndKeyRegionInfo =
MetaFixer.getRegionInfoWithLargestEndKey(ri, this.highestEndKeyRegionInfo);
return ri;
}
@ -658,7 +683,7 @@ public class CatalogJanitor extends ScheduledChore {
/**
* Run through referenced servers and save off unknown and the dead.
*/
private void checkServer(byte [] metaTableRow, RegionLocations locations) {
private void checkServer(RegionLocations locations) {
if (this.services == null) {
// Can't do this test if no services.
return;
@ -672,6 +697,11 @@ public class CatalogJanitor extends ScheduledChore {
if (sn == null) {
continue;
}
if (location.getRegion() == null) {
LOG.warn("Empty RegionInfo in {}", location);
// This should never happen but if it does, will mess up below.
continue;
}
// skip the offline regions which belong to disabled table.
if (isTableDisabled(location.getRegion())) {
continue;
@ -680,7 +710,7 @@ public class CatalogJanitor extends ScheduledChore {
isServerKnownAndOnline(sn);
switch (state) {
case UNKNOWN:
this.report.unknownServers.add(new Pair(location.getRegion(), sn));
this.report.unknownServers.add(new Pair<>(location.getRegion(), sn));
break;
default:

View File

@ -18,8 +18,12 @@
package org.apache.hadoop.hbase.master;
import java.io.IOException;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
@ -28,6 +32,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.exceptions.MergeRegionException;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
@ -36,20 +41,32 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
/**
* Server-side fixing of bad or inconsistent state in hbase:meta.
* Distinct from MetaTableAccessor because {@link MetaTableAccessor} is about low-level
* manipulations driven by the Master. This class MetaFixer is
* employed by the Master and it 'knows' about holes and orphans
* and encapsulates their fixing on behalf of the Master.
*/
@InterfaceAudience.Private
class MetaFixer {
private static final Logger LOG = LoggerFactory.getLogger(MetaFixer.class);
private static final String MAX_MERGE_COUNT_KEY = "hbase.master.metafixer.max.merge.count";
private static final int MAX_MERGE_COUNT_DEFAULT = 10;
private final MasterServices masterServices;
/**
* Maximum for many regions to merge at a time.
*/
private final int maxMergeCount;
MetaFixer(MasterServices masterServices) {
this.masterServices = masterServices;
this.maxMergeCount = this.masterServices.getConfiguration().
getInt(MAX_MERGE_COUNT_KEY, MAX_MERGE_COUNT_DEFAULT);
}
void fix() throws IOException {
@ -66,14 +83,12 @@ class MetaFixer {
/**
* If hole, it papers it over by adding a region in the filesystem and to hbase:meta.
* Does not assign.
* @return True if we fixed any 'holes'.
*/
boolean fixHoles(CatalogJanitor.Report report) throws IOException {
boolean result = false;
void fixHoles(CatalogJanitor.Report report) throws IOException {
List<Pair<RegionInfo, RegionInfo>> holes = report.getHoles();
if (holes.isEmpty()) {
LOG.debug("No holes.");
return result;
return;
}
for (Pair<RegionInfo, RegionInfo> p: holes) {
RegionInfo ri = getHoleCover(p);
@ -86,11 +101,10 @@ class MetaFixer {
// in hbase:meta (if the below fails). Should be able to rerun the fix.
// The second call to createRegionDir will just go through. Idempotent.
Put put = MetaTableAccessor.makePutFromRegionInfo(ri, HConstants.LATEST_TIMESTAMP);
MetaTableAccessor.putsToMetaTable(this.masterServices.getConnection(), Arrays.asList(put));
MetaTableAccessor.putsToMetaTable(this.masterServices.getConnection(),
Collections.singletonList(put));
LOG.info("Fixed hole by adding {}; region is NOT assigned (assign to online).", ri);
result = true;
}
return result;
}
/**
@ -136,28 +150,94 @@ class MetaFixer {
return RegionInfoBuilder.newBuilder(tn).setStartKey(start).setEndKey(end).build();
}
boolean fixOverlaps(CatalogJanitor.Report report) throws IOException {
boolean result = false;
List<Pair<RegionInfo, RegionInfo>> overlaps = report.getOverlaps();
/**
 * Fix overlaps noted in CJ consistency report by submitting merges of the
 * overlapping regions. A failed merge is logged and skipped so the remaining
 * merge sets still get their chance to run.
 */
void fixOverlaps(CatalogJanitor.Report report) throws IOException {
  List<SortedSet<RegionInfo>> mergeSets = calculateMerges(maxMergeCount, report.getOverlaps());
  for (SortedSet<RegionInfo> mergeSet : mergeSets) {
    RegionInfo[] mergeArray = mergeSet.toArray(new RegionInfo[0]);
    try {
      // Non-forcible merge; no nonces.
      this.masterServices.mergeRegions(mergeArray, false, HConstants.NO_NONCE,
        HConstants.NO_NONCE);
    } catch (MergeRegionException mre) {
      LOG.warn("Failed overlap fix of {}", mergeArray, mre);
    }
  }
}
/**
* Run through <code>overlaps</code> and return a list of merges to run.
* Presumes overlaps are ordered (which they are coming out of the CatalogJanitor
* consistency report).
* @param maxMergeCount Maximum regions to merge at a time (avoid merging
* 100k regions in one go!)
*/
@VisibleForTesting
static List<SortedSet<RegionInfo>> calculateMerges(int maxMergeCount,
List<Pair<RegionInfo, RegionInfo>> overlaps) {
if (overlaps.isEmpty()) {
LOG.debug("No overlaps.");
return result;
return Collections.emptyList();
}
for (Pair<RegionInfo, RegionInfo> p: overlaps) {
RegionInfo ri = getHoleCover(p);
if (ri == null) {
continue;
List<SortedSet<RegionInfo>> merges = new ArrayList<>();
SortedSet<RegionInfo> currentMergeSet = new TreeSet<>();
RegionInfo regionInfoWithlargestEndKey = null;
for (Pair<RegionInfo, RegionInfo> pair: overlaps) {
if (regionInfoWithlargestEndKey != null) {
if (!isOverlap(regionInfoWithlargestEndKey, pair) ||
currentMergeSet.size() >= maxMergeCount) {
merges.add(currentMergeSet);
currentMergeSet = new TreeSet<>();
}
Configuration configuration = this.masterServices.getConfiguration();
HRegion.createRegionDir(configuration, ri, FSUtils.getRootDir(configuration));
// If an error here, then we'll have a region in the filesystem but not
// in hbase:meta (if the below fails). Should be able to rerun the fix.
// The second call to createRegionDir will just go through. Idempotent.
Put put = MetaTableAccessor.makePutFromRegionInfo(ri, HConstants.LATEST_TIMESTAMP);
MetaTableAccessor.putsToMetaTable(this.masterServices.getConnection(), Arrays.asList(put));
LOG.info("Fixed hole by adding {}; region is NOT assigned (assign to online).", ri);
result = true;
}
return result;
currentMergeSet.add(pair.getFirst());
currentMergeSet.add(pair.getSecond());
regionInfoWithlargestEndKey = getRegionInfoWithLargestEndKey(
getRegionInfoWithLargestEndKey(pair.getFirst(), pair.getSecond()),
regionInfoWithlargestEndKey);
}
merges.add(currentMergeSet);
return merges;
}
/**
 * @return Either <code>a</code> or <code>b</code>, whichever has the
 *   endkey that is furthest along in the Table. May return null only when
 *   both arguments are null.
 */
@VisibleForTesting
static RegionInfo getRegionInfoWithLargestEndKey(RegionInfo a, RegionInfo b) {
  if (a == null) {
    // b may be null.
    return b;
  }
  if (b == null) {
    // a is non-null here (checked above), so it wins by default.
    return a;
  }
  if (!a.getTable().equals(b.getTable())) {
    // This is an odd one. This should be the right answer.
    return b;
  }
  // A region with an empty end key ('last' region) reaches the end of the
  // table; nothing can be further along.
  if (a.isLast()) {
    return a;
  }
  if (b.isLast()) {
    return b;
  }
  int compare = Bytes.compareTo(a.getEndKey(), b.getEndKey());
  // Ties go to 'a'.
  return compare >= 0 ? a : b;
}
/**
 * @return True if an overlap found between passed in <code>ri</code> and
 *   either member of the <code>pair</code>. Does NOT check whether the pair
 *   members overlap each other.
 */
@VisibleForTesting
static boolean isOverlap(RegionInfo ri, Pair<RegionInfo, RegionInfo> pair) {
  if (ri == null || pair == null) {
    // No overlap possible when either argument is absent.
    return false;
  }
  boolean overlapsFirst = ri.isOverlap(pair.getFirst());
  return overlapsFirst || ri.isOverlap(pair.getSecond());
}
}

View File

@ -294,7 +294,7 @@ public class RegionStateNode implements Comparable<RegionStateNode> {
RegionInfo ri = getRegionInfo();
State s = state;
if (s != State.OPEN) {
throw new DoNotRetryRegionException(ri.getEncodedName() + " is no OPEN; state=" + s);
throw new DoNotRetryRegionException(ri.getEncodedName() + " is not OPEN; state=" + s);
}
if (ri.isSplitParent()) {
throw new DoNotRetryRegionException(

View File

@ -17,7 +17,10 @@
*/
package org.apache.hadoop.hbase.master;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.HBaseClassTestRule;
@ -26,8 +29,11 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.util.Threads;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
@ -36,9 +42,6 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Category({MasterTests.class, LargeTests.class})
public class TestMetaFixer {
@ -47,7 +50,6 @@ public class TestMetaFixer {
HBaseClassTestRule.forClass(TestMetaFixer.class);
@Rule
public TestName name = new TestName();
private static final Logger LOG = LoggerFactory.getLogger(TestMetaFixer.class);
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
@ -75,21 +77,20 @@ public class TestMetaFixer {
MasterServices services = TEST_UTIL.getHBaseCluster().getMaster();
services.getCatalogJanitor().scan();
CatalogJanitor.Report report = services.getCatalogJanitor().getLastReport();
Assert.assertTrue(report.isEmpty());
assertTrue(report.isEmpty());
int originalCount = ris.size();
// Remove first, last and middle region. See if hole gets plugged. Table has 26 regions.
deleteRegion(services, ris.get(ris.size() -1));
deleteRegion(services, ris.get(3));
deleteRegion(services, ris.get(0));
ris = MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tn);
services.getCatalogJanitor().scan();
report = services.getCatalogJanitor().getLastReport();
Assert.assertEquals(report.toString(), 3, report.getHoles().size());
MetaFixer fixer = new MetaFixer(services);
Assert.assertTrue(fixer.fixHoles(report));
fixer.fixHoles(report);
services.getCatalogJanitor().scan();
report = services.getCatalogJanitor().getLastReport();
Assert.assertTrue(report.toString(), report.isEmpty());
assertTrue(report.toString(), report.isEmpty());
// Disable and reenable so the added regions get reassigned.
TEST_UTIL.getAdmin().disableTable(tn);
TEST_UTIL.getAdmin().enableTable(tn);
@ -110,18 +111,61 @@ public class TestMetaFixer {
List<RegionInfo> ris = MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tn);
MasterServices services = TEST_UTIL.getHBaseCluster().getMaster();
services.getCatalogJanitor().scan();
CatalogJanitor.Report report = services.getCatalogJanitor().getLastReport();
int originalCount = ris.size();
deleteRegion(services, ris.get(0));
services.getCatalogJanitor().scan();
report = services.getCatalogJanitor().getLastReport();
CatalogJanitor.Report report = services.getCatalogJanitor().getLastReport();
ris = MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tn);
Assert.assertTrue(ris.isEmpty());
assertTrue(ris.isEmpty());
MetaFixer fixer = new MetaFixer(services);
Assert.assertFalse(fixer.fixHoles(report));
fixer.fixHoles(report);
report = services.getCatalogJanitor().getLastReport();
Assert.assertTrue(report.isEmpty());
assertTrue(report.isEmpty());
ris = MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tn);
Assert.assertEquals(0, ris.size());
}
/**
 * Manufactures an overlap for tests: inserts into hbase:meta (and assigns) a
 * region spanning from {@code a}'s start key through {@code b}'s end key.
 */
private static void makeOverlap(MasterServices services, RegionInfo a, RegionInfo b)
    throws IOException {
  RegionInfo spanningRegion = RegionInfoBuilder.newBuilder(a.getTable())
      .setStartKey(a.getStartKey())
      .setEndKey(b.getEndKey())
      .build();
  Put put = MetaTableAccessor.makePutFromRegionInfo(spanningRegion, System.currentTimeMillis());
  MetaTableAccessor.putsToMetaTable(services.getConnection(), Collections.singletonList(put));
  // TODO: Add checks at assign time to PREVENT being able to assign over existing assign.
  services.getAssignmentManager().assign(spanningRegion);
}
/**
 * Manufacture overlapping regions in a live cluster, run
 * MetaFixer#fixOverlaps, then poll the CatalogJanitor until its report
 * comes back clean.
 */
@Test
public void testOverlap() throws IOException {
  TableName tn = TableName.valueOf(this.name.getMethodName());
  TEST_UTIL.createMultiRegionTable(tn, HConstants.CATALOG_FAMILY);
  List<RegionInfo> ris = MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tn);
  assertTrue(ris.size() > 5);
  MasterServices services = TEST_UTIL.getHBaseCluster().getMaster();
  services.getCatalogJanitor().scan();
  CatalogJanitor.Report report = services.getCatalogJanitor().getLastReport();
  assertTrue(report.isEmpty());
  // Make a simple overlap spanning second and third region.
  makeOverlap(services, ris.get(1), ris.get(3));
  makeOverlap(services, ris.get(2), ris.get(3));
  makeOverlap(services, ris.get(2), ris.get(4));
  // NOTE(review): fixed sleep presumably lets the overlap assigns settle
  // before scanning — confirm; a condition-based wait would be less flaky.
  Threads.sleep(10000);
  services.getCatalogJanitor().scan();
  report = services.getCatalogJanitor().getLastReport();
  Assert.assertEquals(6, report.getOverlaps().size());
  // All six overlap pairs chain together, so they aggregate into one merge.
  Assert.assertEquals(1, MetaFixer.calculateMerges(10, report.getOverlaps()).size());
  MetaFixer fixer = new MetaFixer(services);
  fixer.fixOverlaps(report);
  // Poll until the merge completes and the report comes back clean.
  while (true) {
    services.getCatalogJanitor().scan();
    report = services.getCatalogJanitor().getLastReport();
    if (report.isEmpty()) {
      break;
    }
    Threads.sleep(10);
  }
  assertTrue(report.toString(), report.isEmpty());
}
}

View File

@ -0,0 +1,141 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.List;
import java.util.SortedSet;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
 * Test small utility methods inside {@link MetaFixer}.
 * For cluster tests see {@link TestMetaFixer}
 */
@Category({MasterTests.class, SmallTests.class})
public class TestMetaFixerNoCluster {
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestMetaFixerNoCluster.class);
  // Key bytes used to build the fixture regions below.
  private static byte [] A = Bytes.toBytes("a");
  private static byte [] B = Bytes.toBytes("b");
  private static byte [] C = Bytes.toBytes("c");
  private static byte [] D = Bytes.toBytes("d");
  // Whole-keyspace region (first meta region).
  private static RegionInfo ALL = RegionInfoBuilder.FIRST_META_REGIONINFO;
  // Region from table start up to 'a'.
  private static RegionInfo _ARI = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
      setEndKey(A).build();
  // Region from table start up to 'b'.
  private static RegionInfo _BRI = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
      setEndKey(B).build();
  // Region ['a','b').
  private static RegionInfo ABRI = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
      setStartKey(A).setEndKey(B).build();
  // Region ['a','c').
  private static RegionInfo ACRI =
      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
          setStartKey(A).setEndKey(C).build();
  // Region ['c','d').
  private static RegionInfo CDRI =
      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
          setStartKey(C).setEndKey(D).build();
  // Region ['a','d').
  private static RegionInfo ADRI =
      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
          setStartKey(A).setEndKey(D).build();
  // Region from 'd' to table end (isLast).
  private static RegionInfo D_RI =
      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
          setStartKey(D).build();
  // Region from 'c' to table end (isLast).
  private static RegionInfo C_RI =
      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
          setStartKey(C).build();

  @Test
  public void testGetRegionInfoWithLargestEndKey() {
    assertTrue(MetaFixer.getRegionInfoWithLargestEndKey(_ARI, _BRI).equals(_BRI));
    // Both C_RI and D_RI are 'last' regions; first argument wins the tie.
    assertTrue(MetaFixer.getRegionInfoWithLargestEndKey(C_RI, D_RI).equals(C_RI));
    assertTrue(MetaFixer.getRegionInfoWithLargestEndKey(ABRI, CDRI).equals(CDRI));
    // Null-argument handling: a null 'a' yields 'b'; two nulls yield null.
    assertTrue(MetaFixer.getRegionInfoWithLargestEndKey(null, CDRI).equals(CDRI));
    assertTrue(MetaFixer.getRegionInfoWithLargestEndKey(null, null) == null);
  }

  @Test
  public void testIsOverlap() {
    assertTrue(MetaFixer.isOverlap(_BRI, new Pair<RegionInfo, RegionInfo>(ABRI, ACRI)));
    assertFalse(MetaFixer.isOverlap(_ARI, new Pair<RegionInfo, RegionInfo>(C_RI, D_RI)));
    assertTrue(MetaFixer.isOverlap(ADRI, new Pair<RegionInfo, RegionInfo>(CDRI, C_RI)));
    assertFalse(MetaFixer.isOverlap(_BRI, new Pair<RegionInfo, RegionInfo>(CDRI, C_RI)));
  }

  @Test
  public void testCalculateMergesNoAggregation() {
    // Two disjoint overlap pairs stay as two separate merge sets.
    List<Pair<RegionInfo, RegionInfo>> overlaps = new ArrayList<>();
    overlaps.add(new Pair<RegionInfo, RegionInfo>(_ARI, _BRI));
    overlaps.add(new Pair<RegionInfo, RegionInfo>(C_RI, D_RI));
    List<SortedSet<RegionInfo>> merges = MetaFixer.calculateMerges(10, overlaps);
    assertEquals(2, merges.size());
    assertEquals(2, merges.get(0).size());
    assertEquals(2, merges.get(1).size());
  }

  @Test
  public void testCalculateMergesAggregation() {
    // ALL spans everything, so all subsequent pairs chain into one merge set.
    List<Pair<RegionInfo, RegionInfo>> overlaps = new ArrayList<>();
    overlaps.add(new Pair<RegionInfo, RegionInfo>(ALL, D_RI));
    overlaps.add(new Pair<RegionInfo, RegionInfo>(_ARI, _BRI));
    overlaps.add(new Pair<RegionInfo, RegionInfo>(C_RI, D_RI));
    List<SortedSet<RegionInfo>> merges = MetaFixer.calculateMerges(10, overlaps);
    assertEquals(1, merges.size());
    assertEquals(5, merges.get(0).size());
  }

  @Test
  public void testCalculateMergesNoRepeatOfRegionNames() {
    // ABRI appears in both pairs; the SortedSet deduplicates it.
    List<Pair<RegionInfo, RegionInfo>> overlaps = new ArrayList<>();
    overlaps.add(new Pair<RegionInfo, RegionInfo>(_BRI, ABRI));
    overlaps.add(new Pair<RegionInfo, RegionInfo>(ABRI, ADRI));
    List<SortedSet<RegionInfo>> merges = MetaFixer.calculateMerges(10, overlaps);
    assertEquals(1, merges.size());
    // There should be three regions to merge, not four.
    assertEquals(3, merges.get(0).size());
  }

  @Test
  public void testCalculateMergesRespectsMax() {
    // Max of 3 forces the third pair into a second merge set even though it chains.
    List<Pair<RegionInfo, RegionInfo>> overlaps = new ArrayList<>();
    overlaps.add(new Pair<RegionInfo, RegionInfo>(_BRI, ABRI));
    overlaps.add(new Pair<RegionInfo, RegionInfo>(ABRI, ADRI));
    overlaps.add(new Pair<RegionInfo, RegionInfo>(C_RI, D_RI));
    List<SortedSet<RegionInfo>> merges = MetaFixer.calculateMerges(3, overlaps);
    assertEquals(2, merges.size());
    // There should be three regions to merge, not four.
    assertEquals(3, merges.get(0).size());
    assertEquals(2, merges.get(1).size());
  }
}