diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
index 80569002cae..836578c40d3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
@@ -794,6 +794,9 @@ public interface RegionInfo {
    * @see #isDegenerate()
    */
   default boolean isOverlap(RegionInfo other) {
+    if (other == null) {
+      return false;
+    }
     if (!getTable().equals(other.getTable())) {
       return false;
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index bfea5a71f6c..5d6b6a7b73b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -456,6 +456,12 @@ public class CatalogJanitor extends ScheduledChore {
       return this.holes;
     }
 
+    /**
+     * @return Overlap pairs found as we scanned hbase:meta; ordered by hbase:meta
+     *   table sort. A pair's regions may also overlap regions in subsequent pairs.
+     * @see MetaFixer#calculateMerges(int, List) where we aggregate overlaps
+     *   for a single 'merge' call.
+     */
     public List<Pair<RegionInfo, RegionInfo>> getOverlaps() {
       return this.overlaps;
     }
@@ -478,20 +484,20 @@ @Override
     public String toString() {
-      StringBuffer sb = new StringBuffer();
+      StringBuilder sb = new StringBuilder();
       for (Pair<RegionInfo, RegionInfo> p: this.holes) {
         if (sb.length() > 0) {
           sb.append(", ");
         }
-        sb.append("hole=" + p.getFirst().getRegionNameAsString() + "/" +
-            p.getSecond().getRegionNameAsString());
+        sb.append("hole=").append(p.getFirst().getRegionNameAsString()).append("/").
+            append(p.getSecond().getRegionNameAsString());
       }
       for (Pair<RegionInfo, RegionInfo> p: this.overlaps) {
         if (sb.length() > 0) {
           sb.append(", ");
         }
-        sb.append("overlap=" + p.getFirst().getRegionNameAsString() + "/" +
-            p.getSecond().getRegionNameAsString());
+        sb.append("overlap=").append(p.getFirst().getRegionNameAsString()).append("/").
+            append(p.getSecond().getRegionNameAsString());
       }
       for (byte [] r: this.emptyRegionInfo) {
         if (sb.length() > 0) {
           sb.append(", ");
         }
@@ -533,6 +539,16 @@
      */
     private RegionInfo previous = null;
 
+    /**
+     * Keep account of the highest end key seen as we move through hbase:meta.
+     * Usually, the current RegionInfo has the highest end key, but if there is an
+     * overlap, this may no longer hold. An overlap may be a region with startkey 'd' and
+     * endkey 'g'. The next region in meta may be 'e' to 'f' and then 'f' to 'g'.
+     * Looking only at the previous and current meta rows, we won't know about the 'd' to 'g'
+     * overlap unless we keep a running 'highest-endpoint-seen'.
+     */
+    private RegionInfo highestEndKeyRegionInfo = null;
+
     ReportMakingVisitor(MasterServices services) {
       this.services = services;
     }
@@ -609,13 +625,22 @@
         if (!this.previous.isNext(ri)) {
           if (this.previous.isOverlap(ri)) {
             addOverlap(this.previous, ri);
+          } else if (ri.isOverlap(this.highestEndKeyRegionInfo)) {
+            // We may have seen a region a few rows back that overlaps this one.
+            addOverlap(this.highestEndKeyRegionInfo, ri);
           } else {
             addHole(this.previous, ri);
           }
+        } else if (ri.isOverlap(this.highestEndKeyRegionInfo)) {
+          // We may have seen a region a few rows back that overlaps this one
+          // even though it properly 'follows' the region just before.
+          addOverlap(this.highestEndKeyRegionInfo, ri);
         }
       }
     }
     this.previous = ri;
+    this.highestEndKeyRegionInfo =
+        MetaFixer.getRegionInfoWithLargestEndKey(ri, this.highestEndKeyRegionInfo);
     return ri;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaFixer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaFixer.java
index d94e4bd5cb6..3477ec6e644 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaFixer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaFixer.java
@@ -18,8 +18,12 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
-import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
@@ -28,6 +32,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.exceptions.MergeRegionException;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -36,20 +41,32 @@
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+
+
 /**
  * Server-side fixing of bad or inconsistent state in hbase:meta.
  * Distinct from MetaTableAccessor because {@link MetaTableAccessor} is about low-level
  * manipulations driven by the Master. This class MetaFixer is
- * employed by the Master and it 'knows' about holes and orphan
+ * employed by the Master and it 'knows' about holes and orphans
  * and encapsulates their fixing on behalf of the Master.
  */
 @InterfaceAudience.Private
 class MetaFixer {
   private static final Logger LOG = LoggerFactory.getLogger(MetaFixer.class);
+  private static final String MAX_MERGE_COUNT_KEY = "hbase.master.metafixer.max.merge.count";
+  private static final int MAX_MERGE_COUNT_DEFAULT = 10;
   private final MasterServices masterServices;
+  /**
+   * Maximum number of regions to merge at a time.
+   */
+  private final int maxMergeCount;
 
   MetaFixer(MasterServices masterServices) {
     this.masterServices = masterServices;
+    this.maxMergeCount = this.masterServices.getConfiguration().
+        getInt(MAX_MERGE_COUNT_KEY, MAX_MERGE_COUNT_DEFAULT);
   }
 
   void fix() throws IOException {
@@ -66,14 +83,12 @@ class MetaFixer {
   /**
    * If hole, it papers it over by adding a region in the filesystem and to hbase:meta.
    * Does not assign.
-   * @return True if we fixed any 'holes'.
    */
-  boolean fixHoles(CatalogJanitor.Report report) throws IOException {
-    boolean result = false;
+  void fixHoles(CatalogJanitor.Report report) throws IOException {
     List<Pair<RegionInfo, RegionInfo>> holes = report.getHoles();
     if (holes.isEmpty()) {
       LOG.debug("No holes.");
-      return result;
+      return;
     }
     for (Pair<RegionInfo, RegionInfo> p: holes) {
       RegionInfo ri = getHoleCover(p);
       if (ri == null) {
         continue;
       }
       Configuration configuration = this.masterServices.getConfiguration();
       HRegion.createRegionDir(configuration, ri, FSUtils.getRootDir(configuration));
       // If an error here, then we'll have a region in the filesystem but not
       // in hbase:meta (if the below fails). Should be able to rerun the fix.
       // The second call to createRegionDir will just go through. Idempotent.
       Put put = MetaTableAccessor.makePutFromRegionInfo(ri, HConstants.LATEST_TIMESTAMP);
-      MetaTableAccessor.putsToMetaTable(this.masterServices.getConnection(), Arrays.asList(put));
+      MetaTableAccessor.putsToMetaTable(this.masterServices.getConnection(),
+        Collections.singletonList(put));
       LOG.info("Fixed hole by adding {}; region is NOT assigned (assign to online).", ri);
-      result = true;
     }
-    return result;
   }
 
   /**
@@ -136,28 +150,94 @@
     return RegionInfoBuilder.newBuilder(tn).setStartKey(start).setEndKey(end).build();
   }
 
-  boolean fixOverlaps(CatalogJanitor.Report report) throws IOException {
-    boolean result = false;
-    List<Pair<RegionInfo, RegionInfo>> overlaps = report.getOverlaps();
+  /**
+   * Fix overlaps noted in CJ consistency report.
+   */
+  void fixOverlaps(CatalogJanitor.Report report) throws IOException {
+    for (Set<RegionInfo> regions: calculateMerges(maxMergeCount, report.getOverlaps())) {
+      RegionInfo [] regionsArray = regions.toArray(new RegionInfo [] {});
+      try {
+        this.masterServices.mergeRegions(regionsArray,
+          false, HConstants.NO_NONCE, HConstants.NO_NONCE);
+      } catch (MergeRegionException mre) {
+        LOG.warn("Failed overlap fix of {}", regionsArray, mre);
+      }
+    }
+  }
+
+  /**
+   * Run through overlaps and return a list of merges to run.
+   * Presumes overlaps are ordered (which they are coming out of the CatalogJanitor
+   * consistency report).
+   * @param maxMergeCount Maximum regions to merge at a time (avoid merging
+   *   100k regions in one go!)
+   */
+  @VisibleForTesting
+  static List<SortedSet<RegionInfo>> calculateMerges(int maxMergeCount,
+      List<Pair<RegionInfo, RegionInfo>> overlaps) {
     if (overlaps.isEmpty()) {
       LOG.debug("No overlaps.");
-      return result;
+      return Collections.emptyList();
     }
-    for (Pair<RegionInfo, RegionInfo> p: overlaps) {
-      RegionInfo ri = getHoleCover(p);
-      if (ri == null) {
-        continue;
+    List<SortedSet<RegionInfo>> merges = new ArrayList<>();
+    SortedSet<RegionInfo> currentMergeSet = new TreeSet<>();
+    RegionInfo regionInfoWithLargestEndKey = null;
+    for (Pair<RegionInfo, RegionInfo> pair: overlaps) {
+      if (regionInfoWithLargestEndKey != null) {
+        if (!isOverlap(regionInfoWithLargestEndKey, pair) ||
+            currentMergeSet.size() >= maxMergeCount) {
+          merges.add(currentMergeSet);
+          currentMergeSet = new TreeSet<>();
+        }
       }
-      Configuration configuration = this.masterServices.getConfiguration();
-      HRegion.createRegionDir(configuration, ri, FSUtils.getRootDir(configuration));
-      // If an error here, then we'll have a region in the filesystem but not
-      // in hbase:meta (if the below fails). Should be able to rerun the fix.
-      // The second call to createRegionDir will just go through. Idempotent.
-      Put put = MetaTableAccessor.makePutFromRegionInfo(ri, HConstants.LATEST_TIMESTAMP);
-      MetaTableAccessor.putsToMetaTable(this.masterServices.getConnection(), Arrays.asList(put));
-      LOG.info("Fixed hole by adding {}; region is NOT assigned (assign to online).", ri);
-      result = true;
+      currentMergeSet.add(pair.getFirst());
+      currentMergeSet.add(pair.getSecond());
+      regionInfoWithLargestEndKey = getRegionInfoWithLargestEndKey(
+        getRegionInfoWithLargestEndKey(pair.getFirst(), pair.getSecond()),
+        regionInfoWithLargestEndKey);
     }
-    return result;
+    merges.add(currentMergeSet);
+    return merges;
+  }
+
+  /**
+   * @return Either a or b, whichever has the
+   *   endkey that is furthest along in the Table.
+   */
+  @VisibleForTesting
+  static RegionInfo getRegionInfoWithLargestEndKey(RegionInfo a, RegionInfo b) {
+    if (a == null) {
+      // b may be null.
+      return b;
+    }
+    if (b == null) {
+      // a is not null; b is. Return a.
+      return a;
+    }
+    if (!a.getTable().equals(b.getTable())) {
+      // This is an odd one. This should be the right answer.
+      return b;
+    }
+    if (a.isLast()) {
+      return a;
+    }
+    if (b.isLast()) {
+      return b;
+    }
+    int compare = Bytes.compareTo(a.getEndKey(), b.getEndKey());
+    return compare == 0 || compare > 0? a: b;
+  }
+
+  /**
+   * @return True if an overlap is found between the passed in ri and
+   *   either region of the pair. Does NOT check whether the pair's regions overlap each other.
+   */
+  @VisibleForTesting
+  static boolean isOverlap(RegionInfo ri, Pair<RegionInfo, RegionInfo> pair) {
+    if (ri == null || pair == null) {
+      // Can't be an overlap in either of these cases.
+      return false;
+    }
+    return ri.isOverlap(pair.getFirst()) || ri.isOverlap(pair.getSecond());
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaFixer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaFixer.java
index 2bffe31415f..aba00fed336 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaFixer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaFixer.java
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
+import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -26,8 +29,11 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.util.Threads;
+
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -36,9 +42,6 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 
 @Category({MasterTests.class, LargeTests.class})
 public class TestMetaFixer {
@@ -47,7 +50,6 @@ public class TestMetaFixer {
       HBaseClassTestRule.forClass(TestMetaFixer.class);
   @Rule
   public TestName name = new TestName();
-  private static final Logger LOG = LoggerFactory.getLogger(TestMetaFixer.class);
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 
@@ -75,21 +77,20 @@ public class TestMetaFixer {
     MasterServices services = TEST_UTIL.getHBaseCluster().getMaster();
     services.getCatalogJanitor().scan();
     CatalogJanitor.Report report = services.getCatalogJanitor().getLastReport();
-    Assert.assertTrue(report.isEmpty());
+    assertTrue(report.isEmpty());
     int originalCount = ris.size();
     // Remove first, last and middle region. See if hole gets plugged. Table has 26 regions.
     deleteRegion(services, ris.get(ris.size() -1));
     deleteRegion(services, ris.get(3));
     deleteRegion(services, ris.get(0));
-    ris = MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tn);
     services.getCatalogJanitor().scan();
     report = services.getCatalogJanitor().getLastReport();
     Assert.assertEquals(report.toString(), 3, report.getHoles().size());
     MetaFixer fixer = new MetaFixer(services);
-    Assert.assertTrue(fixer.fixHoles(report));
+    fixer.fixHoles(report);
     services.getCatalogJanitor().scan();
     report = services.getCatalogJanitor().getLastReport();
-    Assert.assertTrue(report.toString(), report.isEmpty());
+    assertTrue(report.toString(), report.isEmpty());
     // Disable and reenable so the added regions get reassigned.
     TEST_UTIL.getAdmin().disableTable(tn);
     TEST_UTIL.getAdmin().enableTable(tn);
@@ -110,18 +111,61 @@
     List<RegionInfo> ris = MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tn);
     MasterServices services = TEST_UTIL.getHBaseCluster().getMaster();
     services.getCatalogJanitor().scan();
-    CatalogJanitor.Report report = services.getCatalogJanitor().getLastReport();
-    int originalCount = ris.size();
     deleteRegion(services, ris.get(0));
     services.getCatalogJanitor().scan();
-    report = services.getCatalogJanitor().getLastReport();
+    CatalogJanitor.Report report = services.getCatalogJanitor().getLastReport();
     ris = MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tn);
-    Assert.assertTrue(ris.isEmpty());
+    assertTrue(ris.isEmpty());
     MetaFixer fixer = new MetaFixer(services);
-    Assert.assertFalse(fixer.fixHoles(report));
+    fixer.fixHoles(report);
     report = services.getCatalogJanitor().getLastReport();
-    Assert.assertTrue(report.isEmpty());
+    assertTrue(report.isEmpty());
     ris = MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tn);
     Assert.assertEquals(0, ris.size());
   }
+
+  private static void makeOverlap(MasterServices services, RegionInfo a, RegionInfo b)
+      throws IOException {
+    RegionInfo overlapRegion = RegionInfoBuilder.newBuilder(a.getTable()).
+        setStartKey(a.getStartKey()).
+        setEndKey(b.getEndKey()).
+        build();
+    MetaTableAccessor.putsToMetaTable(services.getConnection(),
+        Collections.singletonList(MetaTableAccessor.makePutFromRegionInfo(overlapRegion,
+            System.currentTimeMillis())));
+    // TODO: Add checks at assign time to PREVENT being able to assign over existing assign.
+    services.getAssignmentManager().assign(overlapRegion);
+  }
+
+  @Test
+  public void testOverlap() throws IOException {
+    TableName tn = TableName.valueOf(this.name.getMethodName());
+    TEST_UTIL.createMultiRegionTable(tn, HConstants.CATALOG_FAMILY);
+    List<RegionInfo> ris = MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), tn);
+    assertTrue(ris.size() > 5);
+    MasterServices services = TEST_UTIL.getHBaseCluster().getMaster();
+    services.getCatalogJanitor().scan();
+    CatalogJanitor.Report report = services.getCatalogJanitor().getLastReport();
+    assertTrue(report.isEmpty());
+    // Make a chain of overlaps spanning the second through fifth regions.
+    makeOverlap(services, ris.get(1), ris.get(3));
+    makeOverlap(services, ris.get(2), ris.get(3));
+    makeOverlap(services, ris.get(2), ris.get(4));
+    Threads.sleep(10000);
+    services.getCatalogJanitor().scan();
+    report = services.getCatalogJanitor().getLastReport();
+    Assert.assertEquals(6, report.getOverlaps().size());
+    Assert.assertEquals(1, MetaFixer.calculateMerges(10, report.getOverlaps()).size());
+    MetaFixer fixer = new MetaFixer(services);
+    fixer.fixOverlaps(report);
+    while (true) {
+      services.getCatalogJanitor().scan();
+      report = services.getCatalogJanitor().getLastReport();
+      if (report.isEmpty()) {
+        break;
+      }
+      Threads.sleep(10);
+    }
+    assertTrue(report.toString(), report.isEmpty());
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaFixerNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaFixerNoCluster.java
new file mode 100644
index 00000000000..5b80f89bffd
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMetaFixerNoCluster.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.SortedSet;
+
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+
+/**
+ * Test small utility methods inside {@link MetaFixer}.
+ * For cluster tests see {@link TestMetaFixer}
+ */
+@Category({MasterTests.class, SmallTests.class})
+public class TestMetaFixerNoCluster {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestMetaFixerNoCluster.class);
+  private static byte [] A = Bytes.toBytes("a");
+  private static byte [] B = Bytes.toBytes("b");
+  private static byte [] C = Bytes.toBytes("c");
+  private static byte [] D = Bytes.toBytes("d");
+  private static RegionInfo ALL = RegionInfoBuilder.FIRST_META_REGIONINFO;
+  private static RegionInfo _ARI = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
+      setEndKey(A).build();
+  private static RegionInfo _BRI = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
+      setEndKey(B).build();
+  private static RegionInfo ABRI = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
+      setStartKey(A).setEndKey(B).build();
+  private static RegionInfo ACRI =
+      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
+          setStartKey(A).setEndKey(C).build();
+  private static RegionInfo CDRI =
+      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
+          setStartKey(C).setEndKey(D).build();
+  private static RegionInfo ADRI =
+      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
+          setStartKey(A).setEndKey(D).build();
+  private static RegionInfo D_RI =
+      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
+          setStartKey(D).build();
+  private static RegionInfo C_RI =
+      org.apache.hadoop.hbase.client.RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
+          setStartKey(C).build();
+
+  @Test
+  public void testGetRegionInfoWithLargestEndKey() {
+    assertTrue(MetaFixer.getRegionInfoWithLargestEndKey(_ARI, _BRI).equals(_BRI));
+    assertTrue(MetaFixer.getRegionInfoWithLargestEndKey(C_RI, D_RI).equals(C_RI));
+    assertTrue(MetaFixer.getRegionInfoWithLargestEndKey(ABRI, CDRI).equals(CDRI));
+    assertTrue(MetaFixer.getRegionInfoWithLargestEndKey(null, CDRI).equals(CDRI));
+    assertTrue(MetaFixer.getRegionInfoWithLargestEndKey(null, null) == null);
+  }
+
+  @Test
+  public void testIsOverlap() {
+    assertTrue(MetaFixer.isOverlap(_BRI, new Pair<RegionInfo, RegionInfo>(ABRI, ACRI)));
+    assertFalse(MetaFixer.isOverlap(_ARI, new Pair<RegionInfo, RegionInfo>(C_RI, D_RI)));
+    assertTrue(MetaFixer.isOverlap(ADRI, new Pair<RegionInfo, RegionInfo>(CDRI, C_RI)));
+    assertFalse(MetaFixer.isOverlap(_BRI, new Pair<RegionInfo, RegionInfo>(CDRI, C_RI)));
+  }
+
+  @Test
+  public void testCalculateMergesNoAggregation() {
+    List<Pair<RegionInfo, RegionInfo>> overlaps = new ArrayList<>();
+    overlaps.add(new Pair<RegionInfo, RegionInfo>(_ARI, _BRI));
+    overlaps.add(new Pair<RegionInfo, RegionInfo>(C_RI, D_RI));
+    List<SortedSet<RegionInfo>> merges = MetaFixer.calculateMerges(10, overlaps);
+    assertEquals(2, merges.size());
+    assertEquals(2, merges.get(0).size());
+    assertEquals(2, merges.get(1).size());
+  }
+
+  @Test
+  public void testCalculateMergesAggregation() {
+    List<Pair<RegionInfo, RegionInfo>> overlaps = new ArrayList<>();
+    overlaps.add(new Pair<RegionInfo, RegionInfo>(ALL, D_RI));
+    overlaps.add(new Pair<RegionInfo, RegionInfo>(_ARI, _BRI));
+    overlaps.add(new Pair<RegionInfo, RegionInfo>(C_RI, D_RI));
+    List<SortedSet<RegionInfo>> merges = MetaFixer.calculateMerges(10, overlaps);
+    assertEquals(1, merges.size());
+    assertEquals(5, merges.get(0).size());
+  }
+
+  @Test
+  public void testCalculateMergesNoRepeatOfRegionNames() {
+    List<Pair<RegionInfo, RegionInfo>> overlaps = new ArrayList<>();
+    overlaps.add(new Pair<RegionInfo, RegionInfo>(_BRI, ABRI));
+    overlaps.add(new Pair<RegionInfo, RegionInfo>(ABRI, ADRI));
+    List<SortedSet<RegionInfo>> merges = MetaFixer.calculateMerges(10, overlaps);
+    assertEquals(1, merges.size());
+    // There should be three regions to merge, not four.
+    assertEquals(3, merges.get(0).size());
+  }
+
+  @Test
+  public void testCalculateMergesRespectsMax() {
+    List<Pair<RegionInfo, RegionInfo>> overlaps = new ArrayList<>();
+    overlaps.add(new Pair<RegionInfo, RegionInfo>(_BRI, ABRI));
+    overlaps.add(new Pair<RegionInfo, RegionInfo>(ABRI, ADRI));
+    overlaps.add(new Pair<RegionInfo, RegionInfo>(C_RI, D_RI));
+    List<SortedSet<RegionInfo>> merges = MetaFixer.calculateMerges(3, overlaps);
+    assertEquals(2, merges.size());
+    // There should be three regions to merge, not four.
+    assertEquals(3, merges.get(0).size());
+    assertEquals(2, merges.get(1).size());
+  }
+}
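
Reviewer note on the CatalogJanitor change: comparing only the previous and current meta rows misses overlaps that reach past several rows, which is why the visitor now carries a running highest-end-key. The following self-contained sketch uses plain strings as stand-ins for one table's region start/end keys (the class and variable names are illustrative only, not HBase API) and replays the 'd'-'g' example from the new javadoc.

import java.util.Arrays;
import java.util.List;

/** Illustration only: why ReportMakingVisitor keeps a running highest-end-key. */
public class HighestEndKeySketch {
  public static void main(String[] args) {
    // Hypothetical sorted hbase:meta rows for one table: {startKey, endKey}.
    List<String[]> rows = Arrays.asList(
        new String[] {"d", "g"},   // wide region
        new String[] {"e", "f"},   // overlaps d-g; the previous-vs-current check catches this
        new String[] {"f", "g"});  // starts exactly where e-f ends, yet still overlaps d-g
    String[] previous = null;
    String highestEndKeySeen = null;
    for (String[] current : rows) {
      if (previous != null) {
        boolean isNext = previous[1].equals(current[0]);
        boolean overlapsPrevious = previous[1].compareTo(current[0]) > 0;
        // Only the running maximum end key reveals that f-g sits inside the earlier d-g region.
        boolean overlapsEarlierRow =
            highestEndKeySeen != null && highestEndKeySeen.compareTo(current[0]) > 0;
        System.out.printf("%s-%s: isNext=%s overlapsPrevious=%s overlapsEarlierRow=%s%n",
            current[0], current[1], isNext, overlapsPrevious, overlapsEarlierRow);
      }
      previous = current;
      if (highestEndKeySeen == null || current[1].compareTo(highestEndKeySeen) > 0) {
        highestEndKeySeen = current[1];
      }
    }
  }
}

The last row printed is the case the new else-if branch in the visitor handles: isNext is true, so the previous-row check stays quiet, yet the row still overlaps a region seen a few rows back.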
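
Reviewer note on MetaFixer.calculateMerges: it walks the ordered overlap pairs, keeps a current merge set, and closes that set whenever the region holding the largest end key seen so far no longer overlaps the next pair, or the set hits the maxMergeCount cap (hbase.master.metafixer.max.merge.count, default 10). Below is a minimal, dependency-free sketch of that chaining under the same structure; the Region class and all names here are hypothetical stand-ins, not the RegionInfo API, and adjacency (end key equal to the next start key) is deliberately not treated as overlap, matching RegionInfo.isOverlap semantics.

import java.util.ArrayList;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;

/** Illustration only: the overlap-chaining idea behind MetaFixer.calculateMerges. */
public class CalculateMergesSketch {
  /** Hypothetical stand-in for RegionInfo: a half-open [start, end) key range; "" end means last region. */
  static final class Region implements Comparable<Region> {
    final String start;
    final String end;
    Region(String start, String end) { this.start = start; this.end = end; }
    boolean isLast() { return end.isEmpty(); }
    boolean isOverlap(Region other) {
      boolean thisReachesPastOtherStart = isLast() || end.compareTo(other.start) > 0;
      boolean otherReachesPastThisStart = other.isLast() || other.end.compareTo(start) > 0;
      return thisReachesPastOtherStart && otherReachesPastThisStart;
    }
    @Override public int compareTo(Region o) {
      int c = start.compareTo(o.start);
      return c != 0 ? c : end.compareTo(o.end);
    }
    @Override public String toString() { return "[" + start + "," + (isLast() ? "LAST" : end) + ")"; }
  }

  /** Group ordered overlap pairs into merge batches of at most maxMergeCount regions each. */
  static List<SortedSet<Region>> calculateMerges(int maxMergeCount, List<Region[]> overlaps) {
    List<SortedSet<Region>> merges = new ArrayList<>();
    SortedSet<Region> currentMergeSet = new TreeSet<>();
    Region largestEndKeySoFar = null;
    for (Region[] pair : overlaps) {
      boolean chains = largestEndKeySoFar != null
          && (largestEndKeySoFar.isOverlap(pair[0]) || largestEndKeySoFar.isOverlap(pair[1]));
      if (largestEndKeySoFar != null && (!chains || currentMergeSet.size() >= maxMergeCount)) {
        // Chain broken, or batch full: close out the current merge set and start another.
        merges.add(currentMergeSet);
        currentMergeSet = new TreeSet<>();
      }
      currentMergeSet.add(pair[0]);   // the sorted set de-duplicates regions repeated across pairs
      currentMergeSet.add(pair[1]);
      largestEndKeySoFar = max(max(pair[0], pair[1]), largestEndKeySoFar);
    }
    merges.add(currentMergeSet);
    return merges;
  }

  static Region max(Region a, Region b) {
    if (a == null) { return b; }
    if (b == null) { return a; }
    if (a.isLast()) { return a; }
    if (b.isLast()) { return b; }
    return a.end.compareTo(b.end) >= 0 ? a : b;
  }

  public static void main(String[] args) {
    List<Region[]> overlaps = new ArrayList<>();
    overlaps.add(new Region[] { new Region("a", "d"), new Region("b", "c") });
    overlaps.add(new Region[] { new Region("c", "e"), new Region("d", "f") }); // chains onto [a,d)
    overlaps.add(new Region[] { new Region("x", "y"), new Region("x", "z") }); // disjoint: new batch
    // Prints two batches: [[a,d), [b,c), [c,e), [d,f)] and [[x,y), [x,z)]
    System.out.println(calculateMerges(10, overlaps));
  }
}

The use of a sorted set is also what testCalculateMergesNoRepeatOfRegionNames above relies on: a region named in two adjacent overlap pairs lands in the merge set only once.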
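
Reviewer note on the new hbase.master.metafixer.max.merge.count knob: MetaFixer reads it once in its constructor, so the value has to be present in the Configuration the MasterServices hands out before the fixer is instantiated. A hedged example for a minicluster test (the value 5 is arbitrary; TEST_UTIL is the usual HBaseTestingUtility field, and this assumes the cluster, and hence the master's Configuration, is created after the property is set):

// Set before starting the minicluster so the master's Configuration carries the override.
TEST_UTIL.getConfiguration().setInt("hbase.master.metafixer.max.merge.count", 5);

Operators would set the same key as an integer in hbase-site.xml on the master.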