HBASE-22941 merge operation returns parent regions in random order (#556)
* HBASE-22941 merge operation returns parent regions in random order: store and return the merge parent regions in ascending order; remove the left-over check for exactly two merged regions; add a unit test. * Use the SortedMap type to emphasise that the Map is sorted. * Use regionCount consistently, plus checkstyle fixes. * Delete tests that expect multi-region merges to fail. Signed-off-by: stack <stack@apache.org>
This commit is contained in:
parent
83e77940c5
commit
56fd3e9007
|
@ -812,10 +812,6 @@ public class MasterRpcServices extends RSRpcServices
|
|||
|
||||
RegionStates regionStates = master.getAssignmentManager().getRegionStates();
|
||||
|
||||
if (request.getRegionCount() != 2) {
|
||||
throw new ServiceException(new DoNotRetryIOException(
|
||||
"Only support merging 2 regions but " + request.getRegionCount() + " region passed"));
|
||||
}
|
||||
RegionInfo[] regionsToMerge = new RegionInfo[request.getRegionCount()];
|
||||
for (int i = 0; i < request.getRegionCount(); i++) {
|
||||
final byte[] encodedNameOfRegion = request.getRegion(i).getValue().toByteArray();
|
||||
|
|
|
@ -19,9 +19,9 @@ package org.apache.hadoop.hbase.master.assignment;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.SortedMap;
|
||||
import java.util.TreeMap;
|
||||
|
||||
import org.apache.hadoop.hbase.Cell;
|
||||
import org.apache.hadoop.hbase.CellBuilderFactory;
|
||||
|
@ -263,7 +263,7 @@ public class RegionStateStore {
|
|||
throws IOException {
|
||||
TableDescriptor htd = getDescriptor(child.getTable());
|
||||
boolean globalScope = htd.hasGlobalReplicationScope();
|
||||
Map<RegionInfo, Long> parentSeqNums = new HashMap<>(parents.length);
|
||||
SortedMap<RegionInfo, Long> parentSeqNums = new TreeMap<>();
|
||||
for (RegionInfo ri: parents) {
|
||||
parentSeqNums.put(ri, globalScope? getOpenSeqNumForParentRegion(ri): -1);
|
||||
}
|
||||
|
|
|
@ -19,7 +19,9 @@ package org.apache.hadoop.hbase;
|
|||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNotNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
|
||||
import org.apache.hadoop.hbase.client.AsyncConnection;
|
||||
|
@ -104,4 +106,51 @@ public class TestSplitMerge {
|
|||
.getRegionLocation(Bytes.toBytes(1), true).get().getServerName());
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testMergeRegionOrder() throws Exception {
|
||||
|
||||
int regionCount= 20;
|
||||
|
||||
TableName tableName = TableName.valueOf("MergeRegionOrder");
|
||||
byte[] family = Bytes.toBytes("CF");
|
||||
TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName)
|
||||
.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build();
|
||||
|
||||
byte[][] splitKeys = new byte[regionCount-1][];
|
||||
|
||||
for (int c = 0; c < regionCount-1; c++) {
|
||||
splitKeys[c] = Bytes.toBytes(c+1 * 1000);
|
||||
}
|
||||
|
||||
UTIL.getAdmin().createTable(td, splitKeys);
|
||||
UTIL.waitTableAvailable(tableName);
|
||||
|
||||
List<RegionInfo> regions = UTIL.getAdmin().getRegions(tableName);
|
||||
|
||||
byte[][] regionNames = new byte[regionCount][];
|
||||
for (int c = 0; c < regionCount; c++) {
|
||||
regionNames[c] = regions.get(c).getRegionName();
|
||||
}
|
||||
|
||||
UTIL.getAdmin().mergeRegionsAsync(regionNames, false).get(60, TimeUnit.SECONDS);
|
||||
|
||||
List<RegionInfo> mergedRegions =
|
||||
MetaTableAccessor.getTableRegions(UTIL.getConnection(), tableName);
|
||||
|
||||
assertEquals(1, mergedRegions.size());
|
||||
|
||||
RegionInfo mergedRegion = mergedRegions.get(0);
|
||||
|
||||
List<RegionInfo> mergeParentRegions = MetaTableAccessor.getMergeRegions(UTIL.getConnection(),
|
||||
mergedRegion.getEncodedNameAsBytes());
|
||||
|
||||
assertEquals(mergeParentRegions.size(), regionCount);
|
||||
|
||||
for (int c = 0; c < regionCount-1; c++) {
|
||||
assertTrue(Bytes.compareTo(
|
||||
mergeParentRegions.get(c).getStartKey(),
|
||||
mergeParentRegions.get(c+1).getStartKey()) < 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -588,15 +588,6 @@ public class TestAdmin1 extends TestAdminBase {
|
|||
} catch (IllegalArgumentException e) {
|
||||
// expected
|
||||
}
|
||||
// 3
|
||||
try {
|
||||
FutureUtils.get(ADMIN.mergeRegionsAsync(
|
||||
tableRegions.stream().map(RegionInfo::getEncodedNameAsBytes).toArray(byte[][]::new),
|
||||
false));
|
||||
fail();
|
||||
} catch (DoNotRetryIOException e) {
|
||||
// expected
|
||||
}
|
||||
} finally {
|
||||
ADMIN.disableTable(tableName);
|
||||
ADMIN.deleteTable(tableName);
|
||||
|
|
|
@ -32,7 +32,6 @@ import java.util.Optional;
|
|||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.stream.Collectors;
|
||||
import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
|
||||
import org.apache.hadoop.hbase.DoNotRetryIOException;
|
||||
import org.apache.hadoop.hbase.HBaseClassTestRule;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.HRegionLocation;
|
||||
|
@ -205,16 +204,6 @@ public class TestAsyncRegionAdminApi2 extends TestAsyncAdminBase {
|
|||
// expected
|
||||
assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
|
||||
}
|
||||
// 3
|
||||
try {
|
||||
admin.mergeRegions(
|
||||
regions.stream().map(RegionInfo::getEncodedNameAsBytes).collect(Collectors.toList()), false)
|
||||
.get();
|
||||
fail();
|
||||
} catch (ExecutionException e) {
|
||||
// expected
|
||||
assertThat(e.getCause(), instanceOf(DoNotRetryIOException.class));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
Loading…
Reference in New Issue