HBASE-3328 Added Admin API to specify explicit split points
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1049379 13f79535-47bb-0310-9956-ffa450edef68
commit c74c097573 (parent e6e0eea777)
@@ -1047,8 +1047,8 @@ public class HBaseAdmin implements Abortable {
   }
 
   /**
-   * Split a table or an individual region.
-   * Asynchronous operation.
+   * Split a table or an individual region. Implicitly finds an optimal split
+   * point. Asynchronous operation.
    *
    * @param tableNameOrRegionName table to region to split
    * @throws IOException if a remote or network exception occurs
@@ -1056,6 +1056,20 @@ public class HBaseAdmin implements Abortable {
    */
   public void split(final byte [] tableNameOrRegionName)
   throws IOException, InterruptedException {
+    split(tableNameOrRegionName, null);
+  }
+
+  /**
+   * Split a table or an individual region.
+   * Asynchronous operation.
+   *
+   * @param tableNameOrRegionName table to region to split
+   * @param splitPoint the explicit position to split on
+   * @throws IOException if a remote or network exception occurs
+   * @throws InterruptedException interrupt exception occurred
+   */
+  public void split(final byte [] tableNameOrRegionName,
+      final byte [] splitPoint) throws IOException, InterruptedException {
     CatalogTracker ct = getCatalogTracker();
     try {
       if (isRegionName(tableNameOrRegionName)) {
@@ -1066,7 +1080,7 @@ public class HBaseAdmin implements Abortable {
           LOG.info("No server in .META. for " +
             Bytes.toString(tableNameOrRegionName) + "; pair=" + pair);
         } else {
-          split(pair.getSecond(), pair.getFirst());
+          split(pair.getSecond(), pair.getFirst(), splitPoint);
         }
       } else {
         List<Pair<HRegionInfo, HServerAddress>> pairs =
@@ -1075,7 +1089,12 @@ public class HBaseAdmin implements Abortable {
         for (Pair<HRegionInfo, HServerAddress> pair: pairs) {
           // May not be a server for a particular row
           if (pair.getSecond() == null) continue;
-          split(pair.getSecond(), pair.getFirst());
+          if (splitPoint != null) {
+            // if a split point given, only split that particular region
+            HRegionInfo r = pair.getFirst();
+            if (!r.containsRow(splitPoint)) continue;
+          }
+          split(pair.getSecond(), pair.getFirst(), splitPoint);
         }
       }
     } finally {
@@ -1083,10 +1102,10 @@ public class HBaseAdmin implements Abortable {
     }
   }
 
-  private void split(final HServerAddress hsa, final HRegionInfo hri)
-  throws IOException {
+  private void split(final HServerAddress hsa, final HRegionInfo hri,
+      byte[] splitPoint) throws IOException {
     HRegionInterface rs = this.connection.getHRegionConnection(hsa);
-    rs.splitRegion(hri);
+    rs.splitRegion(hri, splitPoint);
   }
 
   /**
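For reference, a minimal client-side sketch of the two-argument split call added above. The table name "mytable" and the split row "row-5000" are placeholders, and HBaseConfiguration.create() is assumed to be the configuration entry point in this version of the client API; this is an illustration, not part of the commit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class ExplicitSplitExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    // Old behaviour: split every region at an implicitly chosen point.
    admin.split(Bytes.toBytes("mytable"));
    // New behaviour: split only the region containing "row-5000",
    // at exactly that row.
    admin.split(Bytes.toBytes("mytable"), Bytes.toBytes("row-5000"));
  }
}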
@@ -371,6 +371,20 @@ public interface HRegionInterface extends HBaseRPCProtocolVersion, Stoppable, Ab
   void splitRegion(HRegionInfo regionInfo)
   throws NotServingRegionException, IOException;
 
+  /**
+   * Splits the specified region.
+   * <p>
+   * This method currently flushes the region and then forces a compaction which
+   * will then trigger a split. The flush is done synchronously but the
+   * compaction is asynchronous.
+   * @param regionInfo region to split
+   * @param splitPoint the explicit row to split on
+   * @throws NotServingRegionException
+   * @throws IOException
+   */
+  void splitRegion(HRegionInfo regionInfo, byte[] splitPoint)
+  throws NotServingRegionException, IOException;
+
   /**
    * Compacts the specified region. Performs a major compaction if specified.
    * <p>
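HBaseAdmin drives this RPC through its private split helper above. A caller that already holds an HConnection could reach the new interface method directly along these lines; a sketch only, assuming the region's HServerAddress and HRegionInfo have already been looked up from .META., with the helper class and method names invented for illustration.

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.ipc.HRegionInterface;

// Illustrative helper, not part of this commit.
final class RegionSplitHelper {
  static void splitAt(HConnection connection, HServerAddress hsa,
      HRegionInfo hri, byte[] splitPoint) throws IOException {
    // Proxy to the region server currently hosting the region.
    HRegionInterface rs = connection.getHRegionConnection(hsa);
    // Returns once the flush is done; the compaction-driven split is async.
    rs.splitRegion(hri, splitPoint);
  }
}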
@@ -242,6 +242,7 @@ public class HRegion implements HeapSize { // , Writable{
   private final ReentrantReadWriteLock updatesLock =
     new ReentrantReadWriteLock();
   private boolean splitRequest;
+  private byte[] splitPoint = null;
 
   private final ReadWriteConsistencyControl rwcc =
       new ReadWriteConsistencyControl();
@@ -829,6 +830,10 @@ public class HRegion implements HeapSize { // , Writable{
     } finally {
       lock.readLock().unlock();
     }
+    if (splitRow != null) {
+      assert splitPoint == null || Bytes.equals(splitRow, splitPoint);
+      this.splitPoint = null; // clear the split point (if set)
+    }
     return splitRow;
   }
 
@@ -3277,8 +3282,8 @@ public class HRegion implements HeapSize { // , Writable{
   }
 
   public static final long FIXED_OVERHEAD = ClassSize.align(
-      (4 * Bytes.SIZEOF_LONG) + Bytes.SIZEOF_BOOLEAN +
-      (23 * ClassSize.REFERENCE) + ClassSize.OBJECT + Bytes.SIZEOF_INT);
+      (4 * Bytes.SIZEOF_LONG) + Bytes.SIZEOF_BOOLEAN + ClassSize.ARRAY +
+      (24 * ClassSize.REFERENCE) + ClassSize.OBJECT + Bytes.SIZEOF_INT);
 
   public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD +
     (ClassSize.OBJECT * 2) + (2 * ClassSize.ATOMIC_BOOLEAN) +
@@ -3464,15 +3469,21 @@ public class HRegion implements HeapSize { // , Writable{
     }
   }
 
   /**
    * For internal use in forcing splits ahead of file size limit.
-   * @param b
-   * @return previous value
    */
-  public boolean shouldSplit(boolean b) {
-    boolean old = this.splitRequest;
-    this.splitRequest = b;
-    return old;
+  boolean shouldForceSplit() {
+    return this.splitRequest;
+  }
+
+  byte[] getSplitPoint() {
+    return this.splitPoint;
+  }
+
+  void forceSplit(byte[] sp) {
+    // NOTE : this HRegion will go away after the forced split is successfull
+    // therefore, no reason to clear this value
+    this.splitRequest = true;
+    if (sp != null) {
+      this.splitPoint = sp;
+    }
   }
 
   /**
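The new HRegion fields follow a simple set-and-consume protocol: the region server calls forceSplit(splitPoint) before triggering a compaction, Store reads the flags back via shouldForceSplit() and getSplitPoint() when choosing a split row, and the split-row path clears splitPoint once it has been consumed. Below is a minimal standalone model of that protocol; the class name is invented, the synchronized keyword is added only to make the standalone sketch self-consistent, and none of this is HBase code.

// Illustrative model of the force-split handshake added to HRegion.
final class ForceSplitState {
  private boolean splitRequest;
  private byte[] splitPoint;

  // Region server side: request a split, optionally at an explicit row.
  synchronized void forceSplit(byte[] sp) {
    this.splitRequest = true;
    if (sp != null) {
      this.splitPoint = sp;
    }
  }

  // Store side: was a split explicitly requested?
  synchronized boolean shouldForceSplit() {
    return this.splitRequest;
  }

  // Store side: prefer the explicit split row over the midkey, if one was set.
  synchronized byte[] chooseSplitRow(byte[] midKey) {
    byte[] row = (splitPoint != null) ? splitPoint : midKey;
    splitPoint = null; // consumed; the parent region goes away after the split
    return row;
  }
}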
@@ -2133,9 +2133,15 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
   @Override
   public void splitRegion(HRegionInfo regionInfo)
   throws NotServingRegionException, IOException {
+    splitRegion(regionInfo, null);
+  }
+
+  @Override
+  public void splitRegion(HRegionInfo regionInfo, byte[] splitPoint)
+  throws NotServingRegionException, IOException {
     HRegion region = getRegion(regionInfo.getRegionName());
     region.flushcache();
-    region.shouldSplit(true);
+    region.forceSplit(splitPoint);
     // force a compaction, split will be side-effect
     // TODO: flush/compact/split refactor will make it trivial to do this
     // sync/async (and won't require us to do a compaction to split!)
@@ -614,7 +614,7 @@ public class Store implements HeapSize {
    * @throws IOException
    */
   StoreSize compact(final boolean forceMajor) throws IOException {
-    boolean forceSplit = this.region.shouldSplit(false);
+    boolean forceSplit = this.region.shouldForceSplit();
     synchronized (compactLock) {
       this.lastCompactSize = 0; // reset first in case compaction is aborted
 
@@ -1334,6 +1334,10 @@ public class Store implements HeapSize {
         largestSf = sf;
       }
     }
+    // if the user explicit set a split point, use that
+    if (this.region.getSplitPoint() != null) {
+      return new StoreSize(maxSize, this.region.getSplitPoint());
+    }
     StoreFile.Reader r = largestSf.getReader();
     if (r == null) {
       LOG.warn("Storefile " + largestSf + " Reader is null");
@@ -149,7 +149,8 @@ public class TestAdmin {
    * Verify schema modification takes.
    * @throws IOException
    */
-  @Test public void testChangeTableSchema() throws IOException {
+  @Test
+  public void testChangeTableSchema() throws IOException {
     final byte [] tableName = Bytes.toBytes("changeTableSchema");
     HTableDescriptor [] tables = admin.listTables();
     int numTables = tables.length;
@@ -474,9 +475,16 @@ public class TestAdmin {
    */
   @Test
   public void testForceSplit() throws Exception {
+    splitTest(null);
+    splitTest(Bytes.toBytes("pwn"));
+  }
+
+  void splitTest(byte[] splitPoint) throws Exception {
     byte [] familyName = HConstants.CATALOG_FAMILY;
     byte [] tableName = Bytes.toBytes("testForceSplit");
     assertFalse(admin.tableExists(tableName));
     final HTable table = TEST_UTIL.createTable(tableName, familyName);
     try {
       byte[] k = new byte[3];
       int rowCount = 0;
       for (byte b1 = 'a'; b1 < 'z'; b1++) {
@@ -539,7 +547,7 @@ public class TestAdmin {
     };
     t.start();
     // Split the table
-    this.admin.split(Bytes.toString(tableName));
+    this.admin.split(tableName, splitPoint);
     t.join();
 
     // Verify row count
@@ -553,6 +561,26 @@ public class TestAdmin {
       }
       scanner.close();
       assertEquals(rowCount, rows);
+
+      if (splitPoint != null) {
+        // make sure the split point matches our explicit configuration
+        Map<HRegionInfo, HServerAddress> regions = null;
+        try {
+          regions = table.getRegionsInfo();
+        } catch (IOException e) {
+          e.printStackTrace();
+        }
+        assertEquals(2, regions.size());
+        HRegionInfo[] r = regions.keySet().toArray(new HRegionInfo[0]);
+        assertEquals(Bytes.toString(splitPoint),
+          Bytes.toString(r[0].getEndKey()));
+        assertEquals(Bytes.toString(splitPoint),
+          Bytes.toString(r[1].getStartKey()));
+        LOG.debug("Properly split on " + Bytes.toString(splitPoint));
+      }
     } finally {
       TEST_UTIL.deleteTable(tableName);
     }
   }
 
   /**
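Because the split is asynchronous end to end (the RPC returns once the flush completes), callers generally have to poll for the outcome, much as the test checks table.getRegionsInfo() after splitting. A sketch of such a wait loop follows; the helper class name, timeout, and one-second polling interval are illustrative assumptions, not part of this commit.

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.client.HTable;

// Illustrative helper: wait until the table reports at least expectedRegions
// regions, or give up after timeoutMs milliseconds.
final class SplitWaiter {
  static boolean waitForRegionCount(HTable table, int expectedRegions,
      long timeoutMs) throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      Map<HRegionInfo, HServerAddress> regions = table.getRegionsInfo();
      if (regions.size() >= expectedRegions) {
        return true;
      }
      Thread.sleep(1000); // the compaction-driven split runs in the background
    }
    return false;
  }
}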