diff --git a/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 2f04fa10127..267c64de5fa 100644
--- a/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -529,6 +529,7 @@ public class HBaseAdmin {
         arr[0] = (HTableDescriptor)args[0];
         this.master.modifyTable(tableName, op, arr);
       } break;
+    case HConstants.MODIFY_TABLE_COMPACT:
     case HConstants.MODIFY_TABLE_SPLIT: {
       Writable[] arr = null;
       if (args != null && args.length > 0) {
@@ -538,8 +539,7 @@
         } else if (args[0] instanceof ImmutableBytesWritable) {
           arr[0] = (ImmutableBytesWritable)args[0];
         } else {
-          throw new IOException(
-            "SPLIT with arg requires byte[] or ImmutableBytesWritable");
+          throw new IOException("SPLIT or COMPACT with arg requires byte[] or ImmutableBytesWritable");
         }
       }
       this.master.modifyTable(tableName, op, arr);
diff --git a/src/java/org/apache/hadoop/hbase/master/RegionManager.java b/src/java/org/apache/hadoop/hbase/master/RegionManager.java
index 03d76aef657..3fb62da69bd 100644
--- a/src/java/org/apache/hadoop/hbase/master/RegionManager.java
+++ b/src/java/org/apache/hadoop/hbase/master/RegionManager.java
@@ -996,6 +996,10 @@ class RegionManager implements HConstants {
     while (i.hasNext()) {
       Pair<HRegionInfo, HServerAddress> pair = i.next();
       if (addr.equals(pair.getSecond())) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("sending MSG_REGION_COMPACT " + pair.getFirst() + " to " +
+            addr);
+        }
         returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_COMPACT, pair.getFirst()));
         i.remove();
       }
@@ -1004,6 +1008,10 @@
     while (i.hasNext()) {
       Pair<HRegionInfo, HServerAddress> pair = i.next();
       if (addr.equals(pair.getSecond())) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("sending MSG_REGION_SPLIT " + pair.getFirst() + " to " +
+            addr);
+        }
         returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_SPLIT, pair.getFirst()));
         i.remove();
       }
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 39d05832721..494a9abf3c4 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -534,8 +534,7 @@ public class HRegion implements HConstants {
     byte [] startKey = this.regionInfo.getStartKey();
     byte [] endKey = this.regionInfo.getEndKey();
     if (HStoreKey.equalsTwoRowKeys(this.regionInfo,startKey, midKey)) {
-      LOG.debug("Startkey (" + startKey + ") and midkey + (" +
-        midKey + ") are same, not splitting");
+      LOG.debug("Startkey and midkey are same, not splitting");
       return null;
     }
     if (HStoreKey.equalsTwoRowKeys(this.regionInfo,midKey, endKey)) {
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 01226aa363a..19c8b7215dd 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1018,7 +1018,11 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       case MSG_REGION_SPLIT: {
         // Force split a region
         HRegion region = getRegion(info.getRegionName());
+        // flush the memcache for the region
+        region.flushcache();
+        // flag that the region should be split
         region.regionInfo.shouldSplit(true);
+        // force a compaction
         compactSplitThread.compactionRequested(region,
           "MSG_REGION_SPLIT");
       } break;
@@ -1026,6 +1030,9 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       case MSG_REGION_COMPACT: {
         // Compact a region
         HRegion region = getRegion(info.getRegionName());
+        // flush the memcache for the region
+        region.flushcache();
+        // force a compaction
         compactSplitThread.compactionRequested(region,
           "MSG_REGION_COMPACT");
       } break;
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HStore.java b/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
index b58cbca40e9..708ab20628c 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -823,6 +823,7 @@ public class HStore implements HConstants {
     List<HStoreFile> filesToCompact = null;
     synchronized (storefiles) {
       if (this.storefiles.size() <= 0) {
+        LOG.debug("no store files to compact");
        return null;
      }
      // filesToCompact are sorted oldest to newest.
@@ -839,8 +840,8 @@
        doMajorCompaction = isMajorCompaction(filesToCompact);
      }
      boolean references = hasReferences(filesToCompact);
-      if (!doMajorCompaction && !references &&
-        filesToCompact.size() < compactionThreshold) {
+      if (!doMajorCompaction && !references &&
+        (forceSplit || (filesToCompact.size() < compactionThreshold))) {
        return checkSplit(forceSplit);
      }
      if (!fs.exists(compactionDir) && !fs.mkdirs(compactionDir)) {
@@ -2036,6 +2037,9 @@
          splitable = !curHSF.isReference();
          if (!splitable) {
            // RETURN IN MIDDLE OF FUNCTION!!! If not splitable, just return.
+            if (LOG.isDebugEnabled()) {
+              LOG.debug(curHSF + " is not splittable");
+            }
            return null;
          }
        }
@@ -2063,6 +2067,9 @@
        // (ever) split this region.
        if (HStoreKey.equalsTwoRowKeys(info, mk.getRow(), firstKey.getRow()) &&
            HStoreKey.equalsTwoRowKeys(info, mk.getRow(), lastKey.getRow())) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("cannot split because midkey is the same as first or last row");
+          }
          return null;
        }
        return new StoreSize(maxSize, mk.getRow());
diff --git a/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java b/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java
new file mode 100755
index 00000000000..9e48253c2cc
--- /dev/null
+++ b/src/test/org/apache/hadoop/hbase/client/TestForceSplit.java
@@ -0,0 +1,119 @@
+/**
+ * Copyright 2007 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HBaseClusterTestCase;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HServerAddress;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.io.BatchUpdate;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Tests forced splitting of HTable
+ */
+public class TestForceSplit extends HBaseClusterTestCase {
+  private static final byte[] tableName = Bytes.toBytes("test");
+  private static final byte[] columnName = Bytes.toBytes("a:");
+  private static final byte[] key_mmi = Bytes.toBytes("mmi");
+  private static final byte[] key_ssm = Bytes.toBytes("ssm");
+
+  /**
+   * the test
+   * @throws IOException
+   */
+  public void testHTable() throws Exception {
+    // create the test table
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    htd.addFamily(new HColumnDescriptor(columnName));
+    HBaseAdmin admin = new HBaseAdmin(conf);
+    admin.createTable(htd);
+    HTable table = new HTable(conf, tableName);
+    byte[] k = new byte[3];
+    for (byte b1 = 'a'; b1 < 'z'; b1++) {
+      for (byte b2 = 'a'; b2 < 'z'; b2++) {
+        for (byte b3 = 'a'; b3 < 'z'; b3++) {
+          k[0] = b1;
+          k[1] = b2;
+          k[2] = b3;
+          BatchUpdate update = new BatchUpdate(k);
+          update.put(columnName, k);
+          table.commit(update);
+        }
+      }
+    }
+
+    // get the initial layout (should just be one region)
+    Map<HRegionInfo, HServerAddress> m = table.getRegionsInfo();
+    System.out.println("Initial regions (" + m.size() + "): " + m);
+    assertTrue(m.size() == 1);
+
+    // tell the master to split the table
+    admin.modifyTable(tableName, HConstants.MODIFY_TABLE_SPLIT);
+
+    // give some time for the split to happen
+    Thread.sleep(15 * 1000);
+
+    // check again
+    table = new HTable(conf, tableName);
+    m = table.getRegionsInfo();
+    System.out.println("Regions after split (" + m.size() + "): " + m);
+    // should have two regions now
+    assertTrue(m.size() == 2);
+    // and "mmi" should be the midpoint
+    for (HRegionInfo hri: m.keySet()) {
+      byte[] start = hri.getStartKey();
+      byte[] end = hri.getEndKey();
+      if (Bytes.equals(start, HConstants.EMPTY_BYTE_ARRAY))
+        assertTrue(Bytes.equals(end, key_mmi));
+      if (Bytes.equals(end, key_mmi))
+        assertTrue(Bytes.equals(start, HConstants.EMPTY_BYTE_ARRAY));
+    }
+
+    // tell the master to split the table again, the second half
+    admin.modifyTable(tableName, HConstants.MODIFY_TABLE_SPLIT, key_mmi);
+
+    // give some time for the split to happen
+    Thread.sleep(15 * 1000);
+
+    // check again
+    table = new HTable(conf, tableName);
+    m = table.getRegionsInfo();
+    System.out.println("Regions after split (" + m.size() + "): " + m);
+    // should have three regions now
+    assertTrue(m.size() == 3);
+    // and "mmi" and "ssm" should be the midpoints
+    for (HRegionInfo hri: m.keySet()) {
+      byte[] start = hri.getStartKey();
+      byte[] end = hri.getEndKey();
+      if (Bytes.equals(start, HConstants.EMPTY_BYTE_ARRAY))
+        assertTrue(Bytes.equals(end, key_mmi));
+      if (Bytes.equals(start, key_mmi))
+        assertTrue(Bytes.equals(end, key_ssm));
+      if (Bytes.equals(start, key_ssm))
+        assertTrue(Bytes.equals(end, HConstants.EMPTY_BYTE_ARRAY));
+    }
+  }
+}
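
Usage note (not part of the patch): the client-side entry point for both new operations is HBaseAdmin.modifyTable, as exercised by TestForceSplit above. The following is a minimal, hypothetical sketch of driving a forced compaction and a targeted forced split from standalone client code. It assumes the same 0.18/0.19-era client API used by the test, assumes HConstants.MODIFY_TABLE_COMPACT is defined (it is referenced by the HBaseAdmin hunk), and the table name "forced_test" and class name are illustrative only.

// Hypothetical usage sketch -- not part of the patch.
// Assumes the client API shown in TestForceSplit; "forced_test" is an illustrative table name.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class ForceCompactSplitSketch {
  public static void main(String[] args) throws Exception {
    HBaseConfiguration conf = new HBaseConfiguration();
    HBaseAdmin admin = new HBaseAdmin(conf);
    byte[] tableName = Bytes.toBytes("forced_test");

    // Ask the master to queue a forced compaction for the table, with no row
    // argument (mirroring the no-argument split call in TestForceSplit).
    admin.modifyTable(tableName, HConstants.MODIFY_TABLE_COMPACT);

    // Ask the master to split the region containing row "mmi"; per the
    // HBaseAdmin change above, the argument may be a byte[] or an
    // ImmutableBytesWritable.
    admin.modifyTable(tableName, HConstants.MODIFY_TABLE_SPLIT, Bytes.toBytes("mmi"));
  }
}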