diff --git a/CHANGES.txt b/CHANGES.txt
index 5f70e4a7793..74141ddd5a6 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -52,6 +52,7 @@ Release 0.19.0 - Unreleased
(Andrzej Bialecki via Stack)
HBASE-576 Investigate IPC performance
HBASE-920 Make region balancing sloppier
+ HBASE-902 Add force compaction and force split operations to UI and Admin
NEW FEATURES
HBASE-875 Use MurmurHash instead of JenkinsHash [in bloomfilters]
diff --git a/src/java/org/apache/hadoop/hbase/HConstants.java b/src/java/org/apache/hadoop/hbase/HConstants.java
index 6bed7a4baa8..de8bb422fa7 100644
--- a/src/java/org/apache/hadoop/hbase/HConstants.java
+++ b/src/java/org/apache/hadoop/hbase/HConstants.java
@@ -254,4 +254,12 @@ public interface HConstants {
* backoff table, a bit more aggressive than simple exponential backoff.
*/
public static int RETRY_BACKOFF[] = { 1, 1, 1, 2, 2, 4, 4, 8, 16, 32 };
+
+ /** modifyTable op for replacing the table descriptor */
+ public static final int MODIFY_TABLE_SET_HTD = 1;
+ /** modifyTable op for forcing a split */
+ public static final int MODIFY_TABLE_SPLIT = 2;
+ /** modifyTable op for forcing a compaction */
+ public static final int MODIFY_TABLE_COMPACT = 3;
+
}
diff --git a/src/java/org/apache/hadoop/hbase/HMsg.java b/src/java/org/apache/hadoop/hbase/HMsg.java
index 120c18ab881..b15baba91ea 100644
--- a/src/java/org/apache/hadoop/hbase/HMsg.java
+++ b/src/java/org/apache/hadoop/hbase/HMsg.java
@@ -49,7 +49,13 @@ public class HMsg implements Writable {
/** Stop serving the specified region */
MSG_REGION_CLOSE,
-
+
+ /** Split the specified region */
+ MSG_REGION_SPLIT,
+
+ /** Compact the specified region */
+ MSG_REGION_COMPACT,
+
/** Region server is unknown to master. Restart */
MSG_CALL_SERVER_STARTUP,
diff --git a/src/java/org/apache/hadoop/hbase/HRegionInfo.java b/src/java/org/apache/hadoop/hbase/HRegionInfo.java
index 4e163cbe2aa..f1f9ea168a2 100644
--- a/src/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/src/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -68,7 +68,8 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
//TODO: Move NO_HASH to HStoreFile which is really the only place it is used.
public static final int NO_HASH = -1;
private volatile int encodedName = NO_HASH;
-
+ private boolean splitRequest = false;
+
private void setHashCode() {
int result = this.regionName.hashCode();
result ^= this.regionId;
@@ -317,7 +318,8 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
Bytes.toString(this.startKey) + "', ENDKEY => '" +
Bytes.toString(this.endKey) +
"', ENCODED => " + getEncodedName() + "," +
- (isOffline()? " OFFLINE => true,": "") + (isSplit()? " SPLIT => true,": "") +
+ (isOffline()? " OFFLINE => true,": "") +
+ (isSplit()? " SPLIT => true,": "") +
" TABLE => {" + this.tableDesc.toString() + "}";
}
@@ -393,4 +395,14 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
// Compare end keys.
return HStoreKey.compareTwoRowKeys(other, this.endKey, other.endKey);
}
+
+ /**
+ * For internal use in forcing splits ahead of file size limit.
+ */
+ public boolean shouldSplit(boolean b) {
+ boolean old = this.splitRequest;
+ this.splitRequest = b;
+ return old;
+ }
+
}
diff --git a/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 2c6f0ed1039..2f04fa10127 100644
--- a/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -35,11 +35,13 @@ import org.apache.hadoop.hbase.RegionException;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.ipc.HMasterInterface;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
+import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.RemoteException;
/**
@@ -504,20 +506,48 @@ public class HBaseAdmin {
}
/**
- * Modify a table's HTableDescriptor
+ * Modify an existing table
*
* @param tableName name of table
- * @param desc the updated descriptor
+ * @param op table modification operation
+ * @param args operation specific arguments
* @throws IOException
*/
- public void modifyTableMeta(final byte [] tableName, HTableDescriptor desc)
- throws IOException {
+ public void modifyTable(final byte [] tableName, int op, Object... args)
+ throws IOException {
if (this.master == null) {
throw new MasterNotRunningException("master has been shut down");
}
HTableDescriptor.isLegalTableName(tableName);
try {
- this.master.modifyTableMeta(tableName, desc);
+ switch (op) {
+ case HConstants.MODIFY_TABLE_SET_HTD: {
+ if (args == null || args.length < 1 ||
+ !(args[0] instanceof HTableDescriptor))
+ throw new IOException("SET_HTD requires a HTableDescriptor");
+ Writable[] arr = new Writable[1];
+ arr[0] = (HTableDescriptor)args[0];
+ this.master.modifyTable(tableName, op, arr);
+ } break;
+ case HConstants.MODIFY_TABLE_SPLIT: {
+ Writable[] arr = null;
+ if (args != null && args.length > 0) {
+ arr = new Writable[1];
+ if (args[0] instanceof byte[]) {
+ arr[0] = new ImmutableBytesWritable((byte[])args[0]);
+ } else if (args[0] instanceof ImmutableBytesWritable) {
+ arr[0] = (ImmutableBytesWritable)args[0];
+ } else {
+ throw new IOException(
+ "SPLIT with arg requires byte[] or ImmutableBytesWritable");
+ }
+ }
+ this.master.modifyTable(tableName, op, arr);
+ break;
+ }
+ default:
+ throw new IOException("unknown modifyTable op " + op);
+ }
} catch (RemoteException e) {
throw RemoteExceptionHandler.decodeRemoteException(e);
}
diff --git a/src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java b/src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
index 7cea66c3956..acd37cdf495 100644
--- a/src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
+++ b/src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java
@@ -95,6 +95,7 @@ public class HbaseObjectWritable implements Writable, Configurable {
// Hadoop types
addToMap(Text.class, code++);
addToMap(Writable.class, code++);
+ addToMap(Writable [].class, code++);
addToMap(HbaseMapWritable.class, code++);
addToMap(NullInstance.class, code++);
try {
diff --git a/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java b/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
index ef7af43374d..0b8d181d871 100644
--- a/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
+++ b/src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java
@@ -24,6 +24,7 @@ import java.io.IOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.VersionedProtocol;
/**
@@ -39,8 +40,9 @@ public interface HMasterInterface extends VersionedProtocol {
* of actual class names (HADOOP-2519).
*
 * <p>Version 4 when we moved to all byte arrays (HBASE-42).
 *
 * <p>Version 5 HBASE-576.
+ *
+ * <p>Version 6 modifyTable.
*/
- public static final long versionID = 5L;
+ public static final long versionID = 6L;
/** @return true if master is available */
public boolean isMasterRunning();
@@ -110,10 +112,11 @@ public interface HMasterInterface extends VersionedProtocol {
* Modify a table's metadata
*
* @param tableName
- * @param desc
+ * @param op
+ * @param args
* @throws IOException
*/
- public void modifyTableMeta(byte[] tableName, HTableDescriptor desc)
+ public void modifyTable(byte[] tableName, int op, Writable[] args)
throws IOException;
/**
diff --git a/src/java/org/apache/hadoop/hbase/master/HMaster.java b/src/java/org/apache/hadoop/hbase/master/HMaster.java
index 7056934d290..4d82d1cfe5b 100644
--- a/src/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/src/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -21,9 +21,11 @@ package org.apache.hadoop.hbase.master;
import java.io.IOException;
import java.lang.reflect.Constructor;
+import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Random;
+import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.DelayQueue;
import java.util.concurrent.LinkedBlockingQueue;
@@ -55,6 +57,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.ServerConnection;
import org.apache.hadoop.hbase.client.ServerConnectionManager;
import org.apache.hadoop.hbase.io.Cell;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.ipc.HMasterInterface;
import org.apache.hadoop.hbase.ipc.HMasterRegionInterface;
@@ -64,10 +67,12 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.InfoServer;
+import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Sleeper;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.Server;
@@ -672,10 +677,117 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
new ChangeTableState(this, tableName, false).process();
}
- public void modifyTableMeta(final byte[] tableName, HTableDescriptor desc)
- throws IOException
- {
- new ModifyTableMeta(this, tableName, desc).process();
+  private List<Pair<HRegionInfo,HServerAddress>>
+  getTableRegions(final byte [] tableName) throws IOException {
+    List<Pair<HRegionInfo,HServerAddress>> result =
+      new ArrayList<Pair<HRegionInfo,HServerAddress>>();
+    Set<MetaRegion> regions = regionManager.getMetaRegionsForTable(tableName);
+ for (MetaRegion m: regions) {
+ byte [] metaRegionName = m.getRegionName();
+ HRegionInterface srvr = connection.getHRegionConnection(m.getServer());
+ long scannerid =
+ srvr.openScanner(metaRegionName,
+ new byte[][] {COL_REGIONINFO, COL_SERVER},
+ tableName,
+ LATEST_TIMESTAMP,
+ null);
+ try {
+ while (true) {
+ RowResult data = srvr.next(scannerid);
+ if (data == null || data.size() <= 0)
+ break;
+ HRegionInfo info = Writables.getHRegionInfo(data.get(COL_REGIONINFO));
+ if (Bytes.compareTo(info.getTableDesc().getName(), tableName) == 0) {
+ Cell cell = data.get(COL_SERVER);
+ if (cell != null) {
+ HServerAddress server =
+ new HServerAddress(Bytes.toString(cell.getValue()));
+            result.add(new Pair<HRegionInfo,HServerAddress>(info, server));
+ }
+ } else {
+ break;
+ }
+ }
+ } finally {
+ srvr.close(scannerid);
+ }
+ }
+ return result;
+ }
+
+  private Pair<HRegionInfo,HServerAddress>
+ getTableRegionClosest(final byte [] tableName, final byte [] rowKey)
+ throws IOException {
+    Set<MetaRegion> regions = regionManager.getMetaRegionsForTable(tableName);
+ for (MetaRegion m: regions) {
+ byte [] metaRegionName = m.getRegionName();
+ HRegionInterface srvr = connection.getHRegionConnection(m.getServer());
+ long scannerid =
+ srvr.openScanner(metaRegionName,
+ new byte[][] {COL_REGIONINFO, COL_SERVER},
+ tableName,
+ LATEST_TIMESTAMP,
+ null);
+ try {
+ while (true) {
+ RowResult data = srvr.next(scannerid);
+ if (data == null || data.size() <= 0)
+ break;
+ HRegionInfo info = Writables.getHRegionInfo(data.get(COL_REGIONINFO));
+ if (Bytes.compareTo(info.getTableDesc().getName(), tableName) == 0) {
+ if ((Bytes.compareTo(info.getStartKey(), rowKey) >= 0) &&
+ (Bytes.compareTo(info.getEndKey(), rowKey) < 0)) {
+ Cell cell = data.get(COL_SERVER);
+ if (cell != null) {
+ HServerAddress server =
+ new HServerAddress(Bytes.toString(cell.getValue()));
+                return new Pair<HRegionInfo,HServerAddress>(info, server);
+ }
+ }
+ } else {
+ break;
+ }
+ }
+ } finally {
+ srvr.close(scannerid);
+ }
+ }
+ return null;
+ }
+
+ public void modifyTable(final byte[] tableName, int op, Writable[] args)
+ throws IOException {
+ switch (op) {
+ case MODIFY_TABLE_SET_HTD:
+ if (args == null || args.length < 1 ||
+ !(args[0] instanceof HTableDescriptor))
+ throw new IOException("SET_HTD request requires an HTableDescriptor");
+ HTableDescriptor htd = (HTableDescriptor) args[0];
+ LOG.info("modifyTable(SET_HTD): " + htd);
+ new ModifyTableMeta(this, tableName, htd).process();
+ break;
+ case MODIFY_TABLE_SPLIT:
+ case MODIFY_TABLE_COMPACT:
+ if (args != null && args.length > 0) {
+ if (!(args[0] instanceof ImmutableBytesWritable))
+ throw new IOException(
+ "request argument must be ImmutableBytesWritable");
+ byte[] rowKey = ((ImmutableBytesWritable)args[0]).get();
+        Pair<HRegionInfo,HServerAddress> pair =
+ getTableRegionClosest(tableName, rowKey);
+ if (pair != null) {
+ regionManager.startAction(pair.getFirst().getRegionName(),
+ pair.getFirst(), pair.getSecond(), op);
+ }
+ } else {
+        for (Pair<HRegionInfo,HServerAddress> pair: getTableRegions(tableName))
+ regionManager.startAction(pair.getFirst().getRegionName(),
+ pair.getFirst(), pair.getSecond(), op);
+ }
+ break;
+ default:
+ throw new IOException("unsupported modifyTable op " + op);
+ }
}
public HServerAddress findRootRegion() {
diff --git a/src/java/org/apache/hadoop/hbase/master/RegionManager.java b/src/java/org/apache/hadoop/hbase/master/RegionManager.java
index cc3cc5904c3..e87d6ec04b5 100644
--- a/src/java/org/apache/hadoop/hbase/master/RegionManager.java
+++ b/src/java/org/apache/hadoop/hbase/master/RegionManager.java
@@ -25,6 +25,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.HashSet;
+import java.util.Iterator;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
@@ -47,6 +48,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.HMsg;
import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.util.Writables;
@@ -120,7 +122,18 @@ class RegionManager implements HConstants {
private final HMaster master;
private final RegionHistorian historian;
private final float slop;
-
+
+ /** Set of regions to split. */
+  private final Map<byte[], Pair<HRegionInfo,HServerAddress>> regionsToSplit =
+    Collections.synchronizedSortedMap(
+      new TreeMap<byte[], Pair<HRegionInfo,HServerAddress>>
+      (Bytes.BYTES_COMPARATOR));
+  /** Set of regions to compact. */
+  private final Map<byte[], Pair<HRegionInfo,HServerAddress>> regionsToCompact =
+    Collections.synchronizedSortedMap(
+      new TreeMap<byte[], Pair<HRegionInfo,HServerAddress>>
+      (Bytes.BYTES_COMPARATOR));
+
RegionManager(HMaster master) {
this.master = master;
this.historian = RegionHistorian.getInstance();
@@ -921,4 +934,74 @@ class RegionManager implements HConstants {
public void setNumMetaRegions(int num) {
numberOfMetaRegions.set(num);
}
+
+ /**
+ * @param regionName
+ */
+ public void startAction(byte[] regionName, HRegionInfo info,
+ HServerAddress server, int op) {
+ switch (op) {
+ case HConstants.MODIFY_TABLE_SPLIT:
+ regionsToSplit.put(regionName,
+        new Pair<HRegionInfo,HServerAddress>(info, server));
+ break;
+ case HConstants.MODIFY_TABLE_COMPACT:
+ regionsToCompact.put(regionName,
+        new Pair<HRegionInfo,HServerAddress>(info, server));
+ break;
+ default:
+ throw new IllegalArgumentException("illegal table action " + op);
+ }
+ }
+
+ /**
+ * @param regionName
+ */
+ public void endAction(byte[] regionName, int op) {
+ switch (op) {
+ case HConstants.MODIFY_TABLE_SPLIT:
+ regionsToSplit.remove(regionName);
+ break;
+ case HConstants.MODIFY_TABLE_COMPACT:
+ regionsToCompact.remove(regionName);
+ break;
+ default:
+ throw new IllegalArgumentException("illegal table action " + op);
+ }
+ }
+
+ /**
+ * @param regionName
+ */
+ public void endActions(byte[] regionName) {
+ regionsToSplit.remove(regionName);
+ regionsToCompact.remove(regionName);
+ }
+
+ /**
+ * Send messages to the given region server asking it to split any
+ * regions in 'regionsToSplit'
+ * @param serverInfo
+ * @param returnMsgs
+ */
+  public void applyActions(HServerInfo serverInfo, ArrayList<HMsg> returnMsgs) {
+ HServerAddress addr = serverInfo.getServerAddress();
+    Iterator<Pair<HRegionInfo,HServerAddress>> i =
+ regionsToCompact.values().iterator();
+ while (i.hasNext()) {
+      Pair<HRegionInfo,HServerAddress> pair = i.next();
+ if (addr.equals(pair.getSecond())) {
+ returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_COMPACT, pair.getFirst()));
+ i.remove();
+ }
+ }
+ i = regionsToSplit.values().iterator();
+ while (i.hasNext()) {
+      Pair<HRegionInfo,HServerAddress> pair = i.next();
+ if (addr.equals(pair.getSecond())) {
+ returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_SPLIT, pair.getFirst()));
+ i.remove();
+ }
+ }
+ }
}
diff --git a/src/java/org/apache/hadoop/hbase/master/ServerManager.java b/src/java/org/apache/hadoop/hbase/master/ServerManager.java
index 6c73f0c96db..ae6764233ce 100644
--- a/src/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/src/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -70,7 +70,7 @@ class ServerManager implements HConstants {
/** Map of server names -> server load */
final Map<String, HServerLoad> serversToLoad =
  new ConcurrentHashMap<String, HServerLoad>();
-
+
private HMaster master;
private final Leases serverLeases;
@@ -345,7 +345,7 @@ class ServerManager implements HConstants {
break;
case MSG_REPORT_SPLIT:
- processSplitRegion(serverName, serverInfo, region, incomingMsgs[++i],
+ processSplitRegion(serverName, serverInfo, region, incomingMsgs[++i],
incomingMsgs[++i], returnMsgs);
break;
@@ -364,9 +364,14 @@ class ServerManager implements HConstants {
master.regionManager.setClosing(i.getRegionName());
}
}
+
// Figure out what the RegionServer ought to do, and write back.
master.regionManager.assignRegions(serverInfo, serverName,
mostLoadedRegions, returnMsgs);
+
+ // Send any pending table actions.
+ master.regionManager.applyActions(serverInfo, returnMsgs);
+
return returnMsgs.toArray(new HMsg[returnMsgs.size()]);
}
@@ -382,7 +387,12 @@ class ServerManager implements HConstants {
*/
private void processSplitRegion(String serverName, HServerInfo serverInfo,
HRegionInfo region, HMsg splitA, HMsg splitB, ArrayList<HMsg> returnMsgs) {
-
+
+ // Cancel any actions pending for the affected region.
+ // This prevents the master from sending a SPLIT message if the table
+ // has already split by the region server.
+ master.regionManager.endActions(region.getRegionName());
+
HRegionInfo newRegionA = splitA.getRegionInfo();
master.regionManager.setUnassigned(newRegionA);
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 8b48c2bbd6c..1f54963e91c 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -851,6 +851,21 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
closeRegion(e.msg.getRegionInfo(), false);
break;
+ case MSG_REGION_SPLIT: {
+ HRegionInfo info = e.msg.getRegionInfo();
+ // Force split a region
+ HRegion region = getRegion(info.getRegionName());
+ region.regionInfo.shouldSplit(true);
+ compactSplitThread.compactionRequested(region);
+ } break;
+
+ case MSG_REGION_COMPACT: {
+ // Compact a region
+ HRegionInfo info = e.msg.getRegionInfo();
+ HRegion region = getRegion(info.getRegionName());
+ compactSplitThread.compactionRequested(region);
+ } break;
+
default:
throw new AssertionError(
"Impossible state during msg processing. Instruction: "
diff --git a/src/java/org/apache/hadoop/hbase/regionserver/HStore.java b/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
index f8cbcf6e201..b686b8f88be 100644
--- a/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/src/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -768,6 +768,7 @@ public class HStore implements HConstants {
* @throws IOException
*/
StoreSize compact(boolean majorCompaction) throws IOException {
+ boolean forceSplit = this.info.shouldSplit(false);
synchronized (compactLock) {
long maxId = -1;
int nrows = -1;
@@ -803,11 +804,11 @@ public class HStore implements HConstants {
}
if (!majorCompaction && !hasReferences(filesToCompact) &&
filesToCompact.size() < compactionThreshold) {
- return checkSplit();
+ return checkSplit(forceSplit);
}
if (!fs.exists(compactionDir) && !fs.mkdirs(compactionDir)) {
LOG.warn("Mkdir on " + compactionDir.toString() + " failed");
- return checkSplit();
+ return checkSplit(forceSplit);
}
// HBASE-745, preparing all store file size for incremental compacting
@@ -848,7 +849,7 @@ public class HStore implements HConstants {
StringUtils.humanReadableInt(totalSize) + "; Skipped " + point +
" files, size: " + skipped);
}
- return checkSplit();
+ return checkSplit(forceSplit);
}
if (LOG.isDebugEnabled()) {
LOG.debug("Compaction size of " + this.storeNameStr + ": " +
@@ -911,7 +912,7 @@ public class HStore implements HConstants {
(lastMajorCompaction/1000) + " seconds"));
}
}
- return checkSplit();
+ return checkSplit(forceSplit);
}
/*
@@ -1838,11 +1839,11 @@ public class HStore implements HConstants {
*
* @return a StoreSize if store can be split, null otherwise
*/
- StoreSize checkSplit() {
+ StoreSize checkSplit(boolean force) {
if (this.storefiles.size() <= 0) {
return null;
}
- if (storeSize < this.desiredMaxFileSize) {
+ if (!force && (storeSize < this.desiredMaxFileSize)) {
return null;
}
this.lock.readLock().lock();
diff --git a/src/java/org/apache/hadoop/hbase/util/Pair.java b/src/java/org/apache/hadoop/hbase/util/Pair.java
new file mode 100644
index 00000000000..88a66921cc9
--- /dev/null
+++ b/src/java/org/apache/hadoop/hbase/util/Pair.java
@@ -0,0 +1,92 @@
+package org.apache.hadoop.hbase.util;
+
+import java.io.Serializable;
+
+/**
+ * A generic class for pairs.
+ */
+public class Pair<T1, T2> implements Serializable
+{
+ private static final long serialVersionUID = -3986244606585552569L;
+ protected T1 first = null;
+ protected T2 second = null;
+
+ /**
+ * Default constructor.
+ */
+ public Pair()
+ {
+ }
+
+ /**
+ * Constructor
+ * @param a
+ * @param b
+ */
+ public Pair(T1 a, T2 b)
+ {
+ this.first = a;
+ this.second = b;
+ }
+
+ /**
+ * Replace the first element of the pair.
+ * @param a
+ */
+ public void setFirst(T1 a)
+ {
+ this.first = a;
+ }
+
+ /**
+ * Replace the second element of the pair.
+ * @param b
+ */
+ public void setSecond(T2 b)
+ {
+ this.second = b;
+ }
+
+ /**
+ * Return the first element stored in the pair.
+ */
+ public T1 getFirst()
+ {
+ return first;
+ }
+
+ /**
+ * Return the second element stored in the pair.
+ */
+ public T2 getSecond()
+ {
+ return second;
+ }
+
+ private static boolean equals(Object x, Object y)
+ {
+ return (x == null && y == null) || (x != null && x.equals(y));
+ }
+
+ @SuppressWarnings("unchecked")
+ public boolean equals(Object other)
+ {
+ return other instanceof Pair && equals(first, ((Pair)other).first) &&
+ equals(second, ((Pair)other).second);
+ }
+
+ public int hashCode()
+ {
+ if (first == null)
+ return (second == null) ? 0 : second.hashCode() + 1;
+ else if (second == null)
+ return first.hashCode() + 2;
+ else
+ return first.hashCode() * 17 + second.hashCode();
+ }
+
+ public String toString()
+ {
+ return "{" + getFirst() + "," + getSecond() + "}";
+ }
+}
\ No newline at end of file
diff --git a/src/test/org/apache/hadoop/hbase/client/TestHTable.java b/src/test/org/apache/hadoop/hbase/client/TestHTable.java
index a1340990d73..377e83e631e 100644
--- a/src/test/org/apache/hadoop/hbase/client/TestHTable.java
+++ b/src/test/org/apache/hadoop/hbase/client/TestHTable.java
@@ -22,8 +22,6 @@ package org.apache.hadoop.hbase.client;
import java.io.IOException;
import java.util.Map;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseClusterTestCase;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
@@ -33,15 +31,11 @@ import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.hbase.HRegionInfo;
/**
* Tests HTable
*/
public class TestHTable extends HBaseClusterTestCase implements HConstants {
- private static final Log LOG = LogFactory.getLog(TestHTable.class);
private static final HColumnDescriptor column =
new HColumnDescriptor(COLUMN_FAMILY);
@@ -146,7 +140,7 @@ public class TestHTable extends HBaseClusterTestCase implements HConstants {
for (HColumnDescriptor c: desc.getFamilies())
c.setValue(attrName, attrValue);
// update metadata for all regions of this table
- admin.modifyTableMeta(tableAname, desc);
+ admin.modifyTable(tableAname, HConstants.MODIFY_TABLE_SET_HTD, desc);
// enable the table
admin.enableTable(tableAname);
diff --git a/src/webapps/master/table.jsp b/src/webapps/master/table.jsp
index 334f6517d04..2d94a728d5f 100644
--- a/src/webapps/master/table.jsp
+++ b/src/webapps/master/table.jsp
@@ -1,10 +1,12 @@
<%@ page contentType="text/html;charset=UTF-8"
import="org.apache.hadoop.io.Text"
+ import="org.apache.hadoop.io.Writable"
import="org.apache.hadoop.hbase.HTableDescriptor"
import="org.apache.hadoop.hbase.client.HTable"
import="org.apache.hadoop.hbase.HRegionInfo"
import="org.apache.hadoop.hbase.HServerAddress"
import="org.apache.hadoop.hbase.HServerInfo"
+ import="org.apache.hadoop.hbase.io.ImmutableBytesWritable"
import="org.apache.hadoop.hbase.master.HMaster"
import="org.apache.hadoop.hbase.master.MetaRegion"
import="org.apache.hadoop.hbase.util.Bytes"
@@ -18,22 +20,62 @@
master.getServersToServerInfo();
String tableHeader = "<h2>Table Regions</h2><table><tr><th>Name</th><th>Region Server</th><th>Encoded Name</th><th>Start Key</th><th>End Key</th></tr>";
";
HServerAddress rootLocation = master.getRootRegionLocation();
-%>
+%>
+
+
+
+<%
+String action = request.getParameter("action");
+String key = request.getParameter("key");
+if ( action != null ) {
+%>
+
+
+
+
+
+
+Table action request accepted
+
+<%
+ if (action.equals("split")) {
+ if (key != null && key.length() > 0) {
+ Writable[] arr = new Writable[1];
+ arr[0] = new ImmutableBytesWritable(Bytes.toBytes(key));
+ master.modifyTable(Bytes.toBytes(tableName), HConstants.MODIFY_TABLE_SPLIT, arr);
+ } else {
+ master.modifyTable(Bytes.toBytes(tableName), HConstants.MODIFY_TABLE_SPLIT, null);
+ }
+ %> Split request accepted. <%
+ } else if (action.equals("compact")) {
+ if (key != null && key.length() > 0) {
+ Writable[] arr = new Writable[1];
+ arr[0] = new ImmutableBytesWritable(Bytes.toBytes(key));
+ master.modifyTable(Bytes.toBytes(tableName), HConstants.MODIFY_TABLE_COMPACT, arr);
+ } else {
+ master.modifyTable(Bytes.toBytes(tableName), HConstants.MODIFY_TABLE_COMPACT, null);
+ }
+ %> Compact request accepted. <%
+ }
+%>
+
This page will refresh in 5 seconds.
+
+<%
+} else {
+%>
Regions in <%= tableName %>
-
Regions in <%= tableName %>
-
<%if(tableName.equals(Bytes.toString(HConstants.ROOT_TABLE_NAME))) {%>
<%= tableHeader %>
<% int infoPort = serverToServerInfos.get(rootLocation.getBindAddress()+":"+rootLocation.getPort()).getInfoPort();
@@ -51,9 +93,7 @@
- | <%= Bytes.toString(meta.getStartKey()) %> | - |
<% } %>
-<%} else { %>
-
-<%
+<%} else {
try {
Map regions = table.getRegionsInfo();
if(regions != null && regions.size() > 0) { %>
@@ -74,5 +114,47 @@
ex.printStackTrace();
}
}%>
+
+
+Actions:
+
+
+
+
+
+
+<%
+}
+%>
+