HBASE-1082 Administrative functions for table/region maintenance

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@731801 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2009-01-06 01:12:36 +00:00
parent 84b7463032
commit 4dde10431e
13 changed files with 275 additions and 79 deletions

View File

@ -78,6 +78,34 @@ module HBase
       @formatter.footer(now)
     end
 
+    def flush(tableNameOrRegionName)
+      now = Time.now
+      @formatter.header()
+      @admin.flush(tableNameOrRegionName)
+      @formatter.footer(now)
+    end
+
+    def compact(tableNameOrRegionName)
+      now = Time.now
+      @formatter.header()
+      @admin.compact(tableNameOrRegionName)
+      @formatter.footer(now)
+    end
+
+    def major_compact(tableNameOrRegionName)
+      now = Time.now
+      @formatter.header()
+      @admin.majorCompact(tableNameOrRegionName)
+      @formatter.footer(now)
+    end
+
+    def split(tableNameOrRegionName)
+      now = Time.now
+      @formatter.header()
+      @admin.split(tableNameOrRegionName)
+      @formatter.footer(now)
+    end
+
     def enable(tableName)
       # TODO: Need an isEnabled method
       now = Time.now

View File

@ -105,18 +105,32 @@ def tools
   # Help for hbase shell surgery tools
   h = <<HERE
 HBASE SURGERY TOOLS:
- enable_region   Enable a single region. For example,
-                 hbase> enable_region 'REGIONNAME'
- disable_region  Disable a single region
  close_region    Close a single region. Optionally specify regionserver.
                  Examples:
                  hbase> close_region 'REGIONNAME'
                  hbase> close_region 'REGIONNAME', 'REGIONSERVER_IP:PORT'
+ compact         Compact all regions in passed table or pass a region row
+                 to compact an individual region
+ disable_region  Disable a single region
+ enable_region   Enable a single region. For example:
+                 hbase> enable_region 'REGIONNAME'
+ flush           Flush all regions in passed table or pass a region row to
+                 flush an individual region. For example:
+                 hbase> flush 'TABLENAME'
+                 hbase> flush 'REGIONNAME'
+ major_compact   Run major compaction on passed table or pass a region row
+                 to major compact an individual region
+ split           Split table or pass a region row to split individual region
 Above commands are for 'experts'-only as misuse can damage an install
 HERE
   puts h
@ -334,6 +348,22 @@ def count(table, interval = 1000)
   table(table).count(interval)
 end
 
+def flush(tableNameOrRegionName)
+  admin().flush(tableNameOrRegionName)
+end
+
+def compact(tableNameOrRegionName)
+  admin().compact(tableNameOrRegionName)
+end
+
+def major_compact(tableNameOrRegionName)
+  admin().major_compact(tableNameOrRegionName)
+end
+
+def split(tableNameOrRegionName)
+  admin().split(tableNameOrRegionName)
+end
+
 # Output a banner message that tells users where to go for help
 puts <<HERE
 HBase Shell; enter 'help<RETURN>' for list of supported commands.

View File

@ -268,4 +268,7 @@ public interface HConstants {
   // Messages client can send master.
   public static final int MODIFY_CLOSE_REGION = MODIFY_TABLE_COMPACT + 1;
+  public static final int MODIFY_TABLE_FLUSH = MODIFY_CLOSE_REGION + 1;
+  public static final int MODIFY_TABLE_MAJOR_COMPACT = MODIFY_TABLE_FLUSH + 1;
 }

View File

@ -34,7 +34,6 @@ import org.apache.hadoop.io.Writable;
  * Most of the time the messages are simple but some messages are accompanied
  * by the region affected. HMsg may also carry optional message.
  */
-@SuppressWarnings("serial")
 public class HMsg implements Writable {
   /**
    * Message types sent between master and regionservers
@ -100,6 +99,16 @@ public class HMsg implements Writable {
      * regions
      */
     MSG_REPORT_QUIESCED,
+
+    /**
+     * Flush
+     */
+    MSG_REGION_FLUSH,
+
+    /**
+     * Run Major Compaction
+     */
+    MSG_REGION_MAJOR_COMPACT,
   }
 
   private Type type = null;

View File

@ -404,5 +404,4 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
     this.splitRequest = b;
     return old;
   }
 }

View File

@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.io.BooleanWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.util.Shell.ExitCodeException;
 
 /**
  * Provides administrative functions for HBase
@ -540,10 +541,68 @@ public class HBaseAdmin {
       newargs);
   }
 
+  /**
+   * Flush a table or an individual region
+   * @param tableNameOrRegionName
+   * @throws IOException
+   */
+  public void flush(final String tableNameOrRegionName) throws IOException {
+    modifyTable(tableNameOrRegionName, HConstants.MODIFY_TABLE_FLUSH);
+  }
+
+  /**
+   * Compact a table or an individual region
+   * @param tableNameOrRegionName
+   * @throws IOException
+   */
+  public void compact(final String tableNameOrRegionName) throws IOException {
+    modifyTable(tableNameOrRegionName, HConstants.MODIFY_TABLE_COMPACT);
+  }
+
+  /**
+   * Major compact a table or an individual region
+   * @param tableNameOrRegionName
+   * @throws IOException
+   */
+  public void majorCompact(final String tableNameOrRegionName)
+  throws IOException {
+    modifyTable(tableNameOrRegionName, HConstants.MODIFY_TABLE_MAJOR_COMPACT);
+  }
+
+  /**
+   * Split a table or an individual region
+   * @param tableNameOrRegionName
+   * @throws IOException
+   */
+  public void split(final String tableNameOrRegionName) throws IOException {
+    modifyTable(tableNameOrRegionName, HConstants.MODIFY_TABLE_SPLIT);
+  }
+
+  /*
+   * Call modifyTable using passed tableName or region name String. If no
+   * such table, presume we have been passed a region name.
+   * @param tableNameOrRegionName
+   * @param op
+   * @throws IOException
+   */
+  private void modifyTable(final String tableNameOrRegionName, final int op)
+  throws IOException {
+    if (tableNameOrRegionName == null) {
+      throw new IllegalArgumentException("Pass a table name or region name");
+    }
+    String tableName = tableExists(tableNameOrRegionName)?
+      tableNameOrRegionName: null;
+    String regionName = tableName == null? tableNameOrRegionName: null;
+    Object [] args = regionName == null? null: new String [] {regionName};
+    modifyTable(tableName == null? null: Bytes.toBytes(tableName), op, args);
+  }
+
   /**
    * Modify an existing table
    *
-   * @param tableName name of table
+   * @param tableName name of table. May be null if we are operating on a
+   * region.
    * @param op table modification operation
    * @param args operation specific arguments
    * @throws IOException
@ -554,7 +613,7 @@ public class HBaseAdmin {
       throw new MasterNotRunningException("master has been shut down");
     }
     // Let pass if its a catalog table. Used by admins.
-    if (!MetaUtils.isMetaTableName(tableName)) {
+    if (tableName != null && !MetaUtils.isMetaTableName(tableName)) {
       // This will throw exception
       HTableDescriptor.isLegalTableName(tableName);
     }
@ -573,15 +632,19 @@ public class HBaseAdmin {
     case HConstants.MODIFY_TABLE_COMPACT:
     case HConstants.MODIFY_TABLE_SPLIT:
+    case HConstants.MODIFY_TABLE_MAJOR_COMPACT:
+    case HConstants.MODIFY_TABLE_FLUSH:
       if (args != null && args.length > 0) {
         arr = new Writable[1];
         if (args[0] instanceof byte[]) {
           arr[0] = new ImmutableBytesWritable((byte[])args[0]);
         } else if (args[0] instanceof ImmutableBytesWritable) {
           arr[0] = (ImmutableBytesWritable)args[0];
+        } else if (args[0] instanceof String) {
+          arr[0] = new ImmutableBytesWritable(Bytes.toBytes((String)args[0]));
         } else {
-          throw new IllegalArgumentException("SPLIT or COMPACT with arg " +
-            "requires byte[] or ImmutableBytesWritable");
+          throw new IllegalArgumentException("Requires byte[], String, or" +
+            "ImmutableBytesWritable");
         }
       }
       this.master.modifyTable(tableName, op, arr);
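
For reference, a minimal client-side sketch of how the HBaseAdmin calls added above might be driven. This assumes the HBaseConfiguration-based constructor of this era; the table name and the region name below are made-up examples, not values from this commit:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class AdminMaintenanceExample {
  public static void main(String[] args)
  throws MasterNotRunningException, IOException {
    HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());
    // A table name fans the operation out to every region of the table.
    admin.flush("mytable");
    admin.compact("mytable");
    admin.majorCompact("mytable");
    // A region name (illustrative format) targets that single region;
    // the private modifyTable above decides which case applies.
    admin.split("mytable,,1231201800000");
  }
}

Each call routes through the same modifyTable path shown above, so a table name acts on every region of the table while a region name acts on just one.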

View File

@ -795,20 +795,22 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
     case MODIFY_TABLE_SPLIT:
     case MODIFY_TABLE_COMPACT:
+    case MODIFY_TABLE_MAJOR_COMPACT:
+    case MODIFY_TABLE_FLUSH:
       if (args != null && args.length > 0) {
         if (!(args[0] instanceof ImmutableBytesWritable))
           throw new IOException(
             "request argument must be ImmutableBytesWritable");
-        byte[] rowKey = ((ImmutableBytesWritable)args[0]).get();
+        byte [] rowKey = ((ImmutableBytesWritable)args[0]).get();
         Pair<HRegionInfo,HServerAddress> pair =
           getTableRegionClosest(tableName, rowKey);
         if (pair != null) {
-          regionManager.startAction(pair.getFirst().getRegionName(),
+          this.regionManager.startAction(pair.getFirst().getRegionName(),
             pair.getFirst(), pair.getSecond(), op);
         }
       } else {
         for (Pair<HRegionInfo,HServerAddress> pair: getTableRegions(tableName))
-          regionManager.startAction(pair.getFirst().getRegionName(),
+          this.regionManager.startAction(pair.getFirst().getRegionName(),
             pair.getFirst(), pair.getSecond(), op);
       }
       break;

View File

@ -104,12 +104,22 @@ class RegionManager implements HConstants {
   private final float slop;
 
   /** Set of regions to split. */
-  private final SortedMap<byte[],Pair<HRegionInfo,HServerAddress>> regionsToSplit =
+  private final SortedMap<byte[], Pair<HRegionInfo,HServerAddress>> regionsToSplit =
     Collections.synchronizedSortedMap(
       new TreeMap<byte[],Pair<HRegionInfo,HServerAddress>>
       (Bytes.BYTES_COMPARATOR));
   /** Set of regions to compact. */
-  private final SortedMap<byte[],Pair<HRegionInfo,HServerAddress>> regionsToCompact =
+  private final SortedMap<byte[], Pair<HRegionInfo,HServerAddress>> regionsToCompact =
+    Collections.synchronizedSortedMap(
+      new TreeMap<byte[],Pair<HRegionInfo,HServerAddress>>
+      (Bytes.BYTES_COMPARATOR));
+  /** Set of regions to major compact. */
+  private final SortedMap<byte[], Pair<HRegionInfo,HServerAddress>> regionsToMajorCompact =
+    Collections.synchronizedSortedMap(
+      new TreeMap<byte[],Pair<HRegionInfo,HServerAddress>>
+      (Bytes.BYTES_COMPARATOR));
+  /** Set of regions to flush. */
+  private final SortedMap<byte[], Pair<HRegionInfo,HServerAddress>> regionsToFlush =
     Collections.synchronizedSortedMap(
       new TreeMap<byte[],Pair<HRegionInfo,HServerAddress>>
       (Bytes.BYTES_COMPARATOR));
@ -937,18 +947,28 @@ class RegionManager implements HConstants {
     HServerAddress server, int op) {
     switch (op) {
     case HConstants.MODIFY_TABLE_SPLIT:
-      regionsToSplit.put(regionName,
-        new Pair<HRegionInfo,HServerAddress>(info, server));
+      startAction(regionName, info, server, this.regionsToSplit);
       break;
     case HConstants.MODIFY_TABLE_COMPACT:
-      regionsToCompact.put(regionName,
-        new Pair<HRegionInfo,HServerAddress>(info, server));
+      startAction(regionName, info, server, this.regionsToCompact);
+      break;
+    case HConstants.MODIFY_TABLE_MAJOR_COMPACT:
+      startAction(regionName, info, server, this.regionsToMajorCompact);
+      break;
+    case HConstants.MODIFY_TABLE_FLUSH:
+      startAction(regionName, info, server, this.regionsToFlush);
       break;
     default:
       throw new IllegalArgumentException("illegal table action " + op);
     }
   }
 
+  private void startAction(final byte[] regionName, final HRegionInfo info,
+      final HServerAddress server,
+      final SortedMap<byte[], Pair<HRegionInfo,HServerAddress>> map) {
+    map.put(regionName, new Pair<HRegionInfo,HServerAddress>(info, server));
+  }
+
   /**
    * @param regionName
    * @param op
@ -956,10 +976,16 @@ class RegionManager implements HConstants {
   public void endAction(byte[] regionName, int op) {
     switch (op) {
     case HConstants.MODIFY_TABLE_SPLIT:
-      regionsToSplit.remove(regionName);
+      this.regionsToSplit.remove(regionName);
       break;
     case HConstants.MODIFY_TABLE_COMPACT:
-      regionsToCompact.remove(regionName);
+      this.regionsToCompact.remove(regionName);
+      break;
+    case HConstants.MODIFY_TABLE_MAJOR_COMPACT:
+      this.regionsToMajorCompact.remove(regionName);
+      break;
+    case HConstants.MODIFY_TABLE_FLUSH:
+      this.regionsToFlush.remove(regionName);
       break;
     default:
       throw new IllegalArgumentException("illegal table action " + op);
@ -976,37 +1002,36 @@ class RegionManager implements HConstants {
   /**
    * Send messages to the given region server asking it to split any
-   * regions in 'regionsToSplit'
+   * regions in 'regionsToSplit', etc.
    * @param serverInfo
    * @param returnMsgs
    */
   public void applyActions(HServerInfo serverInfo, ArrayList<HMsg> returnMsgs) {
+    applyActions(serverInfo, returnMsgs, this.regionsToCompact,
+      HMsg.Type.MSG_REGION_COMPACT);
+    applyActions(serverInfo, returnMsgs, this.regionsToSplit,
+      HMsg.Type.MSG_REGION_SPLIT);
+    applyActions(serverInfo, returnMsgs, this.regionsToFlush,
+      HMsg.Type.MSG_REGION_FLUSH);
+    applyActions(serverInfo, returnMsgs, this.regionsToMajorCompact,
+      HMsg.Type.MSG_REGION_MAJOR_COMPACT);
+  }
+
+  private void applyActions(final HServerInfo serverInfo,
+      final ArrayList<HMsg> returnMsgs,
+      SortedMap<byte[], Pair<HRegionInfo,HServerAddress>> map,
+      final HMsg.Type msg) {
     HServerAddress addr = serverInfo.getServerAddress();
     Iterator<Pair<HRegionInfo, HServerAddress>> i =
-      regionsToCompact.values().iterator();
-    synchronized (regionsToCompact) {
+      map.values().iterator();
+    synchronized (map) {
       while (i.hasNext()) {
         Pair<HRegionInfo,HServerAddress> pair = i.next();
         if (addr.equals(pair.getSecond())) {
           if (LOG.isDebugEnabled()) {
-            LOG.debug("sending MSG_REGION_COMPACT " + pair.getFirst() + " to " +
-              addr);
+            LOG.debug("Sending " + msg + " " + pair.getFirst() + " to " + addr);
           }
-          returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_COMPACT, pair.getFirst()));
-          i.remove();
-        }
-      }
-    }
-    i = regionsToSplit.values().iterator();
-    synchronized (regionsToSplit) {
-      while (i.hasNext()) {
-        Pair<HRegionInfo,HServerAddress> pair = i.next();
-        if (addr.equals(pair.getSecond())) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("sending MSG_REGION_SPLIT " + pair.getFirst() + " to " +
-              addr);
-          }
-          returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_SPLIT, pair.getFirst()));
+          returnMsgs.add(new HMsg(msg, pair.getFirst()));
           i.remove();
         }
       }
     }
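
The rewrite above collapses what used to be one hand-written drain loop per action queue into a single helper parameterized by the pending-region map and the outgoing message type; adding a new action then only needs another map plus one delegating call, which is how regionsToMajorCompact and regionsToFlush slot in. A standalone toy sketch of that pattern, with simplified types and hypothetical names (not HBase classes):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

public class PendingActionsSketch {
  enum Msg { COMPACT, SPLIT, FLUSH, MAJOR_COMPACT }

  // regionName -> hosting server (stands in for Pair<HRegionInfo,HServerAddress>)
  private final Map<String, String> toCompact = new HashMap<String, String>();
  private final Map<String, String> toSplit = new HashMap<String, String>();

  void startAction(String regionName, String server, Map<String, String> map) {
    map.put(regionName, server);
  }

  // Drain every pending action addressed to 'server' into 'outbox'.
  void applyActions(String server, List<String> outbox) {
    applyActions(server, outbox, toCompact, Msg.COMPACT);
    applyActions(server, outbox, toSplit, Msg.SPLIT);
  }

  private void applyActions(String server, List<String> outbox,
      Map<String, String> map, Msg msg) {
    Iterator<Map.Entry<String, String>> i = map.entrySet().iterator();
    while (i.hasNext()) {
      Map.Entry<String, String> e = i.next();
      if (server.equals(e.getValue())) {
        outbox.add(msg + " " + e.getKey());
        i.remove();
      }
    }
  }

  public static void main(String[] args) {
    PendingActionsSketch rm = new PendingActionsSketch();
    rm.startAction("region-1", "rs1:60020", rm.toCompact);
    rm.startAction("region-2", "rs2:60020", rm.toSplit);
    List<String> outbox = new ArrayList<String>();
    rm.applyActions("rs1:60020", outbox);
    System.out.println(outbox); // [COMPACT region-1]
  }
}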

View File

@ -139,13 +139,26 @@ class CompactSplitThread extends Thread implements HConstants {
    */
   public synchronized void compactionRequested(final HRegion r,
       final String why) {
+    compactionRequested(r, false, why);
+  }
+
+  /**
+   * @param r HRegion store belongs to
+   * @param force Whether next compaction should be major
+   * @param why Why compaction requested -- used in debug messages
+   */
+  public synchronized void compactionRequested(final HRegion r,
+      final boolean force, final String why) {
     if (this.server.stopRequested.get()) {
       return;
     }
-    LOG.debug("Compaction requested for region " +
-      Bytes.toString(r.getRegionName()) + "/" +
-      r.getRegionInfo().getEncodedName() +
+    r.setForceMajorCompaction(force);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Compaction " + (force? "(major) ": "") +
+        "requested for region " + Bytes.toString(r.getRegionName()) +
+        "/" + r.getRegionInfo().getEncodedName() +
         (why != null && !why.isEmpty()? " because: " + why: ""));
+    }
     synchronized (regionsInQueue) {
       if (!regionsInQueue.contains(r)) {
         compactionQueue.add(r);
@ -223,6 +236,10 @@ class CompactSplitThread extends Thread implements HConstants {
     this.limit = limit;
   }
 
+  int getLimit() {
+    return this.limit;
+  }
+
   /**
    * Only interrupt once it's done with a run through the work loop.
    */

View File

@ -138,6 +138,12 @@ public class HRegion implements HConstants {
   final Path regiondir;
   private final Path regionCompactionDir;
 
+  /*
+   * Set this when scheduling compaction if want the next compaction to be a
+   * major compaction. Cleared each time through compaction code.
+   */
+  private volatile boolean forceMajorCompaction = false;
+
   /*
    * Data structure of write state flags used coordinating flushes,
    * compactions and closes.
@ -648,6 +654,14 @@ public class HRegion implements HConstants {
     }
   }
 
+  void setForceMajorCompaction(final boolean b) {
+    this.forceMajorCompaction = b;
+  }
+
+  boolean getForceMajorCompaction() {
+    return this.forceMajorCompaction;
+  }
+
   /**
    * Called by compaction thread and after region is opened to compact the
    * HStores if necessary.
@ -663,7 +677,9 @@ public class HRegion implements HConstants {
    * @throws IOException
    */
   public byte [] compactStores() throws IOException {
-    return compactStores(false);
+    boolean majorCompaction = this.forceMajorCompaction;
+    this.forceMajorCompaction = false;
+    return compactStores(majorCompaction);
   }
 
   /*
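
The forceMajorCompaction flag added above is a one-shot request: compactStores() reads it, clears it, and passes the saved value down, so a forced major compaction applies to exactly the next compaction pass. A minimal sketch of that read-then-clear pattern, using illustrative names rather than the real HBase classes:

public class OneShotFlagSketch {
  private volatile boolean forceMajorCompaction = false;

  void setForceMajorCompaction(boolean b) {
    this.forceMajorCompaction = b;
  }

  String compactStores() {
    // Read then clear: the force request is consumed by exactly one run.
    boolean major = this.forceMajorCompaction;
    this.forceMajorCompaction = false;
    return compactStores(major);
  }

  String compactStores(boolean majorCompaction) {
    return majorCompaction ? "major compaction" : "minor compaction";
  }

  public static void main(String[] args) {
    OneShotFlagSketch region = new OneShotFlagSketch();
    region.setForceMajorCompaction(true);
    System.out.println(region.compactStores()); // major compaction
    System.out.println(region.compactStores()); // minor compaction (flag consumed)
  }
}

The change above likewise relies on a volatile field rather than extra locking, which appears sufficient here because a single compaction thread consumes the flag.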

View File

@ -77,6 +77,7 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.UnknownRowLockException;
 import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.ValueOverMaxLengthException;
+import org.apache.hadoop.hbase.HMsg.Type;
 import org.apache.hadoop.hbase.Leases.LeaseStillHeldException;
 import org.apache.hadoop.hbase.client.ServerConnection;
 import org.apache.hadoop.hbase.client.ServerConnectionManager;
@ -727,7 +728,6 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
    * Thread for toggling safemode after some configurable interval.
    */
   private class SafeModeThread extends Thread {
     public void run() {
       // first, wait the required interval before turning off safemode
       int safemodeInterval =
@ -761,7 +761,9 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
         -1
       };
       for (int i = 0; i < limitSteps.length; i++) {
-        if (LOG.isDebugEnabled()) {
+        // Just log changes.
+        if (compactSplitThread.getLimit() != limitSteps[i] &&
+            LOG.isDebugEnabled()) {
           LOG.debug("setting compaction limit to " + limitSteps[i]);
         }
         compactSplitThread.setLimit(limitSteps[i]);
@ -1224,6 +1226,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
           continue;
         }
         LOG.info("Worker: " + e.msg);
+        HRegion region = null;
         HRegionInfo info = e.msg.getRegionInfo();
         switch(e.msg.getType()) {
@ -1257,27 +1260,28 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
           closeRegion(e.msg.getRegionInfo(), false);
           break;
 
-        case MSG_REGION_SPLIT: {
-          // Force split a region
-          HRegion region = getRegion(info.getRegionName());
-          // flush the memcache for the region
+        case MSG_REGION_SPLIT:
+          region = getRegion(info.getRegionName());
           region.flushcache();
-          // flag that the region should be split
           region.regionInfo.shouldSplit(true);
-          // force a compaction
+          // force a compaction; split will be side-effect.
           compactSplitThread.compactionRequested(region,
-            "MSG_REGION_SPLIT");
-        } break;
+            e.msg.getType().name());
+          break;
 
-        case MSG_REGION_COMPACT: {
+        case MSG_REGION_MAJOR_COMPACT:
+        case MSG_REGION_COMPACT:
           // Compact a region
-          HRegion region = getRegion(info.getRegionName());
-          // flush the memcache for the region
-          region.flushcache();
-          // force a compaction
+          region = getRegion(info.getRegionName());
           compactSplitThread.compactionRequested(region,
-            "MSG_REGION_COMPACT");
-        } break;
+            e.msg.isType(Type.MSG_REGION_MAJOR_COMPACT),
+            e.msg.getType().name());
+          break;
+
+        case MSG_REGION_FLUSH:
+          region = getRegion(info.getRegionName());
+          region.flushcache();
+          break;
 
         default:
           throw new AssertionError(

View File

@ -75,7 +75,7 @@ public class TestForceSplit extends HBaseClusterTestCase {
     assertTrue(m.size() == 1);
     // tell the master to split the table
-    admin.modifyTable(tableName, HConstants.MODIFY_TABLE_SPLIT);
+    admin.split(Bytes.toString(tableName));
     // give some time for the split to happen
     Thread.sleep(15 * 1000);

View File

@ -10,8 +10,8 @@ Automatically created by Tomcat JspC.
   <servlet>
-    <servlet-name>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-name>
-    <servlet-class>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-class>
+    <servlet-name>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-name>
+    <servlet-class>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-class>
   </servlet>
   <servlet>
@ -20,13 +20,13 @@ Automatically created by Tomcat JspC.
   </servlet>
   <servlet>
-    <servlet-name>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-name>
-    <servlet-class>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-class>
+    <servlet-name>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-name>
+    <servlet-class>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-class>
   </servlet>
   <servlet-mapping>
-    <servlet-name>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-name>
-    <url-pattern>/table.jsp</url-pattern>
+    <servlet-name>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-name>
+    <url-pattern>/master.jsp</url-pattern>
   </servlet-mapping>
   <servlet-mapping>
@ -35,8 +35,8 @@ Automatically created by Tomcat JspC.
   </servlet-mapping>
   <servlet-mapping>
-    <servlet-name>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-name>
-    <url-pattern>/master.jsp</url-pattern>
+    <servlet-name>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-name>
+    <url-pattern>/table.jsp</url-pattern>
   </servlet-mapping>
 </web-app>