HBASE-1082 Administrative functions for table/region maintenance
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@731801 13f79535-47bb-0310-9956-ffa450edef68
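
For orientation, here is a minimal client-side sketch of the administrative calls this change adds to HBaseAdmin. Only flush, compact, majorCompact and split come from the patch below; the configuration setup, the table name and the enclosing class are illustrative assumptions, not part of the commit.

    // Sketch only: drives the new HBaseAdmin maintenance calls from client code.
    // The constructor and configuration shown follow the usage common in this
    // code base at the time; treat them as assumptions.
    import java.io.IOException;

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MasterNotRunningException;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    public class AdminMaintenanceExample {
      public static void main(String[] args)
      throws MasterNotRunningException, IOException {
        HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());
        // Each call accepts either a table name or a single region name;
        // HBaseAdmin works out which one it was given.
        admin.flush("mytable");          // flush all regions of the table
        admin.compact("mytable");        // queue a compaction per region
        admin.majorCompact("mytable");   // queue a major compaction per region
        admin.split("mytable");          // request a split of every region
      }
    }
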
commit 4dde10431e
parent 84b7463032

bin/HBase.rb | 28
@@ -78,6 +78,34 @@ module HBase
      @formatter.footer(now)
    end

    def flush(tableNameOrRegionName)
      now = Time.now
      @formatter.header()
      @admin.flush(tableNameOrRegionName)
      @formatter.footer(now)
    end

    def compact(tableNameOrRegionName)
      now = Time.now
      @formatter.header()
      @admin.compact(tableNameOrRegionName)
      @formatter.footer(now)
    end

    def major_compact(tableNameOrRegionName)
      now = Time.now
      @formatter.header()
      @admin.majorCompact(tableNameOrRegionName)
      @formatter.footer(now)
    end

    def split(tableNameOrRegionName)
      now = Time.now
      @formatter.header()
      @admin.split(tableNameOrRegionName)
      @formatter.footer(now)
    end

    def enable(tableName)
      # TODO: Need an isEnabled method
      now = Time.now
42
bin/hirb.rb
42
bin/hirb.rb
|
@@ -105,18 +105,32 @@ def tools
  # Help for hbase shell surgery tools
  h = <<HERE
HBASE SURGERY TOOLS:
 enable_region    Enable a single region. For example,

           hbase> enable_region 'REGIONNAME'

 disable_region   Disable a single region

 close_region     Close a single region. Optionally specify regionserver.
                  Examples:

           hbase> close_region 'REGIONNAME'
           hbase> close_region 'REGIONNAME', 'REGIONSERVER_IP:PORT'

 compact          Compact all regions in passed table or pass a region row
                  to compact an individual region

 disable_region   Disable a single region

 enable_region    Enable a single region. For example:

           hbase> enable_region 'REGIONNAME'

 flush            Flush all regions in passed table or pass a region row to
                  flush an individual region. For example:

           hbase> flush 'TABLENAME'
           hbase> flush 'REGIONNAME'

 major_compact    Run major compaction on passed table or pass a region row
                  to major compact an individual region

 split            Split table or pass a region row to split individual region

Above commands are for 'experts'-only as misuse can damage an install
HERE
  puts h

@@ -334,6 +348,22 @@ def count(table, interval = 1000)
  table(table).count(interval)
end

def flush(tableNameOrRegionName)
  admin().flush(tableNameOrRegionName)
end

def compact(tableNameOrRegionName)
  admin().compact(tableNameOrRegionName)
end

def major_compact(tableNameOrRegionName)
  admin().major_compact(tableNameOrRegionName)
end

def split(tableNameOrRegionName)
  admin().split(tableNameOrRegionName)
end

# Output a banner message that tells users where to go for help
puts <<HERE
HBase Shell; enter 'help<RETURN>' for list of supported commands.
@@ -268,4 +268,7 @@ public interface HConstants {

  // Messages client can send master.
  public static final int MODIFY_CLOSE_REGION = MODIFY_TABLE_COMPACT + 1;
}

  public static final int MODIFY_TABLE_FLUSH = MODIFY_CLOSE_REGION + 1;
  public static final int MODIFY_TABLE_MAJOR_COMPACT = MODIFY_TABLE_FLUSH + 1;
}
@@ -34,7 +34,6 @@ import org.apache.hadoop.io.Writable;
 * Most of the time the messages are simple but some messages are accompanied
 * by the region affected. HMsg may also carry optional message.
 */
@SuppressWarnings("serial")
public class HMsg implements Writable {
  /**
   * Message types sent between master and regionservers

@@ -100,6 +99,16 @@ public class HMsg implements Writable {
     * regions
     */
    MSG_REPORT_QUIESCED,

    /**
     * Flush
     */
    MSG_REGION_FLUSH,

    /**
     * Run Major Compaction
     */
    MSG_REGION_MAJOR_COMPACT,
  }

  private Type type = null;
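
The two new message types above are what the master puts on the wire. As a hedged illustration of how they travel, the sketch below mirrors what RegionManager.applyActions does later in this commit: append an HMsg to the list returned from a regionserver heartbeat. The wrapper class, method and variable names here are hypothetical.

    // Sketch only: queueing the new maintenance messages into a heartbeat reply.
    import java.util.ArrayList;

    import org.apache.hadoop.hbase.HMsg;
    import org.apache.hadoop.hbase.HRegionInfo;

    class HeartbeatReplySketch {
      static void queueMaintenance(ArrayList<HMsg> returnMsgs, HRegionInfo info) {
        // Ask the hosting regionserver to flush the region's memcache ...
        returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_FLUSH, info));
        // ... or to schedule a forced major compaction of it.
        returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_MAJOR_COMPACT, info));
      }
    }
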
@@ -404,5 +404,4 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
    this.splitRequest = b;
    return old;
  }

  }
}
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.Shell.ExitCodeException;

/**
 * Provides administrative functions for HBase

@@ -539,11 +540,69 @@ public class HBaseAdmin {
    modifyTable(HConstants.META_TABLE_NAME, HConstants.MODIFY_CLOSE_REGION,
      newargs);
  }

  /**
   * Flush a table or an individual region
   * @param tableNameOrRegionName
   * @throws IOException
   */
  public void flush(final String tableNameOrRegionName) throws IOException {
    modifyTable(tableNameOrRegionName, HConstants.MODIFY_TABLE_FLUSH);
  }

  /**
   * Compact a table or an individual region
   * @param tableNameOrRegionName
   * @throws IOException
   */
  public void compact(final String tableNameOrRegionName) throws IOException {
    modifyTable(tableNameOrRegionName, HConstants.MODIFY_TABLE_COMPACT);
  }

  /**
   * Major compact a table or an individual region
   * @param tableNameOrRegionName
   * @throws IOException
   */
  public void majorCompact(final String tableNameOrRegionName)
  throws IOException {
    modifyTable(tableNameOrRegionName, HConstants.MODIFY_TABLE_MAJOR_COMPACT);
  }

  /**
   * Split a table or an individual region
   * @param tableNameOrRegionName
   * @throws IOException
   */
  public void split(final String tableNameOrRegionName) throws IOException {
    modifyTable(tableNameOrRegionName, HConstants.MODIFY_TABLE_SPLIT);
  }

  /*
   * Call modifyTable using passed tableName or region name String. If no
   * such table, presume we have been passed a region name.
   * @param tableNameOrRegionName
   * @param op
   * @throws IOException
   */
  private void modifyTable(final String tableNameOrRegionName, final int op)
  throws IOException {
    if (tableNameOrRegionName == null) {
      throw new IllegalArgumentException("Pass a table name or region name");
    }
    String tableName = tableExists(tableNameOrRegionName)?
      tableNameOrRegionName: null;
    String regionName = tableName == null? tableNameOrRegionName: null;
    Object [] args = regionName == null? null: new String [] {regionName};
    modifyTable(tableName == null? null: Bytes.toBytes(tableName), op, args);
  }

  /**
   * Modify an existing table
   *
   * @param tableName name of table
   * @param tableName name of table. May be null if we are operating on a
   * region.
   * @param op table modification operation
   * @param args operation specific arguments
   * @throws IOException

@@ -554,7 +613,7 @@ public class HBaseAdmin {
      throw new MasterNotRunningException("master has been shut down");
    }
    // Let pass if its a catalog table. Used by admins.
    if (!MetaUtils.isMetaTableName(tableName)) {
    if (tableName != null && !MetaUtils.isMetaTableName(tableName)) {
      // This will throw exception
      HTableDescriptor.isLegalTableName(tableName);
    }

@@ -573,15 +632,19 @@ public class HBaseAdmin {

    case HConstants.MODIFY_TABLE_COMPACT:
    case HConstants.MODIFY_TABLE_SPLIT:
    case HConstants.MODIFY_TABLE_MAJOR_COMPACT:
    case HConstants.MODIFY_TABLE_FLUSH:
      if (args != null && args.length > 0) {
        arr = new Writable[1];
        if (args[0] instanceof byte[]) {
          arr[0] = new ImmutableBytesWritable((byte[])args[0]);
        } else if (args[0] instanceof ImmutableBytesWritable) {
          arr[0] = (ImmutableBytesWritable)args[0];
        } else if (args[0] instanceof String) {
          arr[0] = new ImmutableBytesWritable(Bytes.toBytes((String)args[0]));
        } else {
          throw new IllegalArgumentException("SPLIT or COMPACT with arg " +
            "requires byte[] or ImmutableBytesWritable");
          throw new IllegalArgumentException("Requires byte[], String, or" +
            "ImmutableBytesWritable");
        }
      }
      this.master.modifyTable(tableName, op, arr);
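
The point of the new private modifyTable(String, int) helper above is that one string argument can be either a table name or a single region name. Below is a hedged sketch of the resulting call-site behaviour; the region name literal is illustrative only, and the constructor setup follows the usage common in this code base at the time.

    // Sketch only: table-name versus region-name dispatch in HBaseAdmin.flush.
    import java.io.IOException;

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MasterNotRunningException;
    import org.apache.hadoop.hbase.client.HBaseAdmin;

    class PerRegionFlushSketch {
      static void flushBoth() throws MasterNotRunningException, IOException {
        HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());
        admin.flush("mytable");                // table name: every region is flushed
        admin.flush("mytable,,1231812738260"); // region name (illustrative): only that region
      }
    }
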
@@ -795,20 +795,22 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,

    case MODIFY_TABLE_SPLIT:
    case MODIFY_TABLE_COMPACT:
    case MODIFY_TABLE_MAJOR_COMPACT:
    case MODIFY_TABLE_FLUSH:
      if (args != null && args.length > 0) {
        if (!(args[0] instanceof ImmutableBytesWritable))
          throw new IOException(
            "request argument must be ImmutableBytesWritable");
        byte[] rowKey = ((ImmutableBytesWritable)args[0]).get();
        byte [] rowKey = ((ImmutableBytesWritable)args[0]).get();
        Pair<HRegionInfo,HServerAddress> pair =
          getTableRegionClosest(tableName, rowKey);
        if (pair != null) {
          regionManager.startAction(pair.getFirst().getRegionName(),
          this.regionManager.startAction(pair.getFirst().getRegionName(),
            pair.getFirst(), pair.getSecond(), op);
        }
      } else {
        for (Pair<HRegionInfo,HServerAddress> pair: getTableRegions(tableName))
          regionManager.startAction(pair.getFirst().getRegionName(),
          this.regionManager.startAction(pair.getFirst().getRegionName(),
            pair.getFirst(), pair.getSecond(), op);
      }
      break;
@@ -104,12 +104,22 @@ class RegionManager implements HConstants {
  private final float slop;

  /** Set of regions to split. */
  private final SortedMap<byte[],Pair<HRegionInfo,HServerAddress>> regionsToSplit =
  private final SortedMap<byte[], Pair<HRegionInfo,HServerAddress>> regionsToSplit =
    Collections.synchronizedSortedMap(
      new TreeMap<byte[],Pair<HRegionInfo,HServerAddress>>
      (Bytes.BYTES_COMPARATOR));
  /** Set of regions to compact. */
  private final SortedMap<byte[],Pair<HRegionInfo,HServerAddress>> regionsToCompact =
  private final SortedMap<byte[], Pair<HRegionInfo,HServerAddress>> regionsToCompact =
    Collections.synchronizedSortedMap(
      new TreeMap<byte[],Pair<HRegionInfo,HServerAddress>>
      (Bytes.BYTES_COMPARATOR));
  /** Set of regions to major compact. */
  private final SortedMap<byte[], Pair<HRegionInfo,HServerAddress>> regionsToMajorCompact =
    Collections.synchronizedSortedMap(
      new TreeMap<byte[],Pair<HRegionInfo,HServerAddress>>
      (Bytes.BYTES_COMPARATOR));
  /** Set of regions to flush. */
  private final SortedMap<byte[], Pair<HRegionInfo,HServerAddress>> regionsToFlush =
    Collections.synchronizedSortedMap(
      new TreeMap<byte[],Pair<HRegionInfo,HServerAddress>>
      (Bytes.BYTES_COMPARATOR));

@@ -937,18 +947,28 @@ class RegionManager implements HConstants {
    HServerAddress server, int op) {
    switch (op) {
    case HConstants.MODIFY_TABLE_SPLIT:
      regionsToSplit.put(regionName,
        new Pair<HRegionInfo,HServerAddress>(info, server));
      startAction(regionName, info, server, this.regionsToSplit);
      break;
    case HConstants.MODIFY_TABLE_COMPACT:
      regionsToCompact.put(regionName,
        new Pair<HRegionInfo,HServerAddress>(info, server));
      startAction(regionName, info, server, this.regionsToCompact);
      break;
    case HConstants.MODIFY_TABLE_MAJOR_COMPACT:
      startAction(regionName, info, server, this.regionsToMajorCompact);
      break;
    case HConstants.MODIFY_TABLE_FLUSH:
      startAction(regionName, info, server, this.regionsToFlush);
      break;
    default:
      throw new IllegalArgumentException("illegal table action " + op);
    }
  }

  private void startAction(final byte[] regionName, final HRegionInfo info,
    final HServerAddress server,
    final SortedMap<byte[], Pair<HRegionInfo,HServerAddress>> map) {
    map.put(regionName, new Pair<HRegionInfo,HServerAddress>(info, server));
  }

  /**
   * @param regionName
   * @param op

@@ -956,10 +976,16 @@ class RegionManager implements HConstants {
  public void endAction(byte[] regionName, int op) {
    switch (op) {
    case HConstants.MODIFY_TABLE_SPLIT:
      regionsToSplit.remove(regionName);
      this.regionsToSplit.remove(regionName);
      break;
    case HConstants.MODIFY_TABLE_COMPACT:
      regionsToCompact.remove(regionName);
      this.regionsToCompact.remove(regionName);
      break;
    case HConstants.MODIFY_TABLE_MAJOR_COMPACT:
      this.regionsToMajorCompact.remove(regionName);
      break;
    case HConstants.MODIFY_TABLE_FLUSH:
      this.regionsToFlush.remove(regionName);
      break;
    default:
      throw new IllegalArgumentException("illegal table action " + op);

@@ -976,43 +1002,42 @@ class RegionManager implements HConstants {

  /**
   * Send messages to the given region server asking it to split any
   * regions in 'regionsToSplit'
   * regions in 'regionsToSplit', etc.
   * @param serverInfo
   * @param returnMsgs
   */
  public void applyActions(HServerInfo serverInfo, ArrayList<HMsg> returnMsgs) {
    applyActions(serverInfo, returnMsgs, this.regionsToCompact,
      HMsg.Type.MSG_REGION_COMPACT);
    applyActions(serverInfo, returnMsgs, this.regionsToSplit,
      HMsg.Type.MSG_REGION_SPLIT);
    applyActions(serverInfo, returnMsgs, this.regionsToFlush,
      HMsg.Type.MSG_REGION_FLUSH);
    applyActions(serverInfo, returnMsgs, this.regionsToMajorCompact,
      HMsg.Type.MSG_REGION_MAJOR_COMPACT);
  }

  private void applyActions(final HServerInfo serverInfo,
    final ArrayList<HMsg> returnMsgs,
    SortedMap<byte[], Pair<HRegionInfo,HServerAddress>> map,
    final HMsg.Type msg) {
    HServerAddress addr = serverInfo.getServerAddress();
    Iterator<Pair<HRegionInfo, HServerAddress>> i =
      regionsToCompact.values().iterator();
    synchronized (regionsToCompact) {
      map.values().iterator();
    synchronized (map) {
      while (i.hasNext()) {
        Pair<HRegionInfo,HServerAddress> pair = i.next();
        if (addr.equals(pair.getSecond())) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("sending MSG_REGION_COMPACT " + pair.getFirst() + " to " +
              addr);
            LOG.debug("Sending " + msg + " " + pair.getFirst() + " to " + addr);
          }
          returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_COMPACT, pair.getFirst()));
          i.remove();
        }
      }
    }
    i = regionsToSplit.values().iterator();
    synchronized (regionsToSplit) {
      while (i.hasNext()) {
        Pair<HRegionInfo,HServerAddress> pair = i.next();
        if (addr.equals(pair.getSecond())) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("sending MSG_REGION_SPLIT " + pair.getFirst() + " to " +
              addr);
          }
          returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_SPLIT, pair.getFirst()));
          returnMsgs.add(new HMsg(msg, pair.getFirst()));
          i.remove();
        }
      }
    }
  }

  /*
   * State of a Region as it transitions from closed to open, etc. See
   * note on regionsInTransition data member above for listing of state
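
The RegionManager change above generalizes one hand-written queue-and-drain pair into four pending-action maps serviced by shared startAction/applyActions helpers. The standalone model below illustrates that pattern in isolation; it is not HBase code, and plain strings stand in for HRegionInfo, HServerAddress and HMsg.

    // Simplified model of the pending-action pattern: actions are queued per
    // region in a synchronized sorted map and drained per-server when that
    // server's heartbeat is processed.
    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Iterator;
    import java.util.Map;
    import java.util.SortedMap;
    import java.util.TreeMap;

    class PendingActionModel {
      // regionName -> hosting server, e.g. "10.0.0.1:60020"
      private final SortedMap<String, String> regionsToFlush =
        Collections.synchronizedSortedMap(new TreeMap<String, String>());

      void startAction(String regionName, String server) {
        regionsToFlush.put(regionName, server);
      }

      // Drain every pending action targeted at 'server' into its heartbeat reply.
      void applyActions(String server, ArrayList<String> returnMsgs) {
        synchronized (regionsToFlush) {
          Iterator<Map.Entry<String, String>> i =
            regionsToFlush.entrySet().iterator();
          while (i.hasNext()) {
            Map.Entry<String, String> e = i.next();
            if (server.equals(e.getValue())) {
              returnMsgs.add("MSG_REGION_FLUSH " + e.getKey());
              i.remove();
            }
          }
        }
      }
    }
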
@@ -132,20 +132,33 @@ class CompactSplitThread extends Thread implements HConstants {
    compactionQueue.clear();
    LOG.info(getName() + " exiting");
  }

  /**
   * @param r HRegion store belongs to
   * @param why Why compaction requested -- used in debug messages
   */
  public synchronized void compactionRequested(final HRegion r,
      final String why) {
    compactionRequested(r, false, why);
  }

  /**
   * @param r HRegion store belongs to
   * @param force Whether next compaction should be major
   * @param why Why compaction requested -- used in debug messages
   */
  public synchronized void compactionRequested(final HRegion r,
      final boolean force, final String why) {
    if (this.server.stopRequested.get()) {
      return;
    }
    LOG.debug("Compaction requested for region " +
      Bytes.toString(r.getRegionName()) + "/" +
      r.getRegionInfo().getEncodedName() +
      (why != null && !why.isEmpty()? " because: " + why: ""));
    r.setForceMajorCompaction(force);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Compaction " + (force? "(major) ": "") +
        "requested for region " + Bytes.toString(r.getRegionName()) +
        "/" + r.getRegionInfo().getEncodedName() +
        (why != null && !why.isEmpty()? " because: " + why: ""));
    }
    synchronized (regionsInQueue) {
      if (!regionsInQueue.contains(r)) {
        compactionQueue.add(r);

@@ -223,6 +236,10 @@ class CompactSplitThread extends Thread implements HConstants {
    this.limit = limit;
  }

  int getLimit() {
    return this.limit;
  }

  /**
   * Only interrupt once it's done with a run through the work loop.
   */
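
The widened compactionRequested overload above takes an extra boolean that selects a major compaction, with the why string feeding only the debug log. A call-site sketch follows; because CompactSplitThread is package-private, this hypothetical helper is shown as if it lived in the regionserver package.

    // Sketch only: requesting a forced major compaction via the new overload.
    package org.apache.hadoop.hbase.regionserver;

    class CompactionRequestSketch {
      static void requestMajor(final CompactSplitThread thread, final HRegion r) {
        // true selects a major compaction; the string is just the debug reason.
        thread.compactionRequested(r, true, "MSG_REGION_MAJOR_COMPACT");
      }
    }
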
@@ -137,6 +137,12 @@ public class HRegion implements HConstants {
  final HRegionInfo regionInfo;
  final Path regiondir;
  private final Path regionCompactionDir;

  /*
   * Set this when scheduling compaction if want the next compaction to be a
   * major compaction. Cleared each time through compaction code.
   */
  private volatile boolean forceMajorCompaction = false;

  /*
   * Data structure of write state flags used coordinating flushes,

@@ -648,6 +654,14 @@ public class HRegion implements HConstants {
    }
  }

  void setForceMajorCompaction(final boolean b) {
    this.forceMajorCompaction = b;
  }

  boolean getForceMajorCompaction() {
    return this.forceMajorCompaction;
  }

  /**
   * Called by compaction thread and after region is opened to compact the
   * HStores if necessary.

@@ -663,7 +677,9 @@ public class HRegion implements HConstants {
   * @throws IOException
   */
  public byte [] compactStores() throws IOException {
    return compactStores(false);
    boolean majorCompaction = this.forceMajorCompaction;
    this.forceMajorCompaction = false;
    return compactStores(majorCompaction);
  }

  /*
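
The HRegion change above adds a one-shot flag: the compaction request sets forceMajorCompaction, and the next compactStores() call consumes and clears it. The standalone model below (not HBase code) shows that handshake end to end.

    // Simplified model of the force-major-compaction flag: set once, consumed
    // and cleared by the next compaction, so a forced major happens exactly once.
    class ForceMajorFlagModel {
      private volatile boolean forceMajorCompaction = false;

      void setForceMajorCompaction(boolean b) { this.forceMajorCompaction = b; }

      String compactStores() {
        boolean major = this.forceMajorCompaction;
        this.forceMajorCompaction = false;     // cleared once consumed
        return major ? "major compaction" : "minor compaction";
      }

      public static void main(String[] args) {
        ForceMajorFlagModel r = new ForceMajorFlagModel();
        r.setForceMajorCompaction(true);
        System.out.println(r.compactStores()); // major compaction
        System.out.println(r.compactStores()); // back to minor
      }
    }
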
@@ -77,6 +77,7 @@ import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.UnknownRowLockException;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.ValueOverMaxLengthException;
import org.apache.hadoop.hbase.HMsg.Type;
import org.apache.hadoop.hbase.Leases.LeaseStillHeldException;
import org.apache.hadoop.hbase.client.ServerConnection;
import org.apache.hadoop.hbase.client.ServerConnectionManager;

@@ -727,7 +728,6 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   * Thread for toggling safemode after some configurable interval.
   */
  private class SafeModeThread extends Thread {

    public void run() {
      // first, wait the required interval before turning off safemode
      int safemodeInterval =

@@ -761,7 +761,9 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
        -1
      };
      for (int i = 0; i < limitSteps.length; i++) {
        if (LOG.isDebugEnabled()) {
        // Just log changes.
        if (compactSplitThread.getLimit() != limitSteps[i] &&
            LOG.isDebugEnabled()) {
          LOG.debug("setting compaction limit to " + limitSteps[i]);
        }
        compactSplitThread.setLimit(limitSteps[i]);

@@ -1224,6 +1226,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
            continue;
          }
          LOG.info("Worker: " + e.msg);
          HRegion region = null;
          HRegionInfo info = e.msg.getRegionInfo();
          switch(e.msg.getType()) {

@@ -1257,28 +1260,29 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
            closeRegion(e.msg.getRegionInfo(), false);
            break;

          case MSG_REGION_SPLIT: {
            // Force split a region
            HRegion region = getRegion(info.getRegionName());
            // flush the memcache for the region
          case MSG_REGION_SPLIT:
            region = getRegion(info.getRegionName());
            region.flushcache();
            // flag that the region should be split
            region.regionInfo.shouldSplit(true);
            // force a compaction
            // force a compaction; split will be side-effect.
            compactSplitThread.compactionRequested(region,
              "MSG_REGION_SPLIT");
          } break;
              e.msg.getType().name());
            break;

          case MSG_REGION_COMPACT: {
          case MSG_REGION_MAJOR_COMPACT:
          case MSG_REGION_COMPACT:
            // Compact a region
            HRegion region = getRegion(info.getRegionName());
            // flush the memcache for the region
            region.flushcache();
            // force a compaction
            region = getRegion(info.getRegionName());
            compactSplitThread.compactionRequested(region,
              "MSG_REGION_COMPACT");
          } break;
              e.msg.isType(Type.MSG_REGION_MAJOR_COMPACT),
              e.msg.getType().name());
            break;

          case MSG_REGION_FLUSH:
            region = getRegion(info.getRegionName());
            region.flushcache();
            break;

          default:
            throw new AssertionError(
              "Impossible state during msg processing. Instruction: "
@@ -75,7 +75,7 @@ public class TestForceSplit extends HBaseClusterTestCase {
    assertTrue(m.size() == 1);

    // tell the master to split the table
    admin.modifyTable(tableName, HConstants.MODIFY_TABLE_SPLIT);
    admin.split(Bytes.toString(tableName));

    // give some time for the split to happen
    Thread.sleep(15 * 1000);
@@ -10,8 +10,8 @@ Automatically created by Tomcat JspC.

  <servlet>
    <servlet-name>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-name>
    <servlet-class>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-class>
    <servlet-name>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-name>
    <servlet-class>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-class>
  </servlet>

  <servlet>

@@ -20,13 +20,13 @@ Automatically created by Tomcat JspC.
  </servlet>

  <servlet>
    <servlet-name>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-name>
    <servlet-class>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-class>
    <servlet-name>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-name>
    <servlet-class>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-class>
  </servlet>

  <servlet-mapping>
    <servlet-name>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-name>
    <url-pattern>/table.jsp</url-pattern>
    <servlet-name>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-name>
    <url-pattern>/master.jsp</url-pattern>
  </servlet-mapping>

  <servlet-mapping>

@@ -35,8 +35,8 @@ Automatically created by Tomcat JspC.
  </servlet-mapping>

  <servlet-mapping>
    <servlet-name>org.apache.hadoop.hbase.generated.master.master_jsp</servlet-name>
    <url-pattern>/master.jsp</url-pattern>
    <servlet-name>org.apache.hadoop.hbase.generated.master.table_jsp</servlet-name>
    <url-pattern>/table.jsp</url-pattern>
  </servlet-mapping>

</web-app>