HBASE-902 Add force compaction and force split operations to UI and Admin

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@706755 13f79535-47bb-0310-9956-ffa450edef68
Andrew Kyle Purtell 2008-10-21 20:23:23 +00:00
parent b5200e0ea6
commit 0eb54f38b7
15 changed files with 488 additions and 38 deletions

CHANGES.txt

@@ -52,6 +52,7 @@ Release 0.19.0 - Unreleased
(Andrzej Bialecki via Stack)
HBASE-576 Investigate IPC performance
HBASE-920 Make region balancing sloppier
HBASE-902 Add force compaction and force split operations to UI and Admin
NEW FEATURES
HBASE-875 Use MurmurHash instead of JenkinsHash [in bloomfilters]

src/java/org/apache/hadoop/hbase/HConstants.java

@@ -254,4 +254,12 @@ public interface HConstants {
* backoff table, a bit more aggressive than simple exponential backoff.
*/
public static int RETRY_BACKOFF[] = { 1, 1, 1, 2, 2, 4, 4, 8, 16, 32 };
/** modifyTable op for replacing the table descriptor */
public static final int MODIFY_TABLE_SET_HTD = 1;
/** modifyTable op for forcing a split */
public static final int MODIFY_TABLE_SPLIT = 2;
/** modifyTable op for forcing a compaction */
public static final int MODIFY_TABLE_COMPACT = 3;
}

src/java/org/apache/hadoop/hbase/HMsg.java

@@ -49,7 +49,13 @@ public class HMsg implements Writable {
/** Stop serving the specified region */
MSG_REGION_CLOSE,
/** Split the specified region */
MSG_REGION_SPLIT,
/** Compact the specified region */
MSG_REGION_COMPACT,
/** Region server is unknown to master. Restart */
MSG_CALL_SERVER_STARTUP,

src/java/org/apache/hadoop/hbase/HRegionInfo.java

@@ -68,7 +68,8 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
//TODO: Move NO_HASH to HStoreFile which is really the only place it is used.
public static final int NO_HASH = -1;
private volatile int encodedName = NO_HASH;
private boolean splitRequest = false;
private void setHashCode() {
int result = this.regionName.hashCode();
result ^= this.regionId;
@@ -317,7 +318,8 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
Bytes.toString(this.startKey) + "', ENDKEY => '" +
Bytes.toString(this.endKey) +
"', ENCODED => " + getEncodedName() + "," +
(isOffline()? " OFFLINE => true,": "") +
(isSplit()? " SPLIT => true,": "") +
" TABLE => {" + this.tableDesc.toString() + "}"; " TABLE => {" + this.tableDesc.toString() + "}";
} }
@@ -393,4 +395,14 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
// Compare end keys.
return HStoreKey.compareTwoRowKeys(other, this.endKey, other.endKey);
}
/**
* For internal use in forcing splits ahead of the file size limit. Sets the
* split request flag and returns its previous value.
*/
public boolean shouldSplit(boolean b) {
boolean old = this.splitRequest;
this.splitRequest = b;
return old;
}
}
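
The new method is a test-and-set: it installs the new flag value and hands back the old one, letting a caller consume and clear a pending request in a single call. A minimal illustration, assuming an HRegionInfo instance named info:

info.shouldSplit(true); // request a forced split
boolean force = info.shouldSplit(false); // true exactly once, then cleared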

src/java/org/apache/hadoop/hbase/client/HBaseAdmin.java

@@ -35,11 +35,13 @@ import org.apache.hadoop.hbase.RegionException;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.ipc.HMasterInterface;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.RemoteException;
/**
@@ -504,20 +506,48 @@ public class HBaseAdmin {
}
/**
* Modify an existing table
*
* @param tableName name of table
* @param op table modification operation
* @param args operation specific arguments
* @throws IOException
*/
public void modifyTable(final byte [] tableName, int op, Object... args)
throws IOException {
if (this.master == null) {
throw new MasterNotRunningException("master has been shut down");
}
HTableDescriptor.isLegalTableName(tableName);
try {
switch (op) {
case HConstants.MODIFY_TABLE_SET_HTD: {
if (args == null || args.length < 1 ||
!(args[0] instanceof HTableDescriptor))
throw new IOException("SET_HTD requires a HTableDescriptor");
Writable[] arr = new Writable[1];
arr[0] = (HTableDescriptor)args[0];
this.master.modifyTable(tableName, op, arr);
} break;
case HConstants.MODIFY_TABLE_SPLIT:
case HConstants.MODIFY_TABLE_COMPACT: {
Writable[] arr = null;
if (args != null && args.length > 0) {
arr = new Writable[1];
if (args[0] instanceof byte[]) {
arr[0] = new ImmutableBytesWritable((byte[])args[0]);
} else if (args[0] instanceof ImmutableBytesWritable) {
arr[0] = (ImmutableBytesWritable)args[0];
} else {
throw new IOException(
"SPLIT with arg requires byte[] or ImmutableBytesWritable");
}
}
this.master.modifyTable(tableName, op, arr);
break;
}
default:
throw new IOException("unknown modifyTable op " + op);
}
} catch (RemoteException e) {
throw RemoteExceptionHandler.decodeRemoteException(e);
}
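
For reference, a sketch of how a client might drive the new entry point; the table name, row key, and descriptor variable (desc) are hypothetical, while the signatures and constants come from this patch:

HBaseAdmin admin = new HBaseAdmin(new HBaseConfiguration());
byte [] tableName = Bytes.toBytes("mytable");
// Replace the table descriptor (the updated TestHTable below disables the
// table before making this call).
admin.modifyTable(tableName, HConstants.MODIFY_TABLE_SET_HTD, desc);
// Ask for a compaction of every region of the table.
admin.modifyTable(tableName, HConstants.MODIFY_TABLE_COMPACT);
// Ask for a split of only the region serving "row100".
admin.modifyTable(tableName, HConstants.MODIFY_TABLE_SPLIT,
Bytes.toBytes("row100"));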

src/java/org/apache/hadoop/hbase/io/HbaseObjectWritable.java

@@ -95,6 +95,7 @@ public class HbaseObjectWritable implements Writable, Configurable {
// Hadoop types
addToMap(Text.class, code++);
addToMap(Writable.class, code++);
addToMap(Writable [].class, code++);
addToMap(HbaseMapWritable.class, code++);
addToMap(NullInstance.class, code++);
try {

src/java/org/apache/hadoop/hbase/ipc/HMasterInterface.java

@@ -24,6 +24,7 @@ import java.io.IOException;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HServerAddress;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.VersionedProtocol;
/**
@@ -39,8 +40,9 @@ public interface HMasterInterface extends VersionedProtocol {
* of actual class names (HADOOP-2519).
* <p>Version 4 when we moved to all byte arrays (HBASE-42).
* <p>Version 5 HBASE-576.
* <p>Version 6 modifyTable.
*/
public static final long versionID = 6L;
/** @return true if master is available */
public boolean isMasterRunning();
@@ -110,10 +112,11 @@ public interface HMasterInterface extends VersionedProtocol {
* Modify a table's metadata
*
* @param tableName
* @param op
* @param args
* @throws IOException
*/
public void modifyTable(byte[] tableName, int op, Writable[] args)
throws IOException;
/**

src/java/org/apache/hadoop/hbase/master/HMaster.java

@@ -21,9 +21,11 @@ package org.apache.hadoop.hbase.master;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.DelayQueue;
import java.util.concurrent.LinkedBlockingQueue;
@@ -55,6 +57,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.ServerConnection;
import org.apache.hadoop.hbase.client.ServerConnectionManager;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.ipc.HMasterInterface;
import org.apache.hadoop.hbase.ipc.HMasterRegionInterface;
@@ -64,10 +67,12 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.InfoServer;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Sleeper;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.MapWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.Server;
@@ -672,10 +677,117 @@ public class HMaster extends Thread implements HConstants, HMasterInterface,
new ChangeTableState(this, tableName, false).process();
}
private List<Pair<HRegionInfo,HServerAddress>>
getTableRegions(final byte [] tableName) throws IOException {
List<Pair<HRegionInfo,HServerAddress>> result =
new ArrayList<Pair<HRegionInfo,HServerAddress>>();
Set<MetaRegion> regions = regionManager.getMetaRegionsForTable(tableName);
for (MetaRegion m: regions) {
byte [] metaRegionName = m.getRegionName();
HRegionInterface srvr = connection.getHRegionConnection(m.getServer());
long scannerid =
srvr.openScanner(metaRegionName,
new byte[][] {COL_REGIONINFO, COL_SERVER},
tableName,
LATEST_TIMESTAMP,
null);
try {
while (true) {
RowResult data = srvr.next(scannerid);
if (data == null || data.size() <= 0)
break;
HRegionInfo info = Writables.getHRegionInfo(data.get(COL_REGIONINFO));
if (Bytes.compareTo(info.getTableDesc().getName(), tableName) == 0) {
Cell cell = data.get(COL_SERVER);
if (cell != null) {
HServerAddress server =
new HServerAddress(Bytes.toString(cell.getValue()));
result.add(new Pair<HRegionInfo,HServerAddress>(info, server));
}
} else {
break;
}
}
} finally {
srvr.close(scannerid);
}
}
return result;
}
private Pair<HRegionInfo,HServerAddress>
getTableRegionClosest(final byte [] tableName, final byte [] rowKey)
throws IOException {
Set<MetaRegion> regions = regionManager.getMetaRegionsForTable(tableName);
for (MetaRegion m: regions) {
byte [] metaRegionName = m.getRegionName();
HRegionInterface srvr = connection.getHRegionConnection(m.getServer());
long scannerid =
srvr.openScanner(metaRegionName,
new byte[][] {COL_REGIONINFO, COL_SERVER},
tableName,
LATEST_TIMESTAMP,
null);
try {
while (true) {
RowResult data = srvr.next(scannerid);
if (data == null || data.size() <= 0)
break;
HRegionInfo info = Writables.getHRegionInfo(data.get(COL_REGIONINFO));
if (Bytes.compareTo(info.getTableDesc().getName(), tableName) == 0) {
if ((Bytes.compareTo(info.getStartKey(), rowKey) >= 0) &&
(Bytes.compareTo(info.getEndKey(), rowKey) < 0)) {
Cell cell = data.get(COL_SERVER);
if (cell != null) {
HServerAddress server =
new HServerAddress(Bytes.toString(cell.getValue()));
return new Pair<HRegionInfo,HServerAddress>(info, server);
}
}
} else {
break;
}
}
} finally {
srvr.close(scannerid);
}
}
return null;
}
public void modifyTable(final byte[] tableName, int op, Writable[] args)
throws IOException {
switch (op) {
case MODIFY_TABLE_SET_HTD:
if (args == null || args.length < 1 ||
!(args[0] instanceof HTableDescriptor))
throw new IOException("SET_HTD request requires an HTableDescriptor");
HTableDescriptor htd = (HTableDescriptor) args[0];
LOG.info("modifyTable(SET_HTD): " + htd);
new ModifyTableMeta(this, tableName, htd).process();
break;
case MODIFY_TABLE_SPLIT:
case MODIFY_TABLE_COMPACT:
if (args != null && args.length > 0) {
if (!(args[0] instanceof ImmutableBytesWritable))
throw new IOException(
"request argument must be ImmutableBytesWritable");
byte[] rowKey = ((ImmutableBytesWritable)args[0]).get();
Pair<HRegionInfo,HServerAddress> pair =
getTableRegionClosest(tableName, rowKey);
if (pair != null) {
regionManager.startAction(pair.getFirst().getRegionName(),
pair.getFirst(), pair.getSecond(), op);
}
} else {
for (Pair<HRegionInfo,HServerAddress> pair: getTableRegions(tableName))
regionManager.startAction(pair.getFirst().getRegionName(),
pair.getFirst(), pair.getSecond(), op);
}
break;
default:
throw new IOException("unsupported modifyTable op " + op);
}
}
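
As the switch above shows, a SPLIT or COMPACT request carrying a row key is routed through getTableRegionClosest() to a single region, while a request with no argument fans out over every region returned by getTableRegions(). A hypothetical pair of direct calls against the new interface method:

master.modifyTable(Bytes.toBytes("mytable"), HConstants.MODIFY_TABLE_COMPACT,
new Writable[] { new ImmutableBytesWritable(Bytes.toBytes("row100")) });
master.modifyTable(Bytes.toBytes("mytable"), HConstants.MODIFY_TABLE_SPLIT, null);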
public HServerAddress findRootRegion() {

src/java/org/apache/hadoop/hbase/master/RegionManager.java

@@ -25,6 +25,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
@@ -47,6 +48,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.ipc.HRegionInterface;
import org.apache.hadoop.hbase.HMsg;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.util.Writables;
@@ -120,7 +122,18 @@ class RegionManager implements HConstants {
private final HMaster master;
private final RegionHistorian historian;
private final float slop;
/** Set of regions to split. */
private final Map<byte[],Pair<HRegionInfo,HServerAddress>> regionsToSplit =
Collections.synchronizedSortedMap(
new TreeMap<byte[],Pair<HRegionInfo,HServerAddress>>
(Bytes.BYTES_COMPARATOR));
/** Set of regions to compact. */
private final Map<byte[],Pair<HRegionInfo,HServerAddress>> regionsToCompact =
Collections.synchronizedSortedMap(
new TreeMap<byte[],Pair<HRegionInfo,HServerAddress>>
(Bytes.BYTES_COMPARATOR));
RegionManager(HMaster master) {
this.master = master;
this.historian = RegionHistorian.getInstance();
@@ -921,4 +934,74 @@ class RegionManager implements HConstants {
public void setNumMetaRegions(int num) {
numberOfMetaRegions.set(num);
}
/**
* Schedule a split or compaction action for a region.
* @param regionName
* @param info
* @param server
* @param op
*/
public void startAction(byte[] regionName, HRegionInfo info,
HServerAddress server, int op) {
switch (op) {
case HConstants.MODIFY_TABLE_SPLIT:
regionsToSplit.put(regionName,
new Pair<HRegionInfo,HServerAddress>(info, server));
break;
case HConstants.MODIFY_TABLE_COMPACT:
regionsToCompact.put(regionName,
new Pair<HRegionInfo,HServerAddress>(info, server));
break;
default:
throw new IllegalArgumentException("illegal table action " + op);
}
}
/**
* Clear a scheduled action for a region.
* @param regionName
* @param op
*/
public void endAction(byte[] regionName, int op) {
switch (op) {
case HConstants.MODIFY_TABLE_SPLIT:
regionsToSplit.remove(regionName);
break;
case HConstants.MODIFY_TABLE_COMPACT:
regionsToCompact.remove(regionName);
break;
default:
throw new IllegalArgumentException("illegal table action " + op);
}
}
/**
* Clear all scheduled actions for a region.
* @param regionName
*/
public void endActions(byte[] regionName) {
regionsToSplit.remove(regionName);
regionsToCompact.remove(regionName);
}
/**
* Send messages to the given region server asking it to split or compact
* any regions pending in 'regionsToSplit' or 'regionsToCompact'
* @param serverInfo
* @param returnMsgs
*/
public void applyActions(HServerInfo serverInfo, ArrayList<HMsg> returnMsgs) {
HServerAddress addr = serverInfo.getServerAddress();
Iterator<Pair<HRegionInfo,HServerAddress>> i =
regionsToCompact.values().iterator();
while (i.hasNext()) {
Pair<HRegionInfo,HServerAddress> pair = i.next();
if (addr.equals(pair.getSecond())) {
returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_COMPACT, pair.getFirst()));
i.remove();
}
}
i = regionsToSplit.values().iterator();
while (i.hasNext()) {
Pair<HRegionInfo,HServerAddress> pair = i.next();
if (addr.equals(pair.getSecond())) {
returnMsgs.add(new HMsg(HMsg.Type.MSG_REGION_SPLIT, pair.getFirst()));
i.remove();
}
}
}
}

src/java/org/apache/hadoop/hbase/master/ServerManager.java

@@ -70,7 +70,7 @@ class ServerManager implements HConstants {
/** Map of server names -> server load */
final Map<String, HServerLoad> serversToLoad =
new ConcurrentHashMap<String, HServerLoad>();
private HMaster master;
private final Leases serverLeases;
@@ -345,7 +345,7 @@
break;
case MSG_REPORT_SPLIT:
processSplitRegion(serverName, serverInfo, region, incomingMsgs[++i],
incomingMsgs[++i], returnMsgs);
break;
@@ -364,9 +364,14 @@
master.regionManager.setClosing(i.getRegionName());
}
}
// Figure out what the RegionServer ought to do, and write back.
master.regionManager.assignRegions(serverInfo, serverName,
mostLoadedRegions, returnMsgs);
// Send any pending table actions.
master.regionManager.applyActions(serverInfo, returnMsgs);
return returnMsgs.toArray(new HMsg[returnMsgs.size()]);
}
@@ -382,7 +387,12 @@
*/
private void processSplitRegion(String serverName, HServerInfo serverInfo,
HRegionInfo region, HMsg splitA, HMsg splitB, ArrayList<HMsg> returnMsgs) {
// Cancel any actions pending for the affected region.
// This prevents the master from sending a SPLIT message for a region
// that has already been split by the region server.
master.regionManager.endActions(region.getRegionName());
HRegionInfo newRegionA = splitA.getRegionInfo();
master.regionManager.setUnassigned(newRegionA);

src/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java

@@ -851,6 +851,21 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
closeRegion(e.msg.getRegionInfo(), false);
break;
case MSG_REGION_SPLIT: {
HRegionInfo info = e.msg.getRegionInfo();
// Force split a region
HRegion region = getRegion(info.getRegionName());
region.regionInfo.shouldSplit(true);
compactSplitThread.compactionRequested(region);
} break;
case MSG_REGION_COMPACT: {
// Compact a region
HRegionInfo info = e.msg.getRegionInfo();
HRegion region = getRegion(info.getRegionName());
compactSplitThread.compactionRequested(region);
} break;
default:
throw new AssertionError(
"Impossible state during msg processing. Instruction: "

src/java/org/apache/hadoop/hbase/regionserver/HStore.java

@@ -768,6 +768,7 @@ public class HStore implements HConstants {
* @throws IOException
*/
StoreSize compact(boolean majorCompaction) throws IOException {
boolean forceSplit = this.info.shouldSplit(false);
synchronized (compactLock) {
long maxId = -1;
int nrows = -1;
@@ -803,11 +804,11 @@
}
if (!majorCompaction && !hasReferences(filesToCompact) &&
filesToCompact.size() < compactionThreshold) {
return checkSplit(forceSplit);
}
if (!fs.exists(compactionDir) && !fs.mkdirs(compactionDir)) {
LOG.warn("Mkdir on " + compactionDir.toString() + " failed");
return checkSplit(forceSplit);
}
// HBASE-745, preparing all store file size for incremental compacting
@@ -848,7 +849,7 @@
StringUtils.humanReadableInt(totalSize) + "; Skipped " + point +
" files, size: " + skipped);
}
return checkSplit(forceSplit);
}
if (LOG.isDebugEnabled()) {
LOG.debug("Compaction size of " + this.storeNameStr + ": " +
@@ -911,7 +912,7 @@
(lastMajorCompaction/1000) + " seconds"));
}
}
return checkSplit(forceSplit);
}
/*
@@ -1838,11 +1839,11 @@
*
* @return a StoreSize if store can be split, null otherwise
*/
StoreSize checkSplit(boolean force) {
if (this.storefiles.size() <= 0) {
return null;
}
if (!force && (storeSize < this.desiredMaxFileSize)) {
return null;
}
this.lock.readLock().lock();
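
Taken together, the pieces above form the forced-split path end to end; a condensed trace, with every name drawn from the diffs in this commit:

// 1. Client: admin.modifyTable(tableName, MODIFY_TABLE_SPLIT, ...)
// 2. Master: HMaster.modifyTable() -> regionManager.startAction(...)
// 3. Heartbeat reply: RegionManager.applyActions() queues MSG_REGION_SPLIT
// 4. Region server: region.regionInfo.shouldSplit(true);
//    compactSplitThread.compactionRequested(region);
// 5. Store: boolean forceSplit = this.info.shouldSplit(false);
//    ...
//    return checkSplit(forceSplit); // may split even below desiredMaxFileSize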

src/java/org/apache/hadoop/hbase/util/Pair.java

@@ -0,0 +1,92 @@
package org.apache.hadoop.hbase.util;
import java.io.Serializable;
/**
* A generic class for pairs.
*/
public class Pair<T1, T2> implements Serializable
{
private static final long serialVersionUID = -3986244606585552569L;
protected T1 first = null;
protected T2 second = null;
/**
* Default constructor.
*/
public Pair()
{
}
/**
* Constructor
* @param a
* @param b
*/
public Pair(T1 a, T2 b)
{
this.first = a;
this.second = b;
}
/**
* Replace the first element of the pair.
* @param a
*/
public void setFirst(T1 a)
{
this.first = a;
}
/**
* Replace the second element of the pair.
* @param b
*/
public void setSecond(T2 b)
{
this.second = b;
}
/**
* Return the first element stored in the pair.
*/
public T1 getFirst()
{
return first;
}
/**
* Return the second element stored in the pair.
*/
public T2 getSecond()
{
return second;
}
private static boolean equals(Object x, Object y)
{
return (x == null && y == null) || (x != null && x.equals(y));
}
@SuppressWarnings("unchecked")
public boolean equals(Object other)
{
return other instanceof Pair && equals(first, ((Pair)other).first) &&
equals(second, ((Pair)other).second);
}
public int hashCode()
{
if (first == null)
return (second == null) ? 0 : second.hashCode() + 1;
else if (second == null)
return first.hashCode() + 2;
else
return first.hashCode() * 17 + second.hashCode();
}
public String toString()
{
return "{" + getFirst() + "," + getSecond() + "}";
}
}
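
A minimal usage sketch for the new utility class; the variables are hypothetical:

Pair<HRegionInfo,HServerAddress> pair =
new Pair<HRegionInfo,HServerAddress>(info, addr);
HRegionInfo region = pair.getFirst();
HServerAddress where = pair.getSecond();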

src/test/org/apache/hadoop/hbase/client/TestHTable.java

@@ -22,8 +22,6 @@ package org.apache.hadoop.hbase.client;
import java.io.IOException;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseClusterTestCase;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
@@ -33,15 +31,11 @@ import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.io.RowResult;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hbase.HRegionInfo;
/**
* Tests HTable
*/
public class TestHTable extends HBaseClusterTestCase implements HConstants {
private static final Log LOG = LogFactory.getLog(TestHTable.class);
private static final HColumnDescriptor column =
new HColumnDescriptor(COLUMN_FAMILY);
@@ -146,7 +140,7 @@ public class TestHTable extends HBaseClusterTestCase implements HConstants {
for (HColumnDescriptor c: desc.getFamilies())
c.setValue(attrName, attrValue);
// update metadata for all regions of this table
admin.modifyTable(tableAname, HConstants.MODIFY_TABLE_SET_HTD, desc);
// enable the table
admin.enableTable(tableAname);

src/webapps/master/table.jsp

@@ -1,10 +1,12 @@
<%@ page contentType="text/html;charset=UTF-8"
import="org.apache.hadoop.io.Text"
import="org.apache.hadoop.io.Writable"
import="org.apache.hadoop.hbase.HTableDescriptor" import="org.apache.hadoop.hbase.HTableDescriptor"
import="org.apache.hadoop.hbase.client.HTable" import="org.apache.hadoop.hbase.client.HTable"
import="org.apache.hadoop.hbase.HRegionInfo" import="org.apache.hadoop.hbase.HRegionInfo"
import="org.apache.hadoop.hbase.HServerAddress" import="org.apache.hadoop.hbase.HServerAddress"
import="org.apache.hadoop.hbase.HServerInfo" import="org.apache.hadoop.hbase.HServerInfo"
import="org.apache.hadoop.hbase.io.ImmutableBytesWritable"
import="org.apache.hadoop.hbase.master.HMaster" import="org.apache.hadoop.hbase.master.HMaster"
import="org.apache.hadoop.hbase.master.MetaRegion" import="org.apache.hadoop.hbase.master.MetaRegion"
import="org.apache.hadoop.hbase.util.Bytes" import="org.apache.hadoop.hbase.util.Bytes"
@ -18,22 +20,62 @@
master.getServersToServerInfo(); master.getServersToServerInfo();
String tableHeader = "<table><tr><th>Name</th><th>Region Server</th><th>Encoded Name</th><th>Start Key</th><th>End Key</th></tr>"; String tableHeader = "<table><tr><th>Name</th><th>Region Server</th><th>Encoded Name</th><th>Start Key</th><th>End Key</th></tr>";
HServerAddress rootLocation = master.getRootRegionLocation(); HServerAddress rootLocation = master.getRootRegionLocation();
%><?xml version="1.0" encoding="UTF-8" ?> %>
<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<%
String action = request.getParameter("action");
String key = request.getParameter("key");
if ( action != null ) {
%>
<head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
<meta http-equiv="refresh" content="5; url=/"/>
<link rel="stylesheet" type="text/css" href="/static/hbase.css" />
</head>
<body>
<a id="logo" href="http://wiki.apache.org/lucene-hadoop/Hbase"><img src="/static/hbase_logo_med.gif" alt="HBase Logo" title="HBase Logo" /></a>
<h1 id="page_title">Table action request accepted</h1>
<p><hr><p>
<%
if (action.equals("split")) {
if (key != null && key.length() > 0) {
Writable[] arr = new Writable[1];
arr[0] = new ImmutableBytesWritable(Bytes.toBytes(key));
master.modifyTable(Bytes.toBytes(tableName), HConstants.MODIFY_TABLE_SPLIT, arr);
} else {
master.modifyTable(Bytes.toBytes(tableName), HConstants.MODIFY_TABLE_SPLIT, null);
}
%> Split request accepted. <%
} else if (action.equals("compact")) {
if (key != null && key.length() > 0) {
Writable[] arr = new Writable[1];
arr[0] = new ImmutableBytesWritable(Bytes.toBytes(key));
master.modifyTable(Bytes.toBytes(tableName), HConstants.MODIFY_TABLE_COMPACT, arr);
} else {
master.modifyTable(Bytes.toBytes(tableName), HConstants.MODIFY_TABLE_COMPACT, null);
}
%> Compact request accepted. <%
}
%>
<p>This page will refresh in 5 seconds.
</body>
<%
} else {
%>
<head><meta http-equiv="Content-Type" content="text/html;charset=UTF-8"/>
<meta http-equiv="refresh" content="30"/>
<title>Regions in <%= tableName %></title>
<link rel="stylesheet" type="text/css" href="/static/hbase.css" />
</head>
<body>
<a id="logo" href="http://wiki.apache.org/lucene-hadoop/Hbase"><img src="/static/hbase_logo_med.gif" alt="HBase Logo" title="HBase Logo" /></a>
<h1 id="page_title">Regions in <%= tableName %></h1>
<p id="links_menu"><a href="/master.jsp">Master</a>, <a href="/logs/">Local logs</a>, <a href="/stacks">Thread Dump</a>, <a href="/logLevel">Log Level</a></p>
<hr id="head_rule" />
<%if(tableName.equals(Bytes.toString(HConstants.ROOT_TABLE_NAME))) {%>
<%= tableHeader %>
<% int infoPort = serverToServerInfos.get(rootLocation.getBindAddress()+":"+rootLocation.getPort()).getInfoPort();
@@ -51,9 +93,7 @@
<td>-</td><td><%= Bytes.toString(meta.getStartKey()) %></td><td>-</td></tr>
<% } %>
</table>
<%} else {
<%
try {
Map<HRegionInfo, HServerAddress> regions = table.getRegionsInfo();
if(regions != null && regions.size() > 0) { %>
@@ -74,5 +114,47 @@
ex.printStackTrace();
}
}%>
<p><hr><p>
Actions:
<p>
<center>
<table style="border-style: none" width="90%">
<tr>
<form method="get">
<input type="hidden" name="action" value="compact">
<input type="hidden" name="name" value="<%= tableName %>">
<td style="border-style: none; text-align: center">
<input style="font-size: 12pt; width: 10em" type="submit" value="Compact"></td>
<td style="border-style: none" width="5%">&nbsp;</td>
<td style="border-style: none">Region Key (optional):<input type="text" name="key" size="40"></td>
<td style="border-style: none">This action will force a compaction of all
regions of the table, or, if a key is supplied, only the region containing the
given key.</td>
</form>
</tr>
<tr><td style="border-style: none" colspan="4">&nbsp;</td></tr>
<tr>
<form method="get">
<input type="hidden" name="action" value="split">
<input type="hidden" name="name" value="<%= tableName %>">
<td style="border-style: none; text-align: center">
<input style="font-size: 12pt; width: 10em" type="submit" value="Split"></td>
<td style="border-style: none" width="5%">&nbsp;</td>
<td style="border-style: none">Region Key (optional):<input type="text" name="key" size="40"></td>
<td style="border-style: none">This action will force a split of all eligible
regions of the table, or, if a key is supplied, only the region containing the
given key. An eligible region is one that does not contain any references to
other regions. Split requests for noneligible regions will be ignored.</td>
</form>
</tr>
</table>
</center>
<p>
<%
}
%>
</body>
</html>