HBASE-2953 Edit of hbase-default.xml removing stale configs.

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@991700 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2010-09-01 20:51:50 +00:00
parent 22922782b7
commit 6d747b7f38
11 changed files with 37 additions and 69 deletions

View File

@ -871,6 +871,7 @@ Release 0.21.0 - Unreleased
HBASE-2405 Close, split, open of regions in RegionServer are run by a single
thread only.
HBASE-1676 load balancing on a large cluster doesn't work very well
HBASE-2953 Edit of hbase-default.xml removing stale configs.
NEW FEATURES
HBASE-1961 HBase EC2 scripts

View File

@ -290,11 +290,12 @@
<plugin>
<groupId>com.agilejava.docbkx</groupId>
<artifactId>docbkx-maven-plugin</artifactId>
<version>2.0.10</version>
<version>2.0.11</version>
<executions>
<execution>
<goals>
<goal>generate-html</goal>
<goal>generate-pdf</goal>
</goals>
<phase>pre-site</phase>
</execution>

View File

@ -49,10 +49,6 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil;
* that is 'local', not 'localhost', and the port number the master should use
* instead of 60000.
*
* <p>To make 'local' mode more responsive, make values such as
* <code>hbase.regionserver.msginterval</code>,
* <code>hbase.master.meta.thread.rescanfrequency</code>, and
* <code>hbase.server.thread.wakefrequency</code> a second or less.
*/
public class LocalHBaseCluster {
static final Log LOG = LogFactory.getLog(LocalHBaseCluster.class);

View File

@ -467,7 +467,8 @@ implements HMasterInterface, HMasterRegionInterface, MasterServices, Server {
private static Chore getAndStartBalancerChore(final HMaster master) {
String name = master.getServerName() + "-balancerChore";
int period = master.getConfiguration().getInt("hbase.balancer.period", 600000);
int period = master.getConfiguration().
getInt("hbase.master.balancer.period", 3000000);
// Start up the load balancer chore
Chore chore = new Chore(name, period, master) {
@Override

View File

@ -126,16 +126,14 @@ public class ServerManager {
this.master = master;
this.services = services;
Configuration c = master.getConfiguration();
int metaRescanInterval = c.getInt("hbase.master.meta.thread.rescanfrequency",
60 * 1000);
int monitorInterval = c.getInt("hbase.master.monitor.interval", 60 * 1000);
this.minimumServerCount = c.getInt("hbase.regions.server.count.min", 1);
this.metrics = new MasterMetrics(master.getServerName());
this.serverMonitorThread = new ServerMonitor(metaRescanInterval, master);
this.serverMonitorThread = new ServerMonitor(monitorInterval, master);
String n = Thread.currentThread().getName();
Threads.setDaemonThreadRunning(this.serverMonitorThread,
n + ".serverMonitor");
this.logCleaner = new LogCleaner(
c.getInt("hbase.master.meta.thread.rescanfrequency",60 * 1000),
this.logCleaner = new LogCleaner(c.getInt("hbase.master.cleaner.interval", 60 * 1000),
master, c, this.services.getMasterFileSystem().getFileSystem(),
this.services.getMasterFileSystem().getOldLogDir());
Threads.setDaemonThreadRunning(logCleaner,
@ -580,4 +578,4 @@ public class ServerManager {
public boolean isClusterShutdown() {
return this.clusterShutdown;
}
}
}

View File

@ -336,7 +336,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
}
initializeZooKeeper();
initializeThreads();
int nbBlocks = 0; // TODO: FIX WAS OOME'ing in TESTS -> conf.getInt("hbase.regionserver.nbreservationblocks", 4);
int nbBlocks = conf.getInt("hbase.regionserver.nbreservationblocks", 4);
for (int i = 0; i < nbBlocks; i++) {
reservedSpace.add(new byte[HConstants.DEFAULT_SIZE_RESERVATION_BLOCK]);
}
@ -2394,4 +2394,4 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler,
public int getNumberOfOnlineRegions() {
return onlineRegions.size();
}
}
}

View File

@ -71,13 +71,6 @@
hbase.client.write.buffer * hbase.regionserver.handler.count
</description>
</property>
<property>
<name>hbase.master.meta.thread.rescanfrequency</name>
<value>60000</value>
<description>How long the HMaster sleeps (in milliseconds) between scans of
the root and meta tables.
</description>
</property>
<property>
<name>hbase.regionserver.port</name>
<value>60020</value>
@ -164,10 +157,10 @@
</property>
<property>
<name>hbase.regionserver.msginterval</name>
<value>1000</value>
<value>3000</value>
<description>Interval between messages from the RegionServer to HMaster
in milliseconds. Use a high value like 3000 for clusters with more than 10
nodes. Default is 1 second so that HBase seems more 'live'.
in milliseconds. Use a high value for clusters with more than 100
nodes. Default is 3 seconds.
</description>
</property>
<property>
@ -268,27 +261,10 @@
</description>
</property>
<property>
<name>hbase.regionserver.global.memstore.upperLimit</name>
<value>0.4</value>
<description>Maximum size of all memstores in a region server before new
updates are blocked and flushes are forced. Defaults to 40% of heap
</description>
</property>
<property>
<name>hbase.regionserver.global.memstore.lowerLimit</name>
<value>0.35</value>
<description>When memstores are being forced to flush to make room in
memory, keep flushing until we hit this mark. Defaults to 35% of heap.
This value equal to hbase.regionserver.global.memstore.upperLimit causes
the minimum possible flushing to occur when updates are blocked due to
memstore limiting.
</description>
</property>
<property>
<name>hbase.hbasemaster.maxregionopen</name>
<value>120000</value>
<description>Period to wait for a region open. If regionserver
takes longer than this interval, assign to a new regionserver.
<name>hbase.master.balancer.period</name>
<value>300000</value>
<description>Period at which the balancer runs in the master.
</description>
</property>
<property>
@ -310,17 +286,27 @@
</description>
</property>
<property>
<name>hbase.regions.percheckin</name>
<value>10</value>
<description>Maximum number of regions that can be assigned in a single go
to a region server.
<name>hbase.regionserver.global.memstore.upperLimit</name>
<value>0.4</value>
<description>Maximum size of all memstores in a region server before new
updates are blocked and flushes are forced. Defaults to 40% of heap
</description>
</property>
<property>
<name>hbase.regionserver.global.memstore.lowerLimit</name>
<value>0.35</value>
<description>When memstores are being forced to flush to make room in
memory, keep flushing until we hit this mark. Defaults to 35% of heap.
This value equal to hbase.regionserver.global.memstore.upperLimit causes
the minimum possible flushing to occur when updates are blocked due to
memstore limiting.
</description>
</property>
<property>
<name>hbase.server.thread.wakefrequency</name>
<value>10000</value>
<description>Time to sleep in between searches for work (in milliseconds).
Used as sleep interval by service threads such as META scanner and log roller.
Used as sleep interval by service threads such as log roller.
</description>
</property>
<property>
@ -417,13 +403,6 @@
Set to 0 to disable automated major compactions.
</description>
</property>
<property>
<name>hbase.regions.slop</name>
<value>0.3</value>
<description>Rebalance if regionserver has average + (average * slop) regions.
Default is 30% slop.
</description>
</property>
<property>
<name>hfile.min.blocksize.size</name>
<value>65536</value>

View File

@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.util.Bytes;
/**
* Tests master cleanup of rows in meta table where there is no HRegionInfo
* TODO: Does this test make sense any more?
*/
public class TestEmptyMetaInfo extends HBaseClusterTestCase {
/**
@ -50,8 +51,7 @@ public class TestEmptyMetaInfo extends HBaseClusterTestCase {
Bytes.toBytes("localhost:1234"));
t.put(put);
}
long sleepTime =
conf.getLong("hbase.master.meta.thread.rescanfrequency", 10000);
long sleepTime = conf.getLong("hbase.master.monitor.interval", 1000);
int tries = conf.getInt("hbase.client.retries.number", 5);
int count = 0;
do {
@ -83,4 +83,4 @@ public class TestEmptyMetaInfo extends HBaseClusterTestCase {
assertTrue(tries >= 0);
assertEquals(0, count);
}
}
}

View File

@ -396,7 +396,7 @@ public class TestFromClientSide {
for (int i = 0; i < TEST_UTIL.getConfiguration().getInt("hbase.test.retries", 30); i++) {
Thread.currentThread();
try {
Thread.sleep(TEST_UTIL.getConfiguration().getInt("hbase.server.thread.wakefrequency", 1000));
Thread.sleep(1000);
} catch (InterruptedException e) {
e.printStackTrace();
}

View File

@ -64,7 +64,6 @@ public class TestZKBasedOpenCloseRegion {
Configuration c = TEST_UTIL.getConfiguration();
c.setBoolean("dfs.support.append", true);
c.setInt("hbase.regionserver.info.port", 0);
c.setInt("hbase.master.meta.thread.rescanfrequency", 5*1000);
TEST_UTIL.startMiniCluster(2);
TEST_UTIL.createTable(Bytes.toBytes(TABLENAME), FAMILIES);
HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);

View File

@ -45,13 +45,6 @@
Default: 10.
</description>
</property>
<property>
<name>hbase.master.meta.thread.rescanfrequency</name>
<value>10000</value>
<description>How long the HMaster sleeps (in milliseconds) between scans of
the root and meta tables.
</description>
</property>
<property>
<name>hbase.server.thread.wakefrequency</name>
<value>1000</value>