HBASE-7412 Fix how HTableDescriptor handles default max file size and flush size

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1426657 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
jxiang 2012-12-28 23:12:12 +00:00
parent 5807515e54
commit 0c504ad79f
6 changed files with 63 additions and 24 deletions

View File

@ -642,18 +642,19 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/** /**
* Returns the maximum size upto which a region can grow to after which a region * Returns the maximum size upto which a region can grow to after which a region
* split is triggered. The region size is represented by the size of the biggest * split is triggered. The region size is represented by the size of the biggest
* store file in that region. * store file in that region.
* *
* @return max hregion size for table * @return max hregion size for table, -1 if not set.
* *
* @see #setMaxFileSize(long) * @see #setMaxFileSize(long)
*/ */
public long getMaxFileSize() { public long getMaxFileSize() {
byte [] value = getValue(MAX_FILESIZE_KEY); byte [] value = getValue(MAX_FILESIZE_KEY);
if (value != null) if (value != null) {
return Long.valueOf(Bytes.toString(value)).longValue(); return Long.parseLong(Bytes.toString(value));
return HConstants.DEFAULT_MAX_FILE_SIZE; }
return -1;
} }
/** /**
@ -677,16 +678,17 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/** /**
* Returns the size of the memstore after which a flush to filesystem is triggered. * Returns the size of the memstore after which a flush to filesystem is triggered.
* *
* @return memory cache flush size for each hregion * @return memory cache flush size for each hregion, -1 if not set.
* *
* @see #setMemStoreFlushSize(long) * @see #setMemStoreFlushSize(long)
*/ */
public long getMemStoreFlushSize() { public long getMemStoreFlushSize() {
byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY); byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
if (value != null) if (value != null) {
return Long.valueOf(Bytes.toString(value)).longValue(); return Long.parseLong(Bytes.toString(value));
return DEFAULT_MEMSTORE_FLUSH_SIZE; }
return -1;
} }
/** /**

View File

@ -18,7 +18,9 @@
package org.apache.hadoop.hbase.regionserver; package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
/** /**
* A {@link RegionSplitPolicy} implementation which splits a region * A {@link RegionSplitPolicy} implementation which splits a region
@ -36,14 +38,15 @@ public class ConstantSizeRegionSplitPolicy extends RegionSplitPolicy {
@Override @Override
protected void configureForRegion(HRegion region) { protected void configureForRegion(HRegion region) {
super.configureForRegion(region); super.configureForRegion(region);
long maxFileSize = region.getTableDesc().getMaxFileSize(); Configuration conf = getConf();
HTableDescriptor desc = region.getTableDesc();
// By default we split region if a file > HConstants.DEFAULT_MAX_FILE_SIZE. if (desc != null) {
if (maxFileSize == HConstants.DEFAULT_MAX_FILE_SIZE) { this.desiredMaxFileSize = desc.getMaxFileSize();
maxFileSize = getConf().getLong(HConstants.HREGION_MAX_FILESIZE, }
if (this.desiredMaxFileSize <= 0) {
this.desiredMaxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE,
HConstants.DEFAULT_MAX_FILE_SIZE); HConstants.DEFAULT_MAX_FILE_SIZE);
} }
this.desiredMaxFileSize = maxFileSize;
} }
@Override @Override

View File

@ -514,9 +514,9 @@ public class HRegion implements HeapSize { // , Writable{
if (this.htableDescriptor == null) return; if (this.htableDescriptor == null) return;
long flushSize = this.htableDescriptor.getMemStoreFlushSize(); long flushSize = this.htableDescriptor.getMemStoreFlushSize();
if (flushSize == HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE) { if (flushSize <= 0) {
flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE); HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
} }
this.memstoreFlushSize = flushSize; this.memstoreFlushSize = flushSize;
this.blockingMemStoreSize = this.memstoreFlushSize * this.blockingMemStoreSize = this.memstoreFlushSize *

View File

@ -22,6 +22,7 @@ import java.util.List;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
@ -45,10 +46,15 @@ extends ConstantSizeRegionSplitPolicy {
@Override @Override
protected void configureForRegion(HRegion region) { protected void configureForRegion(HRegion region) {
super.configureForRegion(region); super.configureForRegion(region);
this.flushSize = region.getTableDesc() != null? Configuration conf = getConf();
region.getTableDesc().getMemStoreFlushSize(): HTableDescriptor desc = region.getTableDesc();
getConf().getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, if (desc != null) {
this.flushSize = desc.getMemStoreFlushSize();
}
if (this.flushSize <= 0) {
this.flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE); HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
}
} }
@Override @Override

View File

@ -78,4 +78,26 @@ public class TestHTableDescriptor {
desc.remove(key); desc.remove(key);
assertEquals(null, desc.getValue(key)); assertEquals(null, desc.getValue(key));
} }
/**
 * Test default value handling for maxFileSize: unset returns -1, and an
 * explicitly set value round-trips through the descriptor.
 */
@Test
public void testGetMaxFileSize() {
  HTableDescriptor desc = new HTableDescriptor("table");
  assertEquals(-1, desc.getMaxFileSize());
  desc.setMaxFileSize(1111L);
  assertEquals(1111L, desc.getMaxFileSize());
}
/**
 * Test default value handling for memStoreFlushSize: unset returns -1, and
 * an explicitly set value round-trips through the descriptor.
 */
@Test
public void testGetMemStoreFlushSize() {
  HTableDescriptor desc = new HTableDescriptor("table");
  assertEquals(-1, desc.getMemStoreFlushSize());
  desc.setMemStoreFlushSize(1111L);
  assertEquals(1111L, desc.getMemStoreFlushSize());
}
} }

View File

@ -372,6 +372,9 @@ public class TestAdmin {
assertTrue(htd.equals(copy)); assertTrue(htd.equals(copy));
// Now amend the copy. Introduce differences. // Now amend the copy. Introduce differences.
long newFlushSize = htd.getMemStoreFlushSize() / 2; long newFlushSize = htd.getMemStoreFlushSize() / 2;
if (newFlushSize <=0) {
newFlushSize = HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE / 2;
}
copy.setMemStoreFlushSize(newFlushSize); copy.setMemStoreFlushSize(newFlushSize);
final String key = "anyoldkey"; final String key = "anyoldkey";
assertTrue(htd.getValue(key) == null); assertTrue(htd.getValue(key) == null);
@ -460,6 +463,9 @@ public class TestAdmin {
assertTrue(htd.equals(copy)); assertTrue(htd.equals(copy));
// Now amend the copy. Introduce differences. // Now amend the copy. Introduce differences.
long newFlushSize = htd.getMemStoreFlushSize() / 2; long newFlushSize = htd.getMemStoreFlushSize() / 2;
if (newFlushSize <=0) {
newFlushSize = HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE / 2;
}
copy.setMemStoreFlushSize(newFlushSize); copy.setMemStoreFlushSize(newFlushSize);
final String key = "anyoldkey"; final String key = "anyoldkey";
assertTrue(htd.getValue(key) == null); assertTrue(htd.getValue(key) == null);