HBASE-7412 Fix how HTableDescriptor handles default max file size and flush size

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1426657 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
jxiang 2012-12-28 23:12:12 +00:00
parent 5807515e54
commit 0c504ad79f
6 changed files with 63 additions and 24 deletions

View File

@@ -645,15 +645,16 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
* split is triggered. The region size is represented by the size of the biggest
* store file in that region.
*
* @return max hregion size for table
* @return max hregion size for table, -1 if not set.
*
* @see #setMaxFileSize(long)
*/
public long getMaxFileSize() {
byte [] value = getValue(MAX_FILESIZE_KEY);
if (value != null)
return Long.valueOf(Bytes.toString(value)).longValue();
return HConstants.DEFAULT_MAX_FILE_SIZE;
if (value != null) {
return Long.parseLong(Bytes.toString(value));
}
return -1;
}
/**
@@ -678,15 +679,16 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**
* Returns the size of the memstore after which a flush to filesystem is triggered.
*
* @return memory cache flush size for each hregion
* @return memory cache flush size for each hregion, -1 if not set.
*
* @see #setMemStoreFlushSize(long)
*/
public long getMemStoreFlushSize() {
byte [] value = getValue(MEMSTORE_FLUSHSIZE_KEY);
if (value != null)
return Long.valueOf(Bytes.toString(value)).longValue();
return DEFAULT_MEMSTORE_FLUSH_SIZE;
if (value != null) {
return Long.parseLong(Bytes.toString(value));
}
return -1;
}
/**

View File

@@ -18,7 +18,9 @@
package org.apache.hadoop.hbase.regionserver;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
/**
* A {@link RegionSplitPolicy} implementation which splits a region
@@ -36,14 +38,15 @@ public class ConstantSizeRegionSplitPolicy extends RegionSplitPolicy {
@Override
protected void configureForRegion(HRegion region) {
super.configureForRegion(region);
long maxFileSize = region.getTableDesc().getMaxFileSize();
// By default we split region if a file > HConstants.DEFAULT_MAX_FILE_SIZE.
if (maxFileSize == HConstants.DEFAULT_MAX_FILE_SIZE) {
maxFileSize = getConf().getLong(HConstants.HREGION_MAX_FILESIZE,
Configuration conf = getConf();
HTableDescriptor desc = region.getTableDesc();
if (desc != null) {
this.desiredMaxFileSize = desc.getMaxFileSize();
}
if (this.desiredMaxFileSize <= 0) {
this.desiredMaxFileSize = conf.getLong(HConstants.HREGION_MAX_FILESIZE,
HConstants.DEFAULT_MAX_FILE_SIZE);
}
this.desiredMaxFileSize = maxFileSize;
}
@Override

View File

@@ -514,9 +514,9 @@ public class HRegion implements HeapSize { // , Writable{
if (this.htableDescriptor == null) return;
long flushSize = this.htableDescriptor.getMemStoreFlushSize();
if (flushSize == HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE) {
if (flushSize <= 0) {
flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
}
this.memstoreFlushSize = flushSize;
this.blockingMemStoreSize = this.memstoreFlushSize *

View File

@@ -22,6 +22,7 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.Bytes;
@@ -45,10 +46,15 @@ extends ConstantSizeRegionSplitPolicy {
@Override
protected void configureForRegion(HRegion region) {
super.configureForRegion(region);
this.flushSize = region.getTableDesc() != null?
region.getTableDesc().getMemStoreFlushSize():
getConf().getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
Configuration conf = getConf();
HTableDescriptor desc = region.getTableDesc();
if (desc != null) {
this.flushSize = desc.getMemStoreFlushSize();
}
if (this.flushSize <= 0) {
this.flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE);
}
}
@Override

View File

@@ -78,4 +78,26 @@ public class TestHTableDescriptor {
desc.remove(key);
assertEquals(null, desc.getValue(key));
}
/**
* Test default value handling for maxFileSize
*/
@Test
public void testGetMaxFileSize() {
HTableDescriptor desc = new HTableDescriptor("table");
assertEquals(-1, desc.getMaxFileSize());
desc.setMaxFileSize(1111L);
assertEquals(1111L, desc.getMaxFileSize());
}
/**
* Test default value handling for memStoreFlushSize
*/
@Test
public void testGetMemStoreFlushSize() {
HTableDescriptor desc = new HTableDescriptor("table");
assertEquals(-1, desc.getMemStoreFlushSize());
desc.setMemStoreFlushSize(1111L);
assertEquals(1111L, desc.getMemStoreFlushSize());
}
}

View File

@@ -372,6 +372,9 @@ public class TestAdmin {
assertTrue(htd.equals(copy));
// Now amend the copy. Introduce differences.
long newFlushSize = htd.getMemStoreFlushSize() / 2;
if (newFlushSize <=0) {
newFlushSize = HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE / 2;
}
copy.setMemStoreFlushSize(newFlushSize);
final String key = "anyoldkey";
assertTrue(htd.getValue(key) == null);
@@ -460,6 +463,9 @@ public class TestAdmin {
assertTrue(htd.equals(copy));
// Now amend the copy. Introduce differences.
long newFlushSize = htd.getMemStoreFlushSize() / 2;
if (newFlushSize <=0) {
newFlushSize = HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE / 2;
}
copy.setMemStoreFlushSize(newFlushSize);
final String key = "anyoldkey";
assertTrue(htd.getValue(key) == null);