HBASE-8450 Update hbase-default.xml and general recommendations to better suit current hw, h2, experience, etc.

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1485561 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2013-05-23 04:11:12 +00:00
parent f6a5cabe6d
commit b5146ebf6e
19 changed files with 485 additions and 438 deletions

HColumnDescriptor.java

@ -118,7 +118,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
/**
* Default number of versions of a record to keep.
*/
- public static final int DEFAULT_VERSIONS = 3;
+ public static final int DEFAULT_VERSIONS = 1;
/**
* Default is not to keep a minimum of versions.
@ -151,7 +151,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
* is enabled.
*/
public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false;
/**
* Default setting for whether to cache index blocks on write if block
* caching is enabled.
@ -166,7 +166,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
/**
* Default setting for whether or not to use bloomfilters.
*/
- public static final String DEFAULT_BLOOMFILTER = BloomType.NONE.toString();
+ public static final String DEFAULT_BLOOMFILTER = BloomType.ROW.toString();
/**
* Default setting for whether to cache bloom filter blocks on write if block
@ -543,7 +543,7 @@ public class HColumnDescriptor implements WritableComparable<HColumnDescriptor>
return Compression.Algorithm.valueOf(n.toUpperCase());
}
- /** @return compression type being used for the column family for major
+ /** @return compression type being used for the column family for major
compression */
public Compression.Algorithm getCompactionCompression() {
String n = getValue(COMPRESSION_COMPACT);
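
The HColumnDescriptor hunks above change two client-visible schema defaults: a new column family now keeps one version instead of three, and gets a ROW bloom filter instead of none. A minimal sketch, against the 0.95-era client API used throughout this commit, of how an application that depends on the old behavior would pin the previous defaults explicitly; the table and family names are placeholders, not from this commit:

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.regionserver.BloomType;

    public class OldDefaultsSketch {
      public static void main(String[] args) {
        HTableDescriptor desc = new HTableDescriptor("example_table"); // placeholder name
        HColumnDescriptor hcd = new HColumnDescriptor("cf");           // placeholder family
        hcd.setMaxVersions(3);                  // old DEFAULT_VERSIONS was 3; new default is 1
        hcd.setBloomFilterType(BloomType.NONE); // old DEFAULT_BLOOMFILTER was NONE; new is ROW
        desc.addFamily(hcd);
        System.out.println(desc);               // prints the pinned schema
      }
    }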

HTableDescriptor.java

@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
+ import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Writables;
@ -1287,6 +1288,8 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
.setInMemory(true)
.setBlocksize(8 * 1024)
.setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+ // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
+ .setBloomFilterType(BloomType.NONE)
});
static {
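
Per the in-line comment above, .META. opts out of the new ROW bloom default because blooms seem to interfere with getClosestOrBefore. An existing user table can opt out the same way. A sketch against the 0.95-era admin API; the table and family names are placeholders, and it assumes the table must be disabled for the schema change (online schema change was not the default in this era):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.client.HBaseAdmin;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DisableBloomsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HBaseAdmin admin = new HBaseAdmin(conf);
        byte[] table = Bytes.toBytes("example_table"); // placeholder
        admin.disableTable(table);
        // Modify the existing family descriptor rather than building a fresh one,
        // so the family's other settings are preserved.
        HTableDescriptor htd = admin.getTableDescriptor(table);
        HColumnDescriptor hcd = htd.getFamily(Bytes.toBytes("cf")); // placeholder
        hcd.setBloomFilterType(BloomType.NONE);
        admin.modifyColumn(table, hcd);
        admin.enableTable(table);
        admin.close();
      }
    }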

File diff suppressed because it is too large

CompactionConfiguration.java

@ -24,7 +24,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
- import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
/**
@ -79,8 +78,9 @@ public class CompactionConfiguration {
throttlePoint = conf.getLong("hbase.regionserver.thread.compaction.throttle",
2 * maxFilesToCompact * storeConfigInfo.getMemstoreFlushSize());
shouldDeleteExpired = conf.getBoolean("hbase.store.delete.expired.storefile", true);
- majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24);
- majorCompactionJitter = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.20F);
+ majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, 1000*60*60*24*7);
+ // Make it 0.5 so jitter has us fall evenly either side of when the compaction should run
+ majorCompactionJitter = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.50F);
LOG.info("Compaction configuration " + this.toString());
}
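
The new defaults above move automatic major compactions from daily to weekly and widen the jitter from 0.2 to 0.5 of the period, so per-region compaction times spread evenly around the target instead of clustering together. A back-of-the-envelope sketch of the resulting window; this mirrors the comment's description, not the exact policy code:

    import java.util.Random;

    public class CompactionJitterSketch {
      public static void main(String[] args) {
        long period = 1000L * 60 * 60 * 24 * 7; // new default period: 7 days in ms
        float jitter = 0.50F;                   // new default jitter
        // A 0.5 jitter lands the next major compaction uniformly in
        // [0.5 * period, 1.5 * period], i.e. between 3.5 and 10.5 days.
        long delta = (long) (jitter * period);
        long next = period - delta + (long) (2 * delta * new Random().nextDouble());
        System.out.println("Next major compaction in ~" + (next / 3600000L) + " hours");
      }
    }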

HBaseTestingUtility.java

@ -1064,7 +1064,12 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
throws IOException {
HTableDescriptor desc = new HTableDescriptor(tableName);
for(byte[] family : families) {
- desc.addFamily(new HColumnDescriptor(family));
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ // Blooms are on by default as of 0.95; disable them here because tests have
+ // hard-coded counts of what to expect in the block cache, etc., and blooms
+ // being on interferes with those counts.
+ hcd.setBloomFilterType(BloomType.NONE);
+ desc.addFamily(hcd);
}
getHBaseAdmin().createTable(desc);
return new HTable(c, tableName);
@ -1118,8 +1123,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
throws IOException {
HTableDescriptor desc = new HTableDescriptor(tableName);
for (byte[] family : families) {
- HColumnDescriptor hcd = new HColumnDescriptor(family)
- .setMaxVersions(numVersions);
+ HColumnDescriptor hcd = new HColumnDescriptor(family).setMaxVersions(numVersions);
desc.addFamily(hcd);
}
getHBaseAdmin().createTable(desc);
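
Because tables created through HBaseTestingUtility now inherit the one-version default, tests that need history must pass numVersions explicitly through overloads like the one above. A usage sketch against a mini cluster; the table and family names are placeholders:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster();
        // The trailing 3 is numVersions; without it the family now keeps one version.
        HTable t = util.createTable(Bytes.toBytes("t1"), Bytes.toBytes("f1"), 3);
        t.close();
        util.shutdownMiniCluster();
      }
    }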

TestMultiVersions.java

@ -95,7 +95,9 @@ public class TestMultiVersions {
@Test
public void testTimestamps() throws Exception {
HTableDescriptor desc = new HTableDescriptor("testTimestamps");
- desc.addFamily(new HColumnDescriptor(TimestampTestBase.FAMILY_NAME));
+ HColumnDescriptor hcd = new HColumnDescriptor(TimestampTestBase.FAMILY_NAME);
+ hcd.setMaxVersions(3);
+ desc.addFamily(hcd);
this.admin.createTable(desc);
HTable table = new HTable(UTIL.getConfiguration(), desc.getName());
// TODO: Remove these deprecated classes or pull them in here if this is
@ -134,7 +136,9 @@ public class TestMultiVersions {
final long timestamp1 = 100L;
final long timestamp2 = 200L;
final HTableDescriptor desc = new HTableDescriptor(tableName);
- desc.addFamily(new HColumnDescriptor(contents));
+ HColumnDescriptor hcd = new HColumnDescriptor(contents);
+ hcd.setMaxVersions(3);
+ desc.addFamily(hcd);
this.admin.createTable(desc);
Put put = new Put(row, timestamp1);
put.add(contents, contents, value1);
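
These TestMultiVersions fixes show the behavior change most directly: under the new one-version default, the second timestamped put would evict the first, so the tests pin three versions. A small sketch of the pattern, assuming an HTable whose family (here contents) keeps at least two versions:

    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.util.Bytes;

    public class VersionsSketch {
      static final byte[] CF = Bytes.toBytes("contents");
      static final byte[] ROW = Bytes.toBytes("row1");

      static void demo(HTable table) throws Exception {
        Put p1 = new Put(ROW, 100L);             // explicit timestamp 100
        p1.add(CF, CF, Bytes.toBytes("value1"));
        table.put(p1);
        Put p2 = new Put(ROW, 200L);             // explicit timestamp 200
        p2.add(CF, CF, Bytes.toBytes("value2"));
        table.put(p2);
        Get get = new Get(ROW);
        get.setMaxVersions();                    // request every stored version
        Result r = table.get(get);
        // Both values come back only if the family keeps more than one version.
        System.out.println("versions stored: " + r.size());
      }
    }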

TestFromClientSide.java

@ -171,7 +171,7 @@ public class TestFromClientSide {
final byte[] T2 = Bytes.toBytes("T2");
final byte[] T3 = Bytes.toBytes("T3");
HColumnDescriptor hcd = new HColumnDescriptor(FAMILY)
- .setKeepDeletedCells(true);
+ .setKeepDeletedCells(true).setMaxVersions(3);
HTableDescriptor desc = new HTableDescriptor(TABLENAME);
desc.addFamily(hcd);
@ -1730,7 +1730,7 @@ public class TestFromClientSide {
byte [][] VALUES = makeN(VALUE, 5);
long [] ts = {1000, 2000, 3000, 4000, 5000};
- HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES);
+ HTable ht = TEST_UTIL.createTable(TABLE, FAMILIES, 3);
Put put = new Put(ROW);
put.add(FAMILIES[0], QUALIFIER, ts[0], VALUES[0]);
@ -4459,7 +4459,7 @@ public class TestFromClientSide {
conf.setInt(HConstants.HBASE_CLIENT_IPC_POOL_SIZE, poolSize);
final HTable table = TEST_UTIL.createTable(tableName,
- new byte[][] { FAMILY }, conf);
+ new byte[][] { FAMILY }, conf, 3);
table.setAutoFlush(true);
final long ts = EnvironmentEdgeManager.currentTimeMillis();

TestColumnPrefixFilter.java

@ -47,7 +47,7 @@ public class TestColumnPrefixFilter {
public void testColumnPrefixFilter() throws IOException {
String family = "Family";
HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter");
- htd.addFamily(new HColumnDescriptor(family));
+ htd.addFamily((new HColumnDescriptor(family)).setMaxVersions(3));
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
HRegion region = HRegion.createHRegion(info, TEST_UTIL.
getDataTestDir(), TEST_UTIL.getConfiguration(), htd);
@ -109,7 +109,7 @@ public class TestColumnPrefixFilter {
public void testColumnPrefixFilterWithFilterList() throws IOException {
String family = "Family";
HTableDescriptor htd = new HTableDescriptor("TestColumnPrefixFilter");
- htd.addFamily(new HColumnDescriptor(family));
+ htd.addFamily((new HColumnDescriptor(family)).setMaxVersions(3));
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
HRegion region = HRegion.createHRegion(info, TEST_UTIL.
getDataTestDir(), TEST_UTIL.getConfiguration(), htd);

TestDependentColumnFilter.java

@ -71,8 +71,12 @@ public class TestDependentColumnFilter {
testVals = makeTestVals();
HTableDescriptor htd = new HTableDescriptor(this.getClass().getName());
- htd.addFamily(new HColumnDescriptor(FAMILIES[0]));
- htd.addFamily(new HColumnDescriptor(FAMILIES[1]));
+ HColumnDescriptor hcd0 = new HColumnDescriptor(FAMILIES[0]);
+ hcd0.setMaxVersions(3);
+ htd.addFamily(hcd0);
+ HColumnDescriptor hcd1 = new HColumnDescriptor(FAMILIES[1]);
+ hcd1.setMaxVersions(3);
+ htd.addFamily(hcd1);
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
this.region = HRegion.createHRegion(info, TEST_UTIL.getDataTestDir(),
TEST_UTIL.getConfiguration(), htd);

TestMultipleColumnPrefixFilter.java

@ -47,7 +47,9 @@ public class TestMultipleColumnPrefixFilter {
public void testMultipleColumnPrefixFilter() throws IOException {
String family = "Family";
HTableDescriptor htd = new HTableDescriptor("TestMultipleColumnPrefixFilter");
- htd.addFamily(new HColumnDescriptor(family));
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ hcd.setMaxVersions(3);
+ htd.addFamily(hcd);
// HRegionInfo info = new HRegionInfo(htd, null, null, false);
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
HRegion region = HRegion.createHRegion(info, TEST_UTIL.
@ -109,8 +111,12 @@ public class TestMultipleColumnPrefixFilter {
String family1 = "Family1";
String family2 = "Family2";
HTableDescriptor htd = new HTableDescriptor("TestMultipleColumnPrefixFilter");
- htd.addFamily(new HColumnDescriptor(family1));
- htd.addFamily(new HColumnDescriptor(family2));
+ HColumnDescriptor hcd1 = new HColumnDescriptor(family1);
+ hcd1.setMaxVersions(3);
+ htd.addFamily(hcd1);
+ HColumnDescriptor hcd2 = new HColumnDescriptor(family2);
+ hcd2.setMaxVersions(3);
+ htd.addFamily(hcd2);
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
HRegion region = HRegion.createHRegion(info, TEST_UTIL.
getDataTestDir(), TEST_UTIL.getConfiguration(), htd);

TestEncodedSeekers.java

@ -27,6 +27,7 @@ import java.util.Map;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
+ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.client.Get;
@ -34,9 +35,9 @@ import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
+ import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.util.Bytes;
- import org.apache.hadoop.hbase.util.MultiThreadedWriter;
import org.apache.hadoop.hbase.util.Strings;
import org.apache.hadoop.hbase.util.test.LoadTestKVGenerator;
import org.junit.Test;
@ -91,17 +92,16 @@ public class TestEncodedSeekers {
@Test
public void testEncodedSeeker() throws IOException {
System.err.println("Testing encoded seekers for encoding " + encoding);
- LruBlockCache cache = (LruBlockCache)
- new CacheConfig(testUtil.getConfiguration()).getBlockCache();
+ LruBlockCache cache =
+ (LruBlockCache)new CacheConfig(testUtil.getConfiguration()).getBlockCache();
cache.clearCache();
- HRegion region = testUtil.createTestRegion(
- TABLE_NAME, new HColumnDescriptor(CF_NAME)
- .setMaxVersions(MAX_VERSIONS)
- .setDataBlockEncoding(encoding)
- .setEncodeOnDisk(encodeOnDisk)
- .setBlocksize(BLOCK_SIZE)
- );
+ // Need to disable default row bloom filter for this test to pass.
+ HColumnDescriptor hcd = (new HColumnDescriptor(CF_NAME)).setMaxVersions(MAX_VERSIONS).
+ setDataBlockEncoding(encoding).
+ setEncodeOnDisk(encodeOnDisk).
+ setBlocksize(BLOCK_SIZE).
+ setBloomFilterType(BloomType.NONE);
+ HRegion region = testUtil.createTestRegion(TABLE_NAME, hcd);
//write the data, but leave some in the memstore
doPuts(region);

TestImportExport.java

@ -133,7 +133,7 @@ public class TestImportExport {
@Test
public void testSimpleCase() throws Exception {
String EXPORT_TABLE = "exportSimpleCase";
- HTable t = UTIL.createTable(Bytes.toBytes(EXPORT_TABLE), FAMILYA);
+ HTable t = UTIL.createTable(Bytes.toBytes(EXPORT_TABLE), FAMILYA, 3);
Put p = new Put(ROW1);
p.add(FAMILYA, QUAL, now, QUAL);
p.add(FAMILYA, QUAL, now+1, QUAL);
@ -153,7 +153,7 @@ public class TestImportExport {
assertTrue(runExport(args));
String IMPORT_TABLE = "importTableSimpleCase";
- t = UTIL.createTable(Bytes.toBytes(IMPORT_TABLE), FAMILYB);
+ t = UTIL.createTable(Bytes.toBytes(IMPORT_TABLE), FAMILYB, 3);
args = new String[] {
"-D" + Import.CF_RENAME_PROP + "="+FAMILYA_STRING+":"+FAMILYB_STRING,
IMPORT_TABLE,

TestColumnSeeking.java

@ -56,6 +56,7 @@ public class TestColumnSeeking {
HColumnDescriptor hcd =
new HColumnDescriptor(familyBytes).setMaxVersions(1000);
+ hcd.setMaxVersions(3);
HTableDescriptor htd = new HTableDescriptor(table);
htd.addFamily(hcd);
HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false);
@ -168,7 +169,9 @@ public class TestColumnSeeking {
String table = "TestSingleVersions";
HTableDescriptor htd = new HTableDescriptor(table);
- htd.addFamily(new HColumnDescriptor(family));
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ hcd.setMaxVersions(3);
+ htd.addFamily(hcd);
HRegionInfo info = new HRegionInfo(Bytes.toBytes(table), null, null, false);
HRegion region =

TestHRegion.java

@ -4014,7 +4014,10 @@ public class TestHRegion extends HBaseTestCase {
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.setReadOnly(isReadOnly);
for(byte [] family : families) {
- htd.addFamily(new HColumnDescriptor(family));
+ HColumnDescriptor hcd = new HColumnDescriptor(family);
+ // Keep all versions; many of these tests assume more than one version is retained.
+ hcd.setMaxVersions(Integer.MAX_VALUE);
+ htd.addFamily(hcd);
}
HRegionInfo info = new HRegionInfo(htd.getName(), startKey, stopKey, false);
Path path = new Path(DIR + callingMethod);

TestHRegionBusyWait.java

@ -35,6 +35,8 @@ import org.junit.experimental.categories.Category;
@Category(MediumTests.class)
@SuppressWarnings("deprecation")
public class TestHRegionBusyWait extends TestHRegion {
+ // TODO: This subclass runs all the tests in TestHRegion as well as the test below, which
+ // means all TestHRegion tests are run twice.
public TestHRegionBusyWait() {
conf.set("hbase.busy.wait.duration", "1000");
}
@ -87,4 +89,4 @@ public class TestHRegionBusyWait extends TestHRegion {
region = null;
}
}
- }
\ No newline at end of file
+ }

TestSeekOptimizations.java

@ -143,6 +143,7 @@ public class TestSeekOptimizations {
new HColumnDescriptor(FAMILY)
.setCompressionType(comprAlgo)
.setBloomFilterType(bloomType)
+ .setMaxVersions(3)
);
// Delete the given timestamp and everything before.

TestReplicationBase.java

@ -120,6 +120,7 @@ public class TestReplicationBase {
HTableDescriptor table = new HTableDescriptor(tableName);
HColumnDescriptor fam = new HColumnDescriptor(famName);
+ fam.setMaxVersions(3);
fam.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
table.addFamily(fam);
fam = new HColumnDescriptor(noRepfamName);

TestRemoteTable.java

@ -78,9 +78,9 @@ public class TestRemoteTable {
HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
if (!admin.tableExists(TABLE)) {
HTableDescriptor htd = new HTableDescriptor(TABLE);
- htd.addFamily(new HColumnDescriptor(COLUMN_1));
- htd.addFamily(new HColumnDescriptor(COLUMN_2));
- htd.addFamily(new HColumnDescriptor(COLUMN_3));
+ htd.addFamily(new HColumnDescriptor(COLUMN_1).setMaxVersions(3));
+ htd.addFamily(new HColumnDescriptor(COLUMN_2).setMaxVersions(3));
+ htd.addFamily(new HColumnDescriptor(COLUMN_3).setMaxVersions(3));
admin.createTable(htd);
HTable table = new HTable(TEST_UTIL.getConfiguration(), TABLE);
Put put = new Put(ROW_1);

TestThriftHBaseServiceHandler.java

@ -76,9 +76,8 @@ public class TestThriftHBaseServiceHandler {
private static byte[] valueAname = Bytes.toBytes("valueA");
private static byte[] valueBname = Bytes.toBytes("valueB");
private static HColumnDescriptor[] families = new HColumnDescriptor[] {
- new HColumnDescriptor(familyAname),
- new HColumnDescriptor(familyBname)
- .setMaxVersions(2)
+ new HColumnDescriptor(familyAname).setMaxVersions(3),
+ new HColumnDescriptor(familyBname).setMaxVersions(2)
};