HBASE-17294: External configuration for memory compaction

Signed-off-by: Michael Stack <stack@apache.org>
eshcar 2016-12-13 23:54:12 +02:00 committed by Michael Stack
parent de98f68408
commit a9310436d5
25 changed files with 205 additions and 92 deletions


@ -66,6 +66,32 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
public static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION"; public static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION";
/**
* Enum describing all possible memory compaction policies
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum MemoryCompaction {
/**
* No memory compaction: when the size threshold is exceeded, data is flushed to disk
*/
NONE,
/**
* Basic policy applies optimizations which modify the index to a more compacted representation.
* This is beneficial in all access patterns. The smaller the cells are, the greater the
* benefit of this policy.
* This is the default policy.
*/
BASIC,
/**
* In addition to compacting the index representation as the basic policy, eager policy
* eliminates duplication while the data is still in memory (much like the
* on-disk compaction does after the data is flushed to disk). This policy is most useful for
* applications with high data churn or small working sets.
*/
EAGER
}
// These constants are used as FileInfo keys // These constants are used as FileInfo keys
public static final String COMPRESSION = "COMPRESSION"; public static final String COMPRESSION = "COMPRESSION";
public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT"; public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
@ -173,11 +199,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
*/ */
public static final boolean DEFAULT_IN_MEMORY = false; public static final boolean DEFAULT_IN_MEMORY = false;
/**
* Default setting for whether to set the memstore of this column family as compacting or not.
*/
public static final boolean DEFAULT_IN_MEMORY_COMPACTION = false;
/** /**
* Default setting for preventing deleted from being collected immediately. * Default setting for preventing deleted from being collected immediately.
*/ */
@ -263,7 +284,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL)); DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE)); DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY)); DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
DEFAULT_VALUES.put(IN_MEMORY_COMPACTION, String.valueOf(DEFAULT_IN_MEMORY_COMPACTION));
DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE)); DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED)); DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING)); DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
@ -329,7 +349,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
setMinVersions(DEFAULT_MIN_VERSIONS); setMinVersions(DEFAULT_MIN_VERSIONS);
setKeepDeletedCells(DEFAULT_KEEP_DELETED); setKeepDeletedCells(DEFAULT_KEEP_DELETED);
setInMemory(DEFAULT_IN_MEMORY); setInMemory(DEFAULT_IN_MEMORY);
setInMemoryCompaction(DEFAULT_IN_MEMORY_COMPACTION);
setBlockCacheEnabled(DEFAULT_BLOCKCACHE); setBlockCacheEnabled(DEFAULT_BLOCKCACHE);
setTimeToLive(DEFAULT_TTL); setTimeToLive(DEFAULT_TTL);
setCompressionType(Compression.Algorithm.valueOf(DEFAULT_COMPRESSION.toUpperCase(Locale.ROOT))); setCompressionType(Compression.Algorithm.valueOf(DEFAULT_COMPRESSION.toUpperCase(Locale.ROOT)));
@ -688,24 +707,24 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
} }
/** /**
* @return True if we prefer to keep the in-memory data compacted * @return in-memory compaction policy if set for the cf. Returns null if no policy is set
* for this column family * for this column family
*/ */
public boolean isInMemoryCompaction() { public MemoryCompaction getInMemoryCompaction() {
String value = getValue(IN_MEMORY_COMPACTION); String value = getValue(IN_MEMORY_COMPACTION);
if (value != null) { if (value != null) {
return Boolean.parseBoolean(value); return MemoryCompaction.valueOf(value);
} }
return DEFAULT_IN_MEMORY_COMPACTION; return null;
} }
/** /**
* @param inMemoryCompaction True if we prefer to keep the in-memory data compacted * @param inMemoryCompaction the preferred in-memory compaction policy
* for this column family * for this column family
* @return this (for chained invocation) * @return this (for chained invocation)
*/ */
public HColumnDescriptor setInMemoryCompaction(boolean inMemoryCompaction) { public HColumnDescriptor setInMemoryCompaction(MemoryCompaction inMemoryCompaction) {
return setValue(IN_MEMORY_COMPACTION, Boolean.toString(inMemoryCompaction)); return setValue(IN_MEMORY_COMPACTION, inMemoryCompaction.toString());
} }
public KeepDeletedCells getKeepDeletedCells() { public KeepDeletedCells getKeepDeletedCells() {
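Taken together, these HColumnDescriptor changes replace the old boolean flag with the MemoryCompaction enum. A minimal usage sketch of the new per-family API follows; the table and family names are hypothetical, and only the setInMemoryCompaction/getInMemoryCompaction calls come from this patch:

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class PerFamilyPolicyExample {
  public static void main(String[] args) {
    // Hypothetical table and family names, used only for illustration.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("usertable"));
    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    // Choose the in-memory compaction policy for this family; EAGER also
    // eliminates duplicate cells while the data is still in memory.
    hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.EAGER);
    htd.addFamily(hcd);
    // The getter now returns the enum, or null when the family has no explicit
    // setting, in which case the cluster-wide default applies.
    HColumnDescriptor.MemoryCompaction policy = hcd.getInMemoryCompaction();
    System.out.println("Configured policy: " + policy);
  }
}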


@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.ClassSize;
@ -51,6 +52,11 @@ import org.apache.hadoop.hbase.wal.WAL;
@InterfaceAudience.Private @InterfaceAudience.Private
public class CompactingMemStore extends AbstractMemStore { public class CompactingMemStore extends AbstractMemStore {
// The external setting of the compacting MemStore behaviour
public static final String COMPACTING_MEMSTORE_TYPE_KEY =
"hbase.hregion.compacting.memstore.type";
public static final String COMPACTING_MEMSTORE_TYPE_DEFAULT =
String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC);
// Default fraction of in-memory-flush size w.r.t. flush-to-disk size // Default fraction of in-memory-flush size w.r.t. flush-to-disk size
public static final String IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY = public static final String IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY =
"hbase.memstore.inmemoryflush.threshold.factor"; "hbase.memstore.inmemoryflush.threshold.factor";
@ -75,12 +81,13 @@ public class CompactingMemStore extends AbstractMemStore {
+ CompactionPipeline.DEEP_OVERHEAD + MemStoreCompactor.DEEP_OVERHEAD; + CompactionPipeline.DEEP_OVERHEAD + MemStoreCompactor.DEEP_OVERHEAD;
public CompactingMemStore(Configuration conf, CellComparator c, public CompactingMemStore(Configuration conf, CellComparator c,
HStore store, RegionServicesForStores regionServices) throws IOException { HStore store, RegionServicesForStores regionServices,
HColumnDescriptor.MemoryCompaction compactionPolicy) throws IOException {
super(conf, c); super(conf, c);
this.store = store; this.store = store;
this.regionServices = regionServices; this.regionServices = regionServices;
this.pipeline = new CompactionPipeline(getRegionServices()); this.pipeline = new CompactionPipeline(getRegionServices());
this.compactor = new MemStoreCompactor(this); this.compactor = new MemStoreCompactor(this, compactionPolicy);
initInmemoryFlushSize(conf); initInmemoryFlushSize(conf);
} }
@ -416,8 +423,8 @@ public class CompactingMemStore extends AbstractMemStore {
} }
@VisibleForTesting @VisibleForTesting
void initiateType() { void initiateType(HColumnDescriptor.MemoryCompaction compactionType) {
compactor.initiateAction(); compactor.initiateAction(compactionType);
} }
/** /**
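The new COMPACTING_MEMSTORE_TYPE_KEY is the external, cluster-wide knob announced in the commit title; COMPACTING_MEMSTORE_TYPE_DEFAULT makes BASIC the default, and a family-level IN_MEMORY_COMPACTION attribute, when present, overrides it (see the HStore change below). A hedged sketch of setting it programmatically; the same key can also go into hbase-site.xml:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;

public class GlobalPolicyConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Same effect as setting hbase.hregion.compacting.memstore.type in hbase-site.xml.
    // NONE disables in-memory compaction for every store whose column family
    // does not carry an explicit IN_MEMORY_COMPACTION setting.
    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
  }
}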


@ -241,12 +241,22 @@ public class HStore implements Store {
// to clone it? // to clone it?
scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, this.comparator); scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, this.comparator);
String className = conf.get(MEMSTORE_CLASS_NAME, DefaultMemStore.class.getName()); String className = conf.get(MEMSTORE_CLASS_NAME, DefaultMemStore.class.getName());
if (family.isInMemoryCompaction()) { HColumnDescriptor.MemoryCompaction inMemoryCompaction = family.getInMemoryCompaction();
className = CompactingMemStore.class.getName(); if(inMemoryCompaction == null) {
this.memstore = new CompactingMemStore(conf, this.comparator, this, inMemoryCompaction = HColumnDescriptor.MemoryCompaction.valueOf(conf.get
this.getHRegion().getRegionServicesForStores()); (CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
} else { CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT));
this.memstore = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] { }
switch (inMemoryCompaction) {
case BASIC :
case EAGER :
className = CompactingMemStore.class.getName();
this.memstore = new CompactingMemStore(conf, this.comparator, this,
this.getHRegion().getRegionServicesForStores(), inMemoryCompaction);
break;
case NONE :
default:
this.memstore = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] {
Configuration.class, CellComparator.class }, new Object[] { conf, this.comparator }); Configuration.class, CellComparator.class }, new Object[] { conf, this.comparator });
} }
LOG.info("Memstore class name is " + className); LOG.info("Memstore class name is " + className);
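The precedence applied here can be summed up as: use the family-level policy when it is set, otherwise fall back to hbase.hregion.compacting.memstore.type (default BASIC); only BASIC and EAGER select the CompactingMemStore, while NONE keeps the previously configured memstore class. A condensed, hypothetical helper restating that logic (not the literal HStore code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;

public class MemstorePolicyResolution {
  // Restates the selection above; 'family' and 'conf' are the store's column
  // family descriptor and configuration.
  static boolean usesCompactingMemStore(HColumnDescriptor family, Configuration conf) {
    HColumnDescriptor.MemoryCompaction policy = family.getInMemoryCompaction();
    if (policy == null) {
      // No family-level setting: consult the cluster-wide key (default BASIC).
      policy = HColumnDescriptor.MemoryCompaction.valueOf(
          conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
              CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT));
    }
    return policy == HColumnDescriptor.MemoryCompaction.BASIC
        || policy == HColumnDescriptor.MemoryCompaction.EAGER;
  }
}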


@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HColumnDescriptor.MemoryCompaction;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
@ -53,15 +54,6 @@ public class MemStoreCompactor {
+ ClassSize.ATOMIC_BOOLEAN // isInterrupted (the internals) + ClassSize.ATOMIC_BOOLEAN // isInterrupted (the internals)
); );
// Configuration options for MemStore compaction
static final String INDEX_COMPACTION_CONFIG = "index-compaction";
static final String DATA_COMPACTION_CONFIG = "data-compaction";
// The external setting of the compacting MemStore behaviour
// Compaction of the index without the data is the default
static final String COMPACTING_MEMSTORE_TYPE_KEY = "hbase.hregion.compacting.memstore.type";
static final String COMPACTING_MEMSTORE_TYPE_DEFAULT = INDEX_COMPACTION_CONFIG;
// The upper bound for the number of segments we store in the pipeline prior to merging. // The upper bound for the number of segments we store in the pipeline prior to merging.
// This constant is subject to further experimentation. // This constant is subject to further experimentation.
private static final int THRESHOLD_PIPELINE_SEGMENTS = 1; private static final int THRESHOLD_PIPELINE_SEGMENTS = 1;
@ -93,11 +85,12 @@ public class MemStoreCompactor {
private Action action = Action.FLATTEN; private Action action = Action.FLATTEN;
public MemStoreCompactor(CompactingMemStore compactingMemStore) { public MemStoreCompactor(CompactingMemStore compactingMemStore,
MemoryCompaction compactionPolicy) {
this.compactingMemStore = compactingMemStore; this.compactingMemStore = compactingMemStore;
this.compactionKVMax = compactingMemStore.getConfiguration() this.compactionKVMax = compactingMemStore.getConfiguration()
.getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT); .getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
initiateAction(); initiateAction(compactionPolicy);
} }
/**---------------------------------------------------------------------- /**----------------------------------------------------------------------
@ -277,17 +270,17 @@ public class MemStoreCompactor {
* Initiate the action according to user config, after its default is Action.MERGE * Initiate the action according to user config, after its default is Action.MERGE
*/ */
@VisibleForTesting @VisibleForTesting
void initiateAction() { void initiateAction(MemoryCompaction compType) {
String memStoreType = compactingMemStore.getConfiguration().get(COMPACTING_MEMSTORE_TYPE_KEY,
COMPACTING_MEMSTORE_TYPE_DEFAULT);
switch (memStoreType) { switch (compType){
case INDEX_COMPACTION_CONFIG: action = Action.MERGE; case NONE: action = Action.NOOP;
break; break;
case DATA_COMPACTION_CONFIG: action = Action.COMPACT; case BASIC: action = Action.MERGE;
break;
case EAGER: action = Action.COMPACT;
break; break;
default: default:
throw new RuntimeException("Unknown memstore type " + memStoreType); // sanity check throw new RuntimeException("Unknown memstore type " + compType); // sanity check
} }
} }
} }


@ -1881,9 +1881,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
for (byte[] family : families) { for (byte[] family : families) {
HColumnDescriptor hcd = new HColumnDescriptor(family); HColumnDescriptor hcd = new HColumnDescriptor(family);
if(compactedMemStore != null && i < compactedMemStore.length) { if(compactedMemStore != null && i < compactedMemStore.length) {
hcd.setInMemoryCompaction(true); hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.BASIC);
} else { } else {
hcd.setInMemoryCompaction(false); hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.NONE);
} }
i++; i++;


@ -80,6 +80,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.RandomDistribution; import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil; import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration; import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
import org.apache.hadoop.hbase.trace.SpanReceiverHost; import org.apache.hadoop.hbase.trace.SpanReceiverHost;
import org.apache.hadoop.hbase.util.*; import org.apache.hadoop.hbase.util.*;
@ -375,9 +376,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
if (opts.inMemoryCF) { if (opts.inMemoryCF) {
family.setInMemory(true); family.setInMemory(true);
} }
if(opts.inMemoryCompaction) { family.setInMemoryCompaction(opts.inMemoryCompaction);
family.setInMemoryCompaction(true);
}
desc.addFamily(family); desc.addFamily(family);
if (opts.replicas != DEFAULT_OPTS.replicas) { if (opts.replicas != DEFAULT_OPTS.replicas) {
desc.setRegionReplication(opts.replicas); desc.setRegionReplication(opts.replicas);
@ -636,7 +635,9 @@ public class PerformanceEvaluation extends Configured implements Tool {
int columns = 1; int columns = 1;
int caching = 30; int caching = 30;
boolean addColumns = true; boolean addColumns = true;
boolean inMemoryCompaction = false; HColumnDescriptor.MemoryCompaction inMemoryCompaction =
HColumnDescriptor.MemoryCompaction.valueOf(
CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT);
public TestOptions() {} public TestOptions() {}
@ -981,11 +982,11 @@ public class PerformanceEvaluation extends Configured implements Tool {
this.addColumns = addColumns; this.addColumns = addColumns;
} }
public void setInMemoryCompaction(boolean inMemoryCompaction) { public void setInMemoryCompaction(HColumnDescriptor.MemoryCompaction inMemoryCompaction) {
this.inMemoryCompaction = inMemoryCompaction; this.inMemoryCompaction = inMemoryCompaction;
} }
public boolean getInMemoryCompaction() { public HColumnDescriptor.MemoryCompaction getInMemoryCompaction() {
return this.inMemoryCompaction; return this.inMemoryCompaction;
} }
} }
@ -2139,7 +2140,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
final String inMemoryCompaction = "--inmemoryCompaction="; final String inMemoryCompaction = "--inmemoryCompaction=";
if (cmd.startsWith(inMemoryCompaction)) { if (cmd.startsWith(inMemoryCompaction)) {
opts.inMemoryCompaction = Boolean.parseBoolean(cmd.substring(inMemoryCompaction.length())); opts.inMemoryCompaction = opts.inMemoryCompaction.valueOf(cmd.substring
(inMemoryCompaction.length()));
continue; continue;
} }
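With the option now typed as the enum, the PerformanceEvaluation flag takes a policy name rather than a boolean: --inmemoryCompaction=BASIC (or EAGER/NONE) when running org.apache.hadoop.hbase.PerformanceEvaluation. The value is parsed with MemoryCompaction.valueOf, so it must match one of the enum constant names exactly.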


@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.testclassification.FlakeyTests; import org.apache.hadoop.hbase.testclassification.FlakeyTests;
import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MediumTests;
@ -100,6 +101,8 @@ public class TestAcidGuarantees implements Tool {
conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
ConstantSizeRegionSplitPolicy.class.getName()); ConstantSizeRegionSplitPolicy.class.getName());
conf.setInt("hfile.format.version", 3); // for mob tests conf.setInt("hfile.format.version", 3); // for mob tests
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
util = new HBaseTestingUtility(conf); util = new HBaseTestingUtility(conf);
} }


@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
@ -249,6 +250,8 @@ public class TestIOFencing {
c.setLong("hbase.hstore.blockingStoreFiles", 1000); c.setLong("hbase.hstore.blockingStoreFiles", 1000);
// Compact quickly after we tell it to! // Compact quickly after we tell it to!
c.setInt("hbase.regionserver.thread.splitcompactcheckfrequency", 1000); c.setInt("hbase.regionserver.thread.splitcompactcheckfrequency", 1000);
c.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
LOG.info("Starting mini cluster"); LOG.info("Starting mini cluster");
TEST_UTIL.startMiniCluster(1); TEST_UTIL.startMiniCluster(1);
CompactionBlockerRegion compactingRegion = null; CompactionBlockerRegion compactingRegion = null;


@ -35,11 +35,13 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.HRegionServer;
@ -92,6 +94,9 @@ public class TestHFileArchiving {
// prevent aggressive region split // prevent aggressive region split
conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
ConstantSizeRegionSplitPolicy.class.getName()); ConstantSizeRegionSplitPolicy.class.getName());
// no memory compaction
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
} }
@After @After


@ -20,7 +20,9 @@ package org.apache.hadoop.hbase.client;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils; import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.LargeTests;
@ -50,6 +52,8 @@ public class TestMobSnapshotFromClient extends TestSnapshotFromClient {
protected static void setupConf(Configuration conf) { protected static void setupConf(Configuration conf) {
TestSnapshotFromClient.setupConf(conf); TestSnapshotFromClient.setupConf(conf);
conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0); conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
} }
@Override @Override


@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.ClientTests;
@ -117,6 +118,8 @@ public class TestSnapshotCloneIndependence {
// will even trigger races between creating the directory containing back references and // will even trigger races between creating the directory containing back references and
// the back reference itself. // the back reference itself.
conf.setInt("hbase.master.hfilecleaner.ttl", CLEANER_INTERVAL); conf.setInt("hbase.master.hfilecleaner.ttl", CLEANER_INTERVAL);
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
} }
@Before @Before


@ -37,6 +37,7 @@ import java.util.concurrent.Future;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
@ -52,6 +53,7 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MasterTests;
@ -62,6 +64,7 @@ import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.junit.After; import org.junit.After;
import org.junit.Before;
import org.junit.Test; import org.junit.Test;
import org.junit.experimental.categories.Category; import org.junit.experimental.categories.Category;
@ -96,6 +99,12 @@ public class TestTableLockManager {
TEST_UTIL.startMiniZKCluster(1); TEST_UTIL.startMiniZKCluster(1);
} }
@Before
public void setUp() throws IOException {
TEST_UTIL.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
}
@After @After
public void tearDown() throws Exception { public void tearDown() throws Exception {
TEST_UTIL.shutdownMiniCluster(); TEST_UTIL.shutdownMiniCluster();


@ -28,11 +28,13 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.TableLockManager;
import org.apache.hadoop.hbase.master.procedure.TestMasterProcedureScheduler.TestTableProcedure; import org.apache.hadoop.hbase.master.procedure.TestMasterProcedureScheduler.TestTableProcedure;
import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MasterTests;
@ -55,6 +57,8 @@ public class TestMasterProcedureSchedulerConcurrency {
@Before @Before
public void setUp() throws IOException { public void setUp() throws IOException {
conf = HBaseConfiguration.create(); conf = HBaseConfiguration.create();
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
queue = new MasterProcedureScheduler(conf, new TableLockManager.NullTableLockManager()); queue = new MasterProcedureScheduler(conf, new TableLockManager.NullTableLockManager());
queue.start(); queue.start();
} }


@ -82,7 +82,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
public void setUp() throws Exception { public void setUp() throws Exception {
compactingSetUp(); compactingSetUp();
this.memstore = new CompactingMemStore(HBaseConfiguration.create(), CellComparator.COMPARATOR, this.memstore = new CompactingMemStore(HBaseConfiguration.create(), CellComparator.COMPARATOR,
store, regionServicesForStores); store, regionServicesForStores, HColumnDescriptor.MemoryCompaction.EAGER);
} }
protected void compactingSetUp() throws Exception { protected void compactingSetUp() throws Exception {
@ -135,7 +135,8 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
// use case 3: first in snapshot second in kvset // use case 3: first in snapshot second in kvset
this.memstore = new CompactingMemStore(HBaseConfiguration.create(), this.memstore = new CompactingMemStore(HBaseConfiguration.create(),
CellComparator.COMPARATOR, store, regionServicesForStores); CellComparator.COMPARATOR, store, regionServicesForStores,
HColumnDescriptor.MemoryCompaction.EAGER);
this.memstore.add(kv1.clone(), null); this.memstore.add(kv1.clone(), null);
// As compaction is starting in the background the repetition // As compaction is starting in the background the repetition
// of the k1 might be removed BUT the scanners created earlier // of the k1 might be removed BUT the scanners created earlier
@ -468,8 +469,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
throws IOException { throws IOException {
// set memstore to do data compaction and not to use the speculative scan // set memstore to do data compaction and not to use the speculative scan
memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "data-compaction"); HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER;
((CompactingMemStore)memstore).initiateType(); memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(compactionType));
((CompactingMemStore)memstore).initiateType(compactionType);
byte[] row = Bytes.toBytes("testrow"); byte[] row = Bytes.toBytes("testrow");
byte[] fam = Bytes.toBytes("testfamily"); byte[] fam = Bytes.toBytes("testfamily");
@ -549,8 +552,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
public void testCompaction1Bucket() throws IOException { public void testCompaction1Bucket() throws IOException {
// set memstore to do data compaction and not to use the speculative scan // set memstore to do data compaction and not to use the speculative scan
memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "data-compaction"); HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER;
((CompactingMemStore)memstore).initiateType(); memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(compactionType));
((CompactingMemStore)memstore).initiateType(compactionType);
String[] keys1 = { "A", "A", "B", "C" }; //A1, A2, B3, C4 String[] keys1 = { "A", "A", "B", "C" }; //A1, A2, B3, C4
@ -584,8 +589,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
public void testCompaction2Buckets() throws IOException { public void testCompaction2Buckets() throws IOException {
// set memstore to do data compaction and not to use the speculative scan // set memstore to do data compaction and not to use the speculative scan
memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "data-compaction"); HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER;
((CompactingMemStore)memstore).initiateType(); memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(compactionType));
((CompactingMemStore)memstore).initiateType(compactionType);
String[] keys1 = { "A", "A", "B", "C" }; String[] keys1 = { "A", "A", "B", "C" };
String[] keys2 = { "A", "B", "D" }; String[] keys2 = { "A", "B", "D" };
@ -637,8 +644,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
public void testCompaction3Buckets() throws IOException { public void testCompaction3Buckets() throws IOException {
// set memstore to do data compaction and not to use the speculative scan // set memstore to do data compaction and not to use the speculative scan
memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "data-compaction"); HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER;
((CompactingMemStore)memstore).initiateType(); memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(compactionType));
((CompactingMemStore)memstore).initiateType(compactionType);
String[] keys1 = { "A", "A", "B", "C" }; String[] keys1 = { "A", "A", "B", "C" };
String[] keys2 = { "A", "B", "D" }; String[] keys2 = { "A", "B", "D" };
String[] keys3 = { "D", "B", "B" }; String[] keys3 = { "D", "B", "B" };


@ -62,12 +62,13 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
compactingSetUp(); compactingSetUp();
Configuration conf = HBaseConfiguration.create(); Configuration conf = HBaseConfiguration.create();
// set memstore to do data compaction and not to use the speculative scan // set memstore to do data compaction
conf.set("hbase.hregion.compacting.memstore.type", "data-compaction"); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER));
this.memstore = this.memstore =
new CompactingMemStore(conf, CellComparator.COMPARATOR, store, new CompactingMemStore(conf, CellComparator.COMPARATOR, store,
regionServicesForStores); regionServicesForStores, HColumnDescriptor.MemoryCompaction.EAGER);
} }
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
@ -266,8 +267,10 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
String[] keys2 = { "A", "B", "D", "G", "I", "J"}; String[] keys2 = { "A", "B", "D", "G", "I", "J"};
String[] keys3 = { "D", "B", "B", "E" }; String[] keys3 = { "D", "B", "B", "E" };
memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "index-compaction"); HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.BASIC;
((CompactingMemStore)memstore).initiateType(); memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(compactionType));
((CompactingMemStore)memstore).initiateType(compactionType);
addRowsByKeys(memstore, keys1); addRowsByKeys(memstore, keys1);
((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline should not compact ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline should not compact


@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestCase; import org.apache.hadoop.hbase.HBaseTestCase;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Delete;
@ -94,6 +95,8 @@ public class TestMajorCompaction {
conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024); conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100); conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100);
compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3); compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
secondRowBytes = START_KEY_BYTES.clone(); secondRowBytes = START_KEY_BYTES.clone();
// Increment the least significant character so we get to next row. // Increment the least significant character so we get to next row.


@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil;
import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.wal.AbstractFSWALProvider; import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WAL;
import org.junit.Before;
import org.junit.Ignore; import org.junit.Ignore;
import org.junit.Test; import org.junit.Test;
import org.junit.experimental.categories.Category; import org.junit.experimental.categories.Category;
@ -82,6 +83,12 @@ public class TestPerColumnFamilyFlush {
public static final byte[] FAMILY3 = FAMILIES[2]; public static final byte[] FAMILY3 = FAMILIES[2];
@Before
public void setUp() throws IOException {
TEST_UTIL.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
}
private HRegion initHRegion(String callingMethod, Configuration conf) throws IOException { private HRegion initHRegion(String callingMethod, Configuration conf) throws IOException {
HTableDescriptor htd = new HTableDescriptor(TABLENAME); HTableDescriptor htd = new HTableDescriptor(TABLENAME);
for (byte[] family : FAMILIES) { for (byte[] family : FAMILIES) {
@ -128,7 +135,9 @@ public class TestPerColumnFamilyFlush {
conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 200 * 1024); conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 200 * 1024);
conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushAllLargeStoresPolicy.class.getName()); conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushAllLargeStoresPolicy.class.getName());
conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
40 * 1024); 40 * 1024);
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
// Intialize the region // Intialize the region
Region region = initHRegion("testSelectiveFlushWithDataCompaction", conf); Region region = initHRegion("testSelectiveFlushWithDataCompaction", conf);
// Add 1200 entries for CF1, 100 for CF2 and 50 for CF3 // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3


@ -74,6 +74,8 @@ public class TestRecoveredEdits {
Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
// Set it so we flush every 1M or so. Thats a lot. // Set it so we flush every 1M or so. Thats a lot.
conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024); conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
// The file of recovered edits has a column family of 'meta'. Also has an encoded regionname // The file of recovered edits has a column family of 'meta'. Also has an encoded regionname
// of 4823016d8fca70b25503ee07f4c6d79f which needs to match on replay. // of 4823016d8fca70b25503ee07f4c6d79f which needs to match on replay.
final String encodedRegionName = "4823016d8fca70b25503ee07f4c6d79f"; final String encodedRegionName = "4823016d8fca70b25503ee07f4c6d79f";


@ -71,7 +71,12 @@ public class TestWalAndCompactingMemStoreFlush {
for (byte[] family : FAMILIES) { for (byte[] family : FAMILIES) {
HColumnDescriptor hcd = new HColumnDescriptor(family); HColumnDescriptor hcd = new HColumnDescriptor(family);
// even column families are going to have compacted memstore // even column families are going to have compacted memstore
if(i%2 == 0) hcd.setInMemoryCompaction(true); if(i%2 == 0) {
hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.valueOf(
conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY)));
} else {
hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.NONE);
}
htd.addFamily(hcd); htd.addFamily(hcd);
i++; i++;
} }
@ -123,7 +128,7 @@ public class TestWalAndCompactingMemStoreFlush {
} }
@Test(timeout = 180000) @Test(timeout = 180000)
public void testSelectiveFlushWithDataCompaction() throws IOException { public void testSelectiveFlushWithEager() throws IOException {
// Set up the configuration // Set up the configuration
Configuration conf = HBaseConfiguration.create(); Configuration conf = HBaseConfiguration.create();
@ -133,10 +138,11 @@ public class TestWalAndCompactingMemStoreFlush {
conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 * 1024); conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 * 1024);
conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.25); conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.25);
// set memstore to do data compaction // set memstore to do data compaction
conf.set("hbase.hregion.compacting.memstore.type", "data-compaction"); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER));
// Intialize the region // Intialize the region
Region region = initHRegion("testSelectiveFlushWithDataCompaction", conf); Region region = initHRegion("testSelectiveFlushWithEager", conf);
// Add 1200 entries for CF1, 100 for CF2 and 50 for CF3 // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
for (int i = 1; i <= 1200; i++) { for (int i = 1; i <= 1200; i++) {
@ -368,7 +374,8 @@ public class TestWalAndCompactingMemStoreFlush {
conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 * 1024); conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 * 1024);
conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5); conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
// set memstore to index-compaction // set memstore to index-compaction
conf.set("hbase.hregion.compacting.memstore.type", "index-compaction"); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC));
// Initialize the region // Initialize the region
Region region = initHRegion("testSelectiveFlushWithIndexCompaction", conf); Region region = initHRegion("testSelectiveFlushWithIndexCompaction", conf);
@ -621,7 +628,8 @@ public class TestWalAndCompactingMemStoreFlush {
1024); 1024);
conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5); conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
// set memstore to do data compaction and not to use the speculative scan // set memstore to do data compaction and not to use the speculative scan
conf.set("hbase.hregion.compacting.memstore.type", "data-compaction"); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER));
// Intialize the HRegion // Intialize the HRegion
HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf); HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf);
@ -751,7 +759,8 @@ public class TestWalAndCompactingMemStoreFlush {
200 * 1024); 200 * 1024);
conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5); conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
// set memstore to do data compaction and not to use the speculative scan // set memstore to do data compaction and not to use the speculative scan
conf.set("hbase.hregion.compacting.memstore.type", "index-compaction"); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC));
// Intialize the HRegion // Intialize the HRegion
HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf); HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf);
@ -874,7 +883,8 @@ public class TestWalAndCompactingMemStoreFlush {
200 * 1024); 200 * 1024);
conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5); conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
// set memstore to do data compaction and not to use the speculative scan // set memstore to do data compaction and not to use the speculative scan
conf.set("hbase.hregion.compacting.memstore.type", "index-compaction"); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC));
// Successfully initialize the HRegion // Successfully initialize the HRegion
HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf); HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf);


@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Store; import org.apache.hadoop.hbase.regionserver.Store;
@ -92,27 +93,30 @@ public abstract class AbstractTestLogRolling {
/**** configuration for testLogRolling ****/ /**** configuration for testLogRolling ****/
// Force a region split after every 768KB // Force a region split after every 768KB
TEST_UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, 768L * 1024L); Configuration conf= TEST_UTIL.getConfiguration();
conf.setLong(HConstants.HREGION_MAX_FILESIZE, 768L * 1024L);
// We roll the log after every 32 writes // We roll the log after every 32 writes
TEST_UTIL.getConfiguration().setInt("hbase.regionserver.maxlogentries", 32); conf.setInt("hbase.regionserver.maxlogentries", 32);
TEST_UTIL.getConfiguration().setInt("hbase.regionserver.logroll.errors.tolerated", 2); conf.setInt("hbase.regionserver.logroll.errors.tolerated", 2);
TEST_UTIL.getConfiguration().setInt("hbase.rpc.timeout", 10 * 1000); conf.setInt("hbase.rpc.timeout", 10 * 1000);
// For less frequently updated regions flush after every 2 flushes // For less frequently updated regions flush after every 2 flushes
TEST_UTIL.getConfiguration().setInt("hbase.hregion.memstore.optionalflushcount", 2); conf.setInt("hbase.hregion.memstore.optionalflushcount", 2);
// We flush the cache after every 8192 bytes // We flush the cache after every 8192 bytes
TEST_UTIL.getConfiguration().setInt( conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 8192);
HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 8192);
// Increase the amount of time between client retries // Increase the amount of time between client retries
TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 10 * 1000); conf.setLong("hbase.client.pause", 10 * 1000);
// Reduce thread wake frequency so that other threads can get // Reduce thread wake frequency so that other threads can get
// a chance to run. // a chance to run.
TEST_UTIL.getConfiguration().setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000); conf.setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000);
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
} }
@Before @Before


@ -73,6 +73,7 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
import org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher; import org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher;
@ -138,6 +139,8 @@ public abstract class AbstractTestWALReplay {
Configuration conf = TEST_UTIL.getConfiguration(); Configuration conf = TEST_UTIL.getConfiguration();
// The below config supported by 0.20-append and CDH3b2 // The below config supported by 0.20-append and CDH3b2
conf.setInt("dfs.client.block.recovery.retries", 2); conf.setInt("dfs.client.block.recovery.retries", 2);
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
TEST_UTIL.startMiniCluster(3); TEST_UTIL.startMiniCluster(3);
Path hbaseRootDir = Path hbaseRootDir =
TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase")); TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));


@ -22,8 +22,10 @@ import static org.junit.Assert.assertEquals;
import java.io.IOException; import java.io.IOException;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests; import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
import org.apache.hadoop.hbase.wal.AsyncFSWALProvider; import org.apache.hadoop.hbase.wal.AsyncFSWALProvider;


@ -32,6 +32,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HColumnDescriptor;
@ -74,14 +75,15 @@ public class TestLogRolling extends AbstractTestLogRolling {
/**** configuration for testLogRollOnDatanodeDeath ****/ /**** configuration for testLogRollOnDatanodeDeath ****/
// lower the namenode & datanode heartbeat so the namenode // lower the namenode & datanode heartbeat so the namenode
// quickly detects datanode failures // quickly detects datanode failures
TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000); Configuration conf= TEST_UTIL.getConfiguration();
TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1); conf.setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
conf.setInt("dfs.heartbeat.interval", 1);
// the namenode might still try to choose the recently-dead datanode // the namenode might still try to choose the recently-dead datanode
// for a pipeline, so try to a new pipeline multiple times // for a pipeline, so try to a new pipeline multiple times
TEST_UTIL.getConfiguration().setInt("dfs.client.block.write.retries", 30); conf.setInt("dfs.client.block.write.retries", 30);
TEST_UTIL.getConfiguration().setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2); conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
TEST_UTIL.getConfiguration().setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 3); conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 3);
TEST_UTIL.getConfiguration().set(WALFactory.WAL_PROVIDER, "filesystem"); conf.set(WALFactory.WAL_PROVIDER, "filesystem");
AbstractTestLogRolling.setUpBeforeClass(); AbstractTestLogRolling.setUpBeforeClass();
} }


@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CategoryBasedTimeout; import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableName;
@ -45,6 +46,7 @@ import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.client.SnapshotDescription; import org.apache.hadoop.hbase.client.SnapshotDescription;
@ -103,7 +105,9 @@ public class TestFlushSnapshotFromClient {
// Enable snapshot // Enable snapshot
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true); conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
ConstantSizeRegionSplitPolicy.class.getName()); ConstantSizeRegionSplitPolicy.class.getName());
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
} }
@Before @Before


@ -816,7 +816,7 @@ module Hbase
family.setCacheDataOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE) family.setCacheDataOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE)
family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY) family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY)
family.setInMemoryCompaction( family.setInMemoryCompaction(
JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION) org.apache.hadoop.hbase.HColumnDescriptor.MemoryCompaction.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION)
family.setTimeToLive(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL)) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL) family.setTimeToLive(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL)) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL)
family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING) family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING)
family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE) family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE)
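On the shell side, IN_MEMORY_COMPACTION is likewise parsed with MemoryCompaction.valueOf, so the column family attribute now takes an enum name instead of a boolean, e.g. create 't1', {NAME => 'f1', IN_MEMORY_COMPACTION => 'BASIC'} (table and family names here are placeholders).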