HBASE-17294: External configuration for memory compaction
Signed-off-by: Michael Stack <stack@apache.org>
parent de98f68408
commit a9310436d5
@@ -66,6 +66,32 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
 
+  public static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION";
+
+  /**
+   * Enum describing all possible memory compaction policies
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
+  public enum MemoryCompaction {
+    /**
+     * No memory compaction, when size threshold is exceeded data is flushed to disk
+     */
+    NONE,
+    /**
+     * Basic policy applies optimizations which modify the index to a more compacted representation.
+     * This is beneficial in all access patterns. The smaller the cells are the greater the
+     * benefit of this policy.
+     * This is the default policy.
+     */
+    BASIC,
+    /**
+     * In addition to compacting the index representation as the basic policy, eager policy
+     * eliminates duplication while the data is still in memory (much like the
+     * on-disk compaction does after the data is flushed to disk). This policy is most useful for
+     * applications with high data churn or small working sets.
+     */
+    EAGER
+  }
+
   // These constants are used as FileInfo keys
   public static final String COMPRESSION = "COMPRESSION";
   public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
@@ -173,11 +199,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
    */
   public static final boolean DEFAULT_IN_MEMORY = false;
 
-  /**
-   * Default setting for whether to set the memstore of this column family as compacting or not.
-   */
-  public static final boolean DEFAULT_IN_MEMORY_COMPACTION = false;
-
   /**
    * Default setting for preventing deleted from being collected immediately.
    */
@@ -263,7 +284,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
       DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
       DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
       DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
-      DEFAULT_VALUES.put(IN_MEMORY_COMPACTION, String.valueOf(DEFAULT_IN_MEMORY_COMPACTION));
       DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
       DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
       DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
@@ -329,7 +349,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
     setMinVersions(DEFAULT_MIN_VERSIONS);
     setKeepDeletedCells(DEFAULT_KEEP_DELETED);
     setInMemory(DEFAULT_IN_MEMORY);
-    setInMemoryCompaction(DEFAULT_IN_MEMORY_COMPACTION);
     setBlockCacheEnabled(DEFAULT_BLOCKCACHE);
     setTimeToLive(DEFAULT_TTL);
     setCompressionType(Compression.Algorithm.valueOf(DEFAULT_COMPRESSION.toUpperCase(Locale.ROOT)));
@@ -688,24 +707,24 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
   }
 
   /**
-   * @return True if we prefer to keep the in-memory data compacted
+   * @return in-memory compaction policy if set for the cf. Returns null if no policy is set
+   *          for this column family
    */
-  public boolean isInMemoryCompaction() {
+  public MemoryCompaction getInMemoryCompaction() {
     String value = getValue(IN_MEMORY_COMPACTION);
     if (value != null) {
-      return Boolean.parseBoolean(value);
+      return MemoryCompaction.valueOf(value);
     }
-    return DEFAULT_IN_MEMORY_COMPACTION;
+    return null;
   }
 
   /**
-   * @param inMemoryCompaction True if we prefer to keep the in-memory data compacted
+   * @param inMemoryCompaction the preferred in-memory compaction policy
+   *                  for this column family
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setInMemoryCompaction(boolean inMemoryCompaction) {
-    return setValue(IN_MEMORY_COMPACTION, Boolean.toString(inMemoryCompaction));
+  public HColumnDescriptor setInMemoryCompaction(MemoryCompaction inMemoryCompaction) {
+    return setValue(IN_MEMORY_COMPACTION, inMemoryCompaction.toString());
   }
 
   public KeepDeletedCells getKeepDeletedCells() {
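For orientation, a minimal sketch of how the reworked column-family API is used. This is an illustration, not part of the patch; the table name "t1" and family "cf" are made up.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HColumnDescriptor.MemoryCompaction;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class MemoryCompactionExample {
  public static void main(String[] args) {
    // Choose the in-memory compaction policy per column family.
    HColumnDescriptor cf = new HColumnDescriptor("cf");
    cf.setInMemoryCompaction(MemoryCompaction.EAGER);

    // getInMemoryCompaction() now returns null when the attribute is unset,
    // which means "defer to hbase.hregion.compacting.memstore.type".
    MemoryCompaction policy = cf.getInMemoryCompaction();
    System.out.println("configured policy: " + policy);

    HTableDescriptor table = new HTableDescriptor(TableName.valueOf("t1"));
    table.addFamily(cf);
  }
}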
@@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -51,6 +52,11 @@ import org.apache.hadoop.hbase.wal.WAL;
 @InterfaceAudience.Private
 public class CompactingMemStore extends AbstractMemStore {
 
+  // The external setting of the compacting MemStore behaviour
+  public static final String COMPACTING_MEMSTORE_TYPE_KEY =
+      "hbase.hregion.compacting.memstore.type";
+  public static final String COMPACTING_MEMSTORE_TYPE_DEFAULT =
+      String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC);
   // Default fraction of in-memory-flush size w.r.t. flush-to-disk size
   public static final String IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY =
       "hbase.memstore.inmemoryflush.threshold.factor";
@@ -75,12 +81,13 @@ public class CompactingMemStore extends AbstractMemStore {
       + CompactionPipeline.DEEP_OVERHEAD + MemStoreCompactor.DEEP_OVERHEAD;
 
   public CompactingMemStore(Configuration conf, CellComparator c,
-      HStore store, RegionServicesForStores regionServices) throws IOException {
+      HStore store, RegionServicesForStores regionServices,
+      HColumnDescriptor.MemoryCompaction compactionPolicy) throws IOException {
     super(conf, c);
     this.store = store;
     this.regionServices = regionServices;
     this.pipeline = new CompactionPipeline(getRegionServices());
-    this.compactor = new MemStoreCompactor(this);
+    this.compactor = new MemStoreCompactor(this, compactionPolicy);
     initInmemoryFlushSize(conf);
   }
 
@@ -416,8 +423,8 @@ public class CompactingMemStore extends AbstractMemStore {
   }
 
   @VisibleForTesting
-  void initiateType() {
-    compactor.initiateAction();
+  void initiateType(HColumnDescriptor.MemoryCompaction compactionType) {
+    compactor.initiateAction(compactionType);
   }
 
   /**
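The cluster-wide setting moves with this change: the key hbase.hregion.compacting.memstore.type now lives on CompactingMemStore and defaults to BASIC. A minimal sketch of overriding it in code (illustrative only; in a real deployment this would normally go into hbase-site.xml):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;

public class GlobalPolicyExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Turn in-memory compaction off for every family that does not set
    // its own IN_MEMORY_COMPACTION attribute; the patch's default is BASIC.
    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
  }
}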
@@ -241,12 +241,22 @@ public class HStore implements Store {
     // to clone it?
     scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, this.comparator);
     String className = conf.get(MEMSTORE_CLASS_NAME, DefaultMemStore.class.getName());
-    if (family.isInMemoryCompaction()) {
-      className = CompactingMemStore.class.getName();
-      this.memstore = new CompactingMemStore(conf, this.comparator, this,
-          this.getHRegion().getRegionServicesForStores());
-    } else {
-      this.memstore = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] {
+    HColumnDescriptor.MemoryCompaction inMemoryCompaction = family.getInMemoryCompaction();
+    if(inMemoryCompaction == null) {
+      inMemoryCompaction = HColumnDescriptor.MemoryCompaction.valueOf(conf.get
+          (CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+              CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT));
+    }
+    switch (inMemoryCompaction) {
+      case BASIC :
+      case EAGER :
+        className = CompactingMemStore.class.getName();
+        this.memstore = new CompactingMemStore(conf, this.comparator, this,
+            this.getHRegion().getRegionServicesForStores(), inMemoryCompaction);
+        break;
+      case NONE :
+      default:
+        this.memstore = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] {
           Configuration.class, CellComparator.class }, new Object[] { conf, this.comparator });
+    }
     LOG.info("Memstore class name is " + className);
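The net effect of the HStore change: a policy set on the column family always wins; only when HColumnDescriptor.getInMemoryCompaction() returns null does the store consult hbase.hregion.compacting.memstore.type, whose default is BASIC. Out of the box every store therefore gets a CompactingMemStore with index compaction, while NONE falls back to the reflective path and the class named by MEMSTORE_CLASS_NAME (DefaultMemStore unless overridden).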
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor.MemoryCompaction;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -53,15 +54,6 @@ public class MemStoreCompactor {
       + ClassSize.ATOMIC_BOOLEAN // isInterrupted (the internals)
   );
 
-  // Configuration options for MemStore compaction
-  static final String INDEX_COMPACTION_CONFIG = "index-compaction";
-  static final String DATA_COMPACTION_CONFIG = "data-compaction";
-
-  // The external setting of the compacting MemStore behaviour
-  // Compaction of the index without the data is the default
-  static final String COMPACTING_MEMSTORE_TYPE_KEY = "hbase.hregion.compacting.memstore.type";
-  static final String COMPACTING_MEMSTORE_TYPE_DEFAULT = INDEX_COMPACTION_CONFIG;
-
   // The upper bound for the number of segments we store in the pipeline prior to merging.
   // This constant is subject to further experimentation.
   private static final int THRESHOLD_PIPELINE_SEGMENTS = 1;
@@ -93,11 +85,12 @@ public class MemStoreCompactor {
 
   private Action action = Action.FLATTEN;
 
-  public MemStoreCompactor(CompactingMemStore compactingMemStore) {
+  public MemStoreCompactor(CompactingMemStore compactingMemStore,
+      MemoryCompaction compactionPolicy) {
     this.compactingMemStore = compactingMemStore;
     this.compactionKVMax = compactingMemStore.getConfiguration()
         .getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
-    initiateAction();
+    initiateAction(compactionPolicy);
   }
 
   /**----------------------------------------------------------------------
@@ -277,17 +270,17 @@ public class MemStoreCompactor {
    * Initiate the action according to user config, after its default is Action.MERGE
    */
   @VisibleForTesting
-  void initiateAction() {
-    String memStoreType = compactingMemStore.getConfiguration().get(COMPACTING_MEMSTORE_TYPE_KEY,
-        COMPACTING_MEMSTORE_TYPE_DEFAULT);
+  void initiateAction(MemoryCompaction compType) {
 
-    switch (memStoreType) {
-      case INDEX_COMPACTION_CONFIG: action = Action.MERGE;
+    switch (compType){
+      case NONE: action = Action.NOOP;
         break;
-      case DATA_COMPACTION_CONFIG: action = Action.COMPACT;
+      case BASIC: action = Action.MERGE;
         break;
+      case EAGER: action = Action.COMPACT;
+        break;
       default:
-        throw new RuntimeException("Unknown memstore type " + memStoreType); // sanity check
+        throw new RuntimeException("Unknown memstore type " + compType); // sanity check
     }
   }
 }
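With the old "index-compaction"/"data-compaction" strings removed, the policy-to-action mapping is now explicit: NONE selects Action.NOOP, BASIC selects Action.MERGE (compact only the index), and EAGER selects Action.COMPACT (also eliminate duplicate data while it is still in memory).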
@@ -1881,9 +1881,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     for (byte[] family : families) {
       HColumnDescriptor hcd = new HColumnDescriptor(family);
       if(compactedMemStore != null && i < compactedMemStore.length) {
-        hcd.setInMemoryCompaction(true);
+        hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.BASIC);
       } else {
-        hcd.setInMemoryCompaction(false);
+        hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.NONE);
 
       }
       i++;
@@ -80,6 +80,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
 import org.apache.hadoop.hbase.trace.SpanReceiverHost;
 import org.apache.hadoop.hbase.util.*;
@@ -375,9 +376,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
       if (opts.inMemoryCF) {
         family.setInMemory(true);
       }
-      if(opts.inMemoryCompaction) {
-        family.setInMemoryCompaction(true);
-      }
+      family.setInMemoryCompaction(opts.inMemoryCompaction);
       desc.addFamily(family);
       if (opts.replicas != DEFAULT_OPTS.replicas) {
         desc.setRegionReplication(opts.replicas);
@@ -636,7 +635,9 @@ public class PerformanceEvaluation extends Configured implements Tool {
     int columns = 1;
     int caching = 30;
     boolean addColumns = true;
-    boolean inMemoryCompaction = false;
+    HColumnDescriptor.MemoryCompaction inMemoryCompaction =
+        HColumnDescriptor.MemoryCompaction.valueOf(
+            CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT);
 
     public TestOptions() {}
 
@@ -981,11 +982,11 @@ public class PerformanceEvaluation extends Configured implements Tool {
       this.addColumns = addColumns;
     }
 
-    public void setInMemoryCompaction(boolean inMemoryCompaction) {
+    public void setInMemoryCompaction(HColumnDescriptor.MemoryCompaction inMemoryCompaction) {
       this.inMemoryCompaction = inMemoryCompaction;
     }
 
-    public boolean getInMemoryCompaction() {
+    public HColumnDescriptor.MemoryCompaction getInMemoryCompaction() {
       return this.inMemoryCompaction;
     }
   }
@@ -2139,7 +2140,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
       final String inMemoryCompaction = "--inmemoryCompaction=";
       if (cmd.startsWith(inMemoryCompaction)) {
-        opts.inMemoryCompaction = Boolean.parseBoolean(cmd.substring(inMemoryCompaction.length()));
+        opts.inMemoryCompaction = opts.inMemoryCompaction.valueOf(cmd.substring
+            (inMemoryCompaction.length()));
         continue;
       }
 
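The PerformanceEvaluation flag now takes a policy name rather than a boolean, so invocations look like --inmemoryCompaction=EAGER instead of --inmemoryCompaction=true, and the option defaults to BASIC through COMPACTING_MEMSTORE_TYPE_DEFAULT. One nit: opts.inMemoryCompaction.valueOf(...) calls a static method through an instance reference; HColumnDescriptor.MemoryCompaction.valueOf(...) would be the clearer spelling.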
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -100,6 +101,8 @@ public class TestAcidGuarantees implements Tool {
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
         ConstantSizeRegionSplitPolicy.class.getName());
     conf.setInt("hfile.format.version", 3); // for mob tests
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
     util = new HBaseTestingUtility(conf);
   }
 
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
@@ -249,6 +250,8 @@ public class TestIOFencing {
     c.setLong("hbase.hstore.blockingStoreFiles", 1000);
     // Compact quickly after we tell it to!
     c.setInt("hbase.regionserver.thread.splitcompactcheckfrequency", 1000);
+    c.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
     LOG.info("Starting mini cluster");
     TEST_UTIL.startMiniCluster(1);
     CompactionBlockerRegion compactingRegion = null;
@@ -35,11 +35,13 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -92,6 +94,9 @@ public class TestHFileArchiving {
     // prevent aggressive region split
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
         ConstantSizeRegionSplitPolicy.class.getName());
+    // no memory compaction
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
   }
 
   @After
@@ -20,7 +20,9 @@ package org.apache.hadoop.hbase.client;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -50,6 +52,8 @@ public class TestMobSnapshotFromClient extends TestSnapshotFromClient {
   protected static void setupConf(Configuration conf) {
     TestSnapshotFromClient.setupConf(conf);
     conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
   }
 
   @Override
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -117,6 +118,8 @@ public class TestSnapshotCloneIndependence {
     // will even trigger races between creating the directory containing back references and
     // the back reference itself.
     conf.setInt("hbase.master.hfilecleaner.ttl", CLEANER_INTERVAL);
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
   }
 
   @Before
@@ -37,6 +37,7 @@ import java.util.concurrent.Future;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -52,6 +53,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -62,6 +64,7 @@ import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -96,6 +99,12 @@ public class TestTableLockManager {
     TEST_UTIL.startMiniZKCluster(1);
   }
 
+  @Before
+  public void setUp() throws IOException {
+    TEST_UTIL.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+  }
+
   @After
   public void tearDown() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
@@ -28,11 +28,13 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.procedure.TestMasterProcedureScheduler.TestTableProcedure;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 
@@ -55,6 +57,8 @@ public class TestMasterProcedureSchedulerConcurrency {
   @Before
   public void setUp() throws IOException {
     conf = HBaseConfiguration.create();
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
     queue = new MasterProcedureScheduler(conf, new TableLockManager.NullTableLockManager());
     queue.start();
   }
@@ -82,7 +82,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   public void setUp() throws Exception {
     compactingSetUp();
     this.memstore = new CompactingMemStore(HBaseConfiguration.create(), CellComparator.COMPARATOR,
-        store, regionServicesForStores);
+        store, regionServicesForStores, HColumnDescriptor.MemoryCompaction.EAGER);
   }
 
   protected void compactingSetUp() throws Exception {
@@ -135,7 +135,8 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
 
     // use case 3: first in snapshot second in kvset
     this.memstore = new CompactingMemStore(HBaseConfiguration.create(),
-        CellComparator.COMPARATOR, store, regionServicesForStores);
+        CellComparator.COMPARATOR, store, regionServicesForStores,
+        HColumnDescriptor.MemoryCompaction.EAGER);
     this.memstore.add(kv1.clone(), null);
     // As compaction is starting in the background the repetition
     // of the k1 might be removed BUT the scanners created earlier
@@ -468,8 +469,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
       throws IOException {
 
     // set memstore to do data compaction and not to use the speculative scan
-    memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "data-compaction");
-    ((CompactingMemStore)memstore).initiateType();
+    HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER;
+    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(compactionType));
+    ((CompactingMemStore)memstore).initiateType(compactionType);
 
     byte[] row = Bytes.toBytes("testrow");
     byte[] fam = Bytes.toBytes("testfamily");
@@ -549,8 +552,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   public void testCompaction1Bucket() throws IOException {
 
     // set memstore to do data compaction and not to use the speculative scan
-    memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "data-compaction");
-    ((CompactingMemStore)memstore).initiateType();
+    HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER;
+    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(compactionType));
+    ((CompactingMemStore)memstore).initiateType(compactionType);
 
     String[] keys1 = { "A", "A", "B", "C" }; //A1, A2, B3, C4
 
@@ -584,8 +589,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   public void testCompaction2Buckets() throws IOException {
 
     // set memstore to do data compaction and not to use the speculative scan
-    memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "data-compaction");
-    ((CompactingMemStore)memstore).initiateType();
+    HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER;
+    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(compactionType));
+    ((CompactingMemStore)memstore).initiateType(compactionType);
     String[] keys1 = { "A", "A", "B", "C" };
     String[] keys2 = { "A", "B", "D" };
 
@@ -637,8 +644,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   public void testCompaction3Buckets() throws IOException {
 
     // set memstore to do data compaction and not to use the speculative scan
-    memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "data-compaction");
-    ((CompactingMemStore)memstore).initiateType();
+    HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER;
+    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(compactionType));
+    ((CompactingMemStore)memstore).initiateType(compactionType);
     String[] keys1 = { "A", "A", "B", "C" };
     String[] keys2 = { "A", "B", "D" };
     String[] keys3 = { "D", "B", "B" };
@@ -62,12 +62,13 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore {
     compactingSetUp();
     Configuration conf = HBaseConfiguration.create();
 
-    // set memstore to do data compaction and not to use the speculative scan
-    conf.set("hbase.hregion.compacting.memstore.type", "data-compaction");
+    // set memstore to do data compaction
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER));
 
     this.memstore =
         new CompactingMemStore(conf, CellComparator.COMPARATOR, store,
-            regionServicesForStores);
+            regionServicesForStores, HColumnDescriptor.MemoryCompaction.EAGER);
   }
 
   //////////////////////////////////////////////////////////////////////////////
@@ -266,8 +267,10 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore {
     String[] keys2 = { "A", "B", "D", "G", "I", "J"};
     String[] keys3 = { "D", "B", "B", "E" };
 
-    memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "index-compaction");
-    ((CompactingMemStore)memstore).initiateType();
+    HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.BASIC;
+    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(compactionType));
+    ((CompactingMemStore)memstore).initiateType(compactionType);
     addRowsByKeys(memstore, keys1);
 
     ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline should not compact
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.Delete;
@@ -94,6 +95,8 @@ public class TestMajorCompaction {
     conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
     conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100);
     compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
 
     secondRowBytes = START_KEY_BYTES.clone();
     // Increment the least significant character so we get to next row.
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.WAL;
+import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -82,6 +83,12 @@ public class TestPerColumnFamilyFlush {
 
   public static final byte[] FAMILY3 = FAMILIES[2];
 
+  @Before
+  public void setUp() throws IOException {
+    TEST_UTIL.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+  }
+
   private HRegion initHRegion(String callingMethod, Configuration conf) throws IOException {
     HTableDescriptor htd = new HTableDescriptor(TABLENAME);
     for (byte[] family : FAMILIES) {
@@ -128,7 +135,9 @@ public class TestPerColumnFamilyFlush {
     conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 200 * 1024);
     conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushAllLargeStoresPolicy.class.getName());
     conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
-      40 * 1024);
+        40 * 1024);
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
     // Intialize the region
     Region region = initHRegion("testSelectiveFlushWithDataCompaction", conf);
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
@@ -74,6 +74,8 @@ public class TestRecoveredEdits {
     Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
     // Set it so we flush every 1M or so. Thats a lot.
     conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
     // The file of recovered edits has a column family of 'meta'. Also has an encoded regionname
     // of 4823016d8fca70b25503ee07f4c6d79f which needs to match on replay.
     final String encodedRegionName = "4823016d8fca70b25503ee07f4c6d79f";
@@ -71,7 +71,12 @@ public class TestWalAndCompactingMemStoreFlush {
     for (byte[] family : FAMILIES) {
       HColumnDescriptor hcd = new HColumnDescriptor(family);
       // even column families are going to have compacted memstore
-      if(i%2 == 0) hcd.setInMemoryCompaction(true);
+      if(i%2 == 0) {
+        hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.valueOf(
+            conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY)));
+      } else {
+        hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.NONE);
+      }
       htd.addFamily(hcd);
       i++;
     }
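In this test setup the even-numbered column families pick up whatever policy the individual test writes into hbase.hregion.compacting.memstore.type, while the odd-numbered ones are pinned to NONE, so every test exercises compacting and non-compacting stores side by side.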
@@ -123,7 +128,7 @@ public class TestWalAndCompactingMemStoreFlush {
   }
 
   @Test(timeout = 180000)
-  public void testSelectiveFlushWithDataCompaction() throws IOException {
+  public void testSelectiveFlushWithEager() throws IOException {
 
     // Set up the configuration
     Configuration conf = HBaseConfiguration.create();
@@ -133,10 +138,11 @@ public class TestWalAndCompactingMemStoreFlush {
     conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 * 1024);
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.25);
     // set memstore to do data compaction
-    conf.set("hbase.hregion.compacting.memstore.type", "data-compaction");
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER));
 
     // Intialize the region
-    Region region = initHRegion("testSelectiveFlushWithDataCompaction", conf);
+    Region region = initHRegion("testSelectiveFlushWithEager", conf);
 
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
     for (int i = 1; i <= 1200; i++) {
@@ -368,7 +374,8 @@ public class TestWalAndCompactingMemStoreFlush {
     conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 * 1024);
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
     // set memstore to index-compaction
-    conf.set("hbase.hregion.compacting.memstore.type", "index-compaction");
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC));
 
     // Initialize the region
     Region region = initHRegion("testSelectiveFlushWithIndexCompaction", conf);
@@ -621,7 +628,8 @@ public class TestWalAndCompactingMemStoreFlush {
         1024);
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
     // set memstore to do data compaction and not to use the speculative scan
-    conf.set("hbase.hregion.compacting.memstore.type", "data-compaction");
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER));
 
     // Intialize the HRegion
     HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf);
@@ -751,7 +759,8 @@ public class TestWalAndCompactingMemStoreFlush {
         200 * 1024);
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
     // set memstore to do data compaction and not to use the speculative scan
-    conf.set("hbase.hregion.compacting.memstore.type", "index-compaction");
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC));
 
     // Intialize the HRegion
     HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf);
@@ -874,7 +883,8 @@ public class TestWalAndCompactingMemStoreFlush {
         200 * 1024);
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
     // set memstore to do data compaction and not to use the speculative scan
-    conf.set("hbase.hregion.compacting.memstore.type", "index-compaction");
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC));
 
     // Successfully initialize the HRegion
     HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf);
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.Store;
@@ -92,27 +93,30 @@ public abstract class AbstractTestLogRolling {
 
     /**** configuration for testLogRolling ****/
     // Force a region split after every 768KB
-    TEST_UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, 768L * 1024L);
+    Configuration conf= TEST_UTIL.getConfiguration();
+    conf.setLong(HConstants.HREGION_MAX_FILESIZE, 768L * 1024L);
 
     // We roll the log after every 32 writes
-    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.maxlogentries", 32);
+    conf.setInt("hbase.regionserver.maxlogentries", 32);
 
-    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.logroll.errors.tolerated", 2);
-    TEST_UTIL.getConfiguration().setInt("hbase.rpc.timeout", 10 * 1000);
+    conf.setInt("hbase.regionserver.logroll.errors.tolerated", 2);
+    conf.setInt("hbase.rpc.timeout", 10 * 1000);
 
     // For less frequently updated regions flush after every 2 flushes
-    TEST_UTIL.getConfiguration().setInt("hbase.hregion.memstore.optionalflushcount", 2);
+    conf.setInt("hbase.hregion.memstore.optionalflushcount", 2);
 
     // We flush the cache after every 8192 bytes
-    TEST_UTIL.getConfiguration().setInt(
-        HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 8192);
+    conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 8192);
 
     // Increase the amount of time between client retries
-    TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 10 * 1000);
+    conf.setLong("hbase.client.pause", 10 * 1000);
 
     // Reduce thread wake frequency so that other threads can get
     // a chance to run.
-    TEST_UTIL.getConfiguration().setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000);
+    conf.setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000);
+
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
   }
 
   @Before
@@ -73,6 +73,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher;
@@ -138,6 +139,8 @@ public abstract class AbstractTestWALReplay {
     Configuration conf = TEST_UTIL.getConfiguration();
     // The below config supported by 0.20-append and CDH3b2
     conf.setInt("dfs.client.block.recovery.retries", 2);
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
    TEST_UTIL.startMiniCluster(3);
    Path hbaseRootDir =
        TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
@@ -22,8 +22,10 @@ import static org.junit.Assert.assertEquals;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
 import org.apache.hadoop.hbase.wal.AsyncFSWALProvider;
@@ -32,6 +32,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -74,14 +75,15 @@ public class TestLogRolling extends AbstractTestLogRolling {
     /**** configuration for testLogRollOnDatanodeDeath ****/
     // lower the namenode & datanode heartbeat so the namenode
     // quickly detects datanode failures
-    TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
-    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
+    Configuration conf= TEST_UTIL.getConfiguration();
+    conf.setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
+    conf.setInt("dfs.heartbeat.interval", 1);
     // the namenode might still try to choose the recently-dead datanode
     // for a pipeline, so try to a new pipeline multiple times
-    TEST_UTIL.getConfiguration().setInt("dfs.client.block.write.retries", 30);
-    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
-    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 3);
-    TEST_UTIL.getConfiguration().set(WALFactory.WAL_PROVIDER, "filesystem");
+    conf.setInt("dfs.client.block.write.retries", 30);
+    conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
+    conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 3);
+    conf.set(WALFactory.WAL_PROVIDER, "filesystem");
     AbstractTestLogRolling.setUpBeforeClass();
   }
 
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
@@ -45,6 +46,7 @@ import org.apache.hadoop.hbase.client.SnapshotType;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.client.SnapshotDescription;
@@ -103,7 +105,9 @@ public class TestFlushSnapshotFromClient {
     // Enable snapshot
     conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
-      ConstantSizeRegionSplitPolicy.class.getName());
+        ConstantSizeRegionSplitPolicy.class.getName());
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
   }
 
   @Before
@@ -816,7 +816,7 @@ module Hbase
          family.setCacheDataOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE)
          family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY)
          family.setInMemoryCompaction(
-           JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION)
+           org.apache.hadoop.hbase.HColumnDescriptor.MemoryCompaction.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION)
          family.setTimeToLive(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL)) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL)
          family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING)
          family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE)
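After the admin.rb change the shell attribute takes the enum name, so an invocation along the lines of create 't1', {NAME => 'f1', IN_MEMORY_COMPACTION => 'BASIC'} should work (illustrative example, not from the patch). Because the value is parsed with MemoryCompaction.valueOf, it must match NONE, BASIC, or EAGER exactly.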