HBASE-17492 Fix the compacting memstore part in hbase shell ruby script

Signed-off-by: Michael Stack <stack@apache.org>
Author: anastas
Committed: 2017-01-23 11:06:59 +02:00 by Michael Stack
Commit: aa5d9a9ad3 (parent: 980c8c2047)
28 changed files with 121 additions and 191 deletions


@@ -66,32 +66,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
   public static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION";
-  /**
-   * Enum describing all possible memory compaction policies
-   */
-  @InterfaceAudience.Public
-  @InterfaceStability.Evolving
-  public enum MemoryCompaction {
-    /**
-     * No memory compaction, when size threshold is exceeded data is flushed to disk
-     */
-    NONE,
-    /**
-     * Basic policy applies optimizations which modify the index to a more compacted representation.
-     * This is beneficial in all access patterns. The smaller the cells are the greater the
-     * benefit of this policy.
-     * This is the default policy.
-     */
-    BASIC,
-    /**
-     * In addition to compacting the index representation as the basic policy, eager policy
-     * eliminates duplication while the data is still in memory (much like the
-     * on-disk compaction does after the data is flushed to disk). This policy is most useful for
-     * applications with high data churn or small working sets.
-     */
-    EAGER
-  }
   // These constants are used as FileInfo keys
   public static final String COMPRESSION = "COMPRESSION";
   public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
@@ -712,10 +686,10 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
   * @return in-memory compaction policy if set for the cf. Returns null if no policy is set for
   * for this column family
   */
-  public MemoryCompaction getInMemoryCompaction() {
+  public MemoryCompactionPolicy getInMemoryCompaction() {
     String value = getValue(IN_MEMORY_COMPACTION);
     if (value != null) {
-      return MemoryCompaction.valueOf(value);
+      return MemoryCompactionPolicy.valueOf(value);
     }
     return null;
   }
@@ -725,7 +699,7 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
   * for this column family
   * @return this (for chained invocation)
   */
-  public HColumnDescriptor setInMemoryCompaction(MemoryCompaction inMemoryCompaction) {
+  public HColumnDescriptor setInMemoryCompaction(MemoryCompactionPolicy inMemoryCompaction) {
     return setValue(IN_MEMORY_COMPACTION, inMemoryCompaction.toString());
   }
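
For illustration only: a minimal sketch of using the relocated enum through the HColumnDescriptor API shown above. The table and family names are hypothetical and error handling is omitted.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;

public class InMemoryCompactionExample {
  public static void main(String[] args) {
    // Hypothetical column family; pick the BASIC in-memory compaction policy for it.
    HColumnDescriptor family = new HColumnDescriptor("cf");
    family.setInMemoryCompaction(MemoryCompactionPolicy.BASIC);

    // Attach the family to a (hypothetical) table descriptor.
    HTableDescriptor table = new HTableDescriptor(TableName.valueOf("demo_table"));
    table.addFamily(family);

    // Read the policy back; getInMemoryCompaction() returns null when no policy was set.
    MemoryCompactionPolicy policy = family.getInMemoryCompaction();
    System.out.println("Configured policy: " + policy);
  }
}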


@@ -0,0 +1,48 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Enum describing all possible memory compaction policies
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public enum MemoryCompactionPolicy {
+  /**
+   * No memory compaction, when size threshold is exceeded data is flushed to disk
+   */
+  NONE,
+  /**
+   * Basic policy applies optimizations which modify the index to a more compacted representation.
+   * This is beneficial in all access patterns. The smaller the cells are the greater the
+   * benefit of this policy.
+   * This is the default policy.
+   */
+  BASIC,
+  /**
+   * In addition to compacting the index representation as the basic policy, eager policy
+   * eliminates duplication while the data is still in memory (much like the
+   * on-disk compaction does after the data is flushed to disk). This policy is most useful for
+   * applications with high data churn or small working sets.
+   */
+  EAGER
+}
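
As an aside, a small self-contained sketch of the string round-trip the rest of this patch relies on: policies are stored under their enum names and parsed back with valueOf(). Only the enum above is assumed.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;

public class PolicyRoundTrip {
  public static void main(String[] args) {
    for (MemoryCompactionPolicy policy : MemoryCompactionPolicy.values()) {
      // Descriptors and Configuration entries store the policy as its plain name...
      String stored = String.valueOf(policy);
      // ...and turn it back into the enum with valueOf(), as HColumnDescriptor and HStore do.
      MemoryCompactionPolicy parsed = MemoryCompactionPolicy.valueOf(stored);
      System.out.println(stored + " -> " + parsed);
    }
  }
}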


@@ -31,8 +31,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MemoryCompactionPolicy;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -57,7 +57,7 @@ public class CompactingMemStore extends AbstractMemStore {
   public static final String COMPACTING_MEMSTORE_TYPE_KEY =
       "hbase.hregion.compacting.memstore.type";
   public static final String COMPACTING_MEMSTORE_TYPE_DEFAULT =
-      String.valueOf(HColumnDescriptor.MemoryCompaction.NONE);
+      String.valueOf(MemoryCompactionPolicy.NONE);
   // Default fraction of in-memory-flush size w.r.t. flush-to-disk size
   public static final String IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY =
       "hbase.memstore.inmemoryflush.threshold.factor";
@@ -84,7 +84,7 @@ public class CompactingMemStore extends AbstractMemStore {
   public CompactingMemStore(Configuration conf, CellComparator c,
       HStore store, RegionServicesForStores regionServices,
-      HColumnDescriptor.MemoryCompaction compactionPolicy) throws IOException {
+      MemoryCompactionPolicy compactionPolicy) throws IOException {
     super(conf, c);
     this.store = store;
     this.regionServices = regionServices;
@@ -496,7 +496,7 @@ public class CompactingMemStore extends AbstractMemStore {
   }
   @VisibleForTesting
-  void initiateType(HColumnDescriptor.MemoryCompaction compactionType) {
+  void initiateType(MemoryCompactionPolicy compactionType) {
     compactor.initiateAction(compactionType);
   }


@@ -53,14 +53,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.CompoundConfiguration;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.backup.FailedArchiveException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
@@ -259,10 +252,10 @@ public class HStore implements Store {
     // to clone it?
     scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, this.comparator);
     String className = conf.get(MEMSTORE_CLASS_NAME, DefaultMemStore.class.getName());
-    HColumnDescriptor.MemoryCompaction inMemoryCompaction = family.getInMemoryCompaction();
+    MemoryCompactionPolicy inMemoryCompaction = family.getInMemoryCompaction();
     if(inMemoryCompaction == null) {
-      inMemoryCompaction = HColumnDescriptor.MemoryCompaction.valueOf(conf.get
-        (CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+      inMemoryCompaction = MemoryCompactionPolicy.valueOf(
+          conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
           CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT));
     }
     switch (inMemoryCompaction) {
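
The hunk above makes the column-family setting take precedence, with the cluster-wide key consulted only when the family has no policy of its own. A minimal sketch of that fallback path, using only identifiers that appear in this patch; the standalone Configuration and the EAGER choice are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;

public class DefaultPolicyConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Cluster-wide default; HStore falls back to this when the family returns null.
    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
        String.valueOf(MemoryCompactionPolicy.EAGER));
    // Same parse as in HStore: configured name (or the NONE default) back into the enum.
    MemoryCompactionPolicy effective = MemoryCompactionPolicy.valueOf(
        conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
            CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT));
    System.out.println("Default in-memory compaction policy: " + effective);
  }
}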


@@ -21,8 +21,8 @@ package org.apache.hadoop.hbase.regionserver;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HColumnDescriptor.MemoryCompaction;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MemoryCompactionPolicy;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -86,7 +86,7 @@ public class MemStoreCompactor {
   private Action action = Action.FLATTEN;
   public MemStoreCompactor(CompactingMemStore compactingMemStore,
-      MemoryCompaction compactionPolicy) {
+      MemoryCompactionPolicy compactionPolicy) {
     this.compactingMemStore = compactingMemStore;
     this.compactionKVMax = compactingMemStore.getConfiguration()
         .getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
@@ -270,7 +270,7 @@ public class MemStoreCompactor {
   * Initiate the action according to user config, after its default is Action.MERGE
   */
   @VisibleForTesting
-  void initiateAction(MemoryCompaction compType) {
+  void initiateAction(MemoryCompactionPolicy compType) {
     switch (compType){
       case NONE: action = Action.NOOP;


@@ -1881,9 +1881,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     for (byte[] family : families) {
       HColumnDescriptor hcd = new HColumnDescriptor(family);
       if(compactedMemStore != null && i < compactedMemStore.length) {
-        hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.BASIC);
+        hcd.setInMemoryCompaction(MemoryCompactionPolicy.BASIC);
       } else {
-        hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.NONE);
+        hcd.setInMemoryCompaction(MemoryCompactionPolicy.NONE);
       }
       i++;


@@ -631,8 +631,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
     int columns = 1;
     int caching = 30;
     boolean addColumns = true;
-    HColumnDescriptor.MemoryCompaction inMemoryCompaction =
-        HColumnDescriptor.MemoryCompaction.valueOf(
+    MemoryCompactionPolicy inMemoryCompaction =
+        MemoryCompactionPolicy.valueOf(
             CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT);
     public TestOptions() {}
@@ -978,11 +978,11 @@ public class PerformanceEvaluation extends Configured implements Tool {
       this.addColumns = addColumns;
     }
-    public void setInMemoryCompaction(HColumnDescriptor.MemoryCompaction inMemoryCompaction) {
+    public void setInMemoryCompaction(MemoryCompactionPolicy inMemoryCompaction) {
       this.inMemoryCompaction = inMemoryCompaction;
     }
-    public HColumnDescriptor.MemoryCompaction getInMemoryCompaction() {
+    public MemoryCompactionPolicy getInMemoryCompaction() {
      return this.inMemoryCompaction;
    }
  }


@@ -102,7 +102,7 @@ public class TestAcidGuarantees implements Tool {
         ConstantSizeRegionSplitPolicy.class.getName());
     conf.setInt("hfile.format.version", 3); // for mob tests
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+        String.valueOf(MemoryCompactionPolicy.NONE));
     util = new HBaseTestingUtility(conf);
   }


@@ -251,7 +251,7 @@ public class TestIOFencing {
     // Compact quickly after we tell it to!
     c.setInt("hbase.regionserver.thread.splitcompactcheckfrequency", 1000);
     c.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+        String.valueOf(MemoryCompactionPolicy.NONE));
     LOG.info("Starting mini cluster");
     TEST_UTIL.startMiniCluster(1);
     CompactionBlockerRegion compactingRegion = null;


@@ -33,12 +33,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.ChoreService;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
@@ -96,7 +91,7 @@ public class TestHFileArchiving {
         ConstantSizeRegionSplitPolicy.class.getName());
     // no memory compaction
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+        String.valueOf(MemoryCompactionPolicy.NONE));
   }
   @After


@@ -37,11 +37,7 @@ import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.io.ByteBufferPool;
 import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -77,10 +73,10 @@ public class TestAsyncTableGetMultiThreaded {
   @BeforeClass
   public static void setUp() throws Exception {
-    setUp(HColumnDescriptor.MemoryCompaction.NONE);
+    setUp(MemoryCompactionPolicy.NONE);
   }
-  protected static void setUp(HColumnDescriptor.MemoryCompaction memoryCompaction)
+  protected static void setUp(MemoryCompactionPolicy memoryCompaction)
       throws Exception {
     TEST_UTIL.getConfiguration().set(TABLES_ON_MASTER, "none");
     TEST_UTIL.getConfiguration().setLong(HBASE_CLIENT_META_OPERATION_TIMEOUT, 60000L);


@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.client;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.MemoryCompactionPolicy;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.BeforeClass;
@@ -29,7 +30,7 @@ public class TestAsyncTableGetMultiThreadedWithBasicCompaction extends
   @BeforeClass
   public static void setUp() throws Exception {
-    setUp(HColumnDescriptor.MemoryCompaction.BASIC);
+    setUp(MemoryCompactionPolicy.BASIC);
   }
 }


@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.client;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.MemoryCompactionPolicy;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.BeforeClass;
@@ -29,7 +30,7 @@ public class TestAsyncTableGetMultiThreadedWithEagerCompaction extends
   @BeforeClass
   public static void setUp() throws Exception {
-    setUp(HColumnDescriptor.MemoryCompaction.EAGER);
+    setUp(MemoryCompactionPolicy.EAGER);
   }
 }


@@ -21,6 +21,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.MemoryCompactionPolicy;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
@@ -53,7 +54,7 @@ public class TestMobSnapshotFromClient extends TestSnapshotFromClient {
     TestSnapshotFromClient.setupConf(conf);
     conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+        String.valueOf(MemoryCompactionPolicy.NONE));
   }
   @Override


@@ -24,13 +24,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CategoryBasedTimeout;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
@@ -119,7 +113,7 @@ public class TestSnapshotCloneIndependence {
     // the back reference itself.
     conf.setInt("hbase.master.hfilecleaner.ttl", CLEANER_INTERVAL);
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+        String.valueOf(MemoryCompactionPolicy.NONE));
   }
   @Before


@@ -30,12 +30,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
@@ -99,7 +94,7 @@ public class TestSnapshotFromClient {
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
         ConstantSizeRegionSplitPolicy.class.getName());
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+        String.valueOf(MemoryCompactionPolicy.NONE));
   }


@@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.MemoryCompactionPolicy;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.procedure.TestMasterProcedureScheduler.TestTableProcedure;
 import org.apache.hadoop.hbase.procedure2.Procedure;
@@ -54,7 +55,7 @@ public class TestMasterProcedureSchedulerConcurrency {
   public void setUp() throws IOException {
     conf = HBaseConfiguration.create();
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+        String.valueOf(MemoryCompactionPolicy.NONE));
     queue = new MasterProcedureScheduler(conf);
     queue.start();
   }


@@ -26,15 +26,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.KeepDeletedCells;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueTestUtil;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -82,7 +74,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   public void setUp() throws Exception {
     compactingSetUp();
     this.memstore = new CompactingMemStore(HBaseConfiguration.create(), CellComparator.COMPARATOR,
-        store, regionServicesForStores, HColumnDescriptor.MemoryCompaction.EAGER);
+        store, regionServicesForStores, MemoryCompactionPolicy.EAGER);
   }
   protected void compactingSetUp() throws Exception {
@@ -136,7 +128,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     // use case 3: first in snapshot second in kvset
     this.memstore = new CompactingMemStore(HBaseConfiguration.create(),
         CellComparator.COMPARATOR, store, regionServicesForStores,
-        HColumnDescriptor.MemoryCompaction.EAGER);
+        MemoryCompactionPolicy.EAGER);
     this.memstore.add(kv1.clone(), null);
     // As compaction is starting in the background the repetition
     // of the k1 might be removed BUT the scanners created earlier
@@ -475,7 +467,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
       throws IOException {
     // set memstore to do data compaction and not to use the speculative scan
-    HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER;
+    MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.EAGER;
     memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
         String.valueOf(compactionType));
     ((CompactingMemStore)memstore).initiateType(compactionType);
@@ -562,7 +554,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   public void testCompaction1Bucket() throws IOException {
     // set memstore to do data compaction and not to use the speculative scan
-    HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER;
+    MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.EAGER;
     memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
         String.valueOf(compactionType));
     ((CompactingMemStore)memstore).initiateType(compactionType);
@@ -599,7 +591,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   public void testCompaction2Buckets() throws IOException {
     // set memstore to do data compaction and not to use the speculative scan
-    HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER;
+    MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.EAGER;
     memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
         String.valueOf(compactionType));
     ((CompactingMemStore)memstore).initiateType(compactionType);
@@ -654,7 +646,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   public void testCompaction3Buckets() throws IOException {
     // set memstore to do data compaction and not to use the speculative scan
-    HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER;
+    MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.EAGER;
     memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
         String.valueOf(compactionType));
     ((CompactingMemStore)memstore).initiateType(compactionType);


@@ -64,11 +64,11 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
     // set memstore to do data compaction
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER));
+        String.valueOf(MemoryCompactionPolicy.EAGER));
     this.memstore =
         new CompactingMemStore(conf, CellComparator.COMPARATOR, store,
-            regionServicesForStores, HColumnDescriptor.MemoryCompaction.EAGER);
+            regionServicesForStores, MemoryCompactionPolicy.EAGER);
   }
   //////////////////////////////////////////////////////////////////////////////
@@ -267,7 +267,7 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
     String[] keys2 = { "A", "B", "D", "G", "I", "J"};
     String[] keys3 = { "D", "B", "B", "E" };
-    HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.BASIC;
+    MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC;
     memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
         String.valueOf(compactionType));
     ((CompactingMemStore)memstore).initiateType(compactionType);


@@ -43,13 +43,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.ChoreService;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestCase;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
@@ -109,7 +103,7 @@ public class TestCompaction {
     conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
         NoLimitThroughputController.class.getName());
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+        String.valueOf(MemoryCompactionPolicy.NONE));
     compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
     secondRowBytes = START_KEY_BYTES.clone();


@@ -37,13 +37,7 @@ import java.util.Map.Entry;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseTestCase;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
@@ -96,7 +90,7 @@ public class TestMajorCompaction {
     conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100);
     compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+        String.valueOf(MemoryCompactionPolicy.NONE));
     secondRowBytes = START_KEY_BYTES.clone();
     // Increment the least significant character so we get to next row.


@@ -22,16 +22,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -86,7 +77,7 @@ public class TestPerColumnFamilyFlush {
   @Before
   public void setUp() throws IOException {
     TEST_UTIL.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+        String.valueOf(MemoryCompactionPolicy.NONE));
   }
   private HRegion initHRegion(String callingMethod, Configuration conf) throws IOException {
@@ -137,7 +128,7 @@ public class TestPerColumnFamilyFlush {
     conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
         40 * 1024);
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+        String.valueOf(MemoryCompactionPolicy.NONE));
     // Intialize the region
     Region region = initHRegion("testSelectiveFlushWithDataCompaction", conf);
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3


@@ -28,16 +28,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellScanner;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -75,7 +66,7 @@ public class TestRecoveredEdits {
     // Set it so we flush every 1M or so. Thats a lot.
     conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+        String.valueOf(MemoryCompactionPolicy.NONE));
     // The file of recovered edits has a column family of 'meta'. Also has an encoded regionname
     // of 4823016d8fca70b25503ee07f4c6d79f which needs to match on replay.
     final String encodedRegionName = "4823016d8fca70b25503ee07f4c6d79f";


@@ -22,13 +22,7 @@ import java.util.Arrays;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -72,10 +66,10 @@ public class TestWalAndCompactingMemStoreFlush {
       HColumnDescriptor hcd = new HColumnDescriptor(family);
       // even column families are going to have compacted memstore
       if(i%2 == 0) {
-        hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.valueOf(
+        hcd.setInMemoryCompaction(MemoryCompactionPolicy.valueOf(
             conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY)));
       } else {
-        hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.NONE);
+        hcd.setInMemoryCompaction(MemoryCompactionPolicy.NONE);
       }
       htd.addFamily(hcd);
       i++;
@@ -139,7 +133,7 @@ public class TestWalAndCompactingMemStoreFlush {
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.25);
     // set memstore to do data compaction
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER));
+        String.valueOf(MemoryCompactionPolicy.EAGER));
     // Intialize the region
     Region region = initHRegion("testSelectiveFlushWithEager", conf);
@@ -375,7 +369,7 @@ public class TestWalAndCompactingMemStoreFlush {
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
     // set memstore to index-compaction
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC));
+        String.valueOf(MemoryCompactionPolicy.BASIC));
     // Initialize the region
     Region region = initHRegion("testSelectiveFlushWithIndexCompaction", conf);
@@ -628,7 +622,7 @@ public class TestWalAndCompactingMemStoreFlush {
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
     // set memstore to do data compaction and not to use the speculative scan
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER));
+        String.valueOf(MemoryCompactionPolicy.EAGER));
     // Intialize the HRegion
     HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf);
@@ -763,7 +757,7 @@ public class TestWalAndCompactingMemStoreFlush {
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
     // set memstore to do data compaction and not to use the speculative scan
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC));
+        String.valueOf(MemoryCompactionPolicy.BASIC));
     // Intialize the HRegion
     HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf);
@@ -887,7 +881,7 @@ public class TestWalAndCompactingMemStoreFlush {
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
     // set memstore to do data compaction and not to use the speculative scan
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC));
+        String.valueOf(MemoryCompactionPolicy.BASIC));
     // Successfully initialize the HRegion
     HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf);


@@ -27,14 +27,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
@@ -116,7 +109,7 @@ public abstract class AbstractTestLogRolling {
     conf.setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000);
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+        String.valueOf(MemoryCompactionPolicy.NONE));
   }
   @Before


@@ -51,19 +51,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.MasterNotRunningException;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
@@ -140,7 +128,7 @@ public abstract class AbstractTestWALReplay {
     // The below config supported by 0.20-append and CDH3b2
     conf.setInt("dfs.client.block.recovery.retries", 2);
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+        String.valueOf(MemoryCompactionPolicy.NONE));
     TEST_UTIL.startMiniCluster(3);
     Path hbaseRootDir =
         TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));


@@ -34,13 +34,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CategoryBasedTimeout;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.SnapshotType;
 import org.apache.hadoop.hbase.client.Table;
@@ -107,7 +101,7 @@ public class TestFlushSnapshotFromClient {
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
         ConstantSizeRegionSplitPolicy.class.getName());
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+        String.valueOf(MemoryCompactionPolicy.NONE));
   }
   @Before


@@ -816,7 +816,7 @@ module Hbase
       family.setCacheDataOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE)
       family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY)
       family.setInMemoryCompaction(
-        org.apache.hadoop.hbase.HColumnDescriptor.MemoryCompaction.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION)
+        org.apache.hadoop.hbase.MemoryCompactionPolicy.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION)
      family.setTimeToLive(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL)) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL)
      family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING)
      family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE)
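
This hunk is the core of HBASE-17492: the shell's column-family parsing now resolves the IN_MEMORY_COMPACTION attribute (a value such as 'NONE', 'BASIC' or 'EAGER' in a create/alter family spec) against the top-level MemoryCompactionPolicy enum instead of the removed nested HColumnDescriptor.MemoryCompaction. Sketched in Java for clarity, under the assumption that the attribute arrives as a plain string; the "BASIC" value and the family name are illustrative only.

import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;

public class ShellAttributeResolution {
  public static void main(String[] args) {
    // Value as it would arrive from the shell's IN_MEMORY_COMPACTION family attribute.
    String attributeFromShell = "BASIC";
    HColumnDescriptor family = new HColumnDescriptor("cf");
    // The corrected admin.rb line performs the equivalent lookup against the relocated enum.
    family.setInMemoryCompaction(MemoryCompactionPolicy.valueOf(attributeFromShell));
    System.out.println(family.getInMemoryCompaction());
  }
}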