HBASE-17575: Run critical tests with each of the Inmemory Compaction Policies enabled
Signed-off-by: Michael Stack <stack@apache.org>
parent d8f3c6cff9
commit edbf2bb8de
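Every change below follows one pattern: a test class either becomes a JUnit Parameterized suite over the three in-memory compaction policies ("NONE", "BASIC", "EAGER") or iterates MemoryCompactionPolicy.values() inside the test, pinning the chosen policy via CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY; tests that must stay single-run instead drop their hard-coded MemoryCompactionPolicy.NONE override. A minimal sketch of the parameterized wiring, assuming JUnit 4.12+; the class name and test body are hypothetical, not part of this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;

    // Hypothetical illustration; not a class added by this commit.
    @RunWith(Parameterized.class)
    public class ExampleCompactionPolicyTest {

      // The runner instantiates the class once per policy name.
      @Parameterized.Parameters
      public static Object[] data() {
        return new Object[] { "NONE", "BASIC", "EAGER" };
      }

      private final Configuration conf;

      public ExampleCompactionPolicyTest(String compType) {
        conf = HBaseConfiguration.create();
        // The key the tests below set to select the in-memory compaction policy.
        conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, compType);
      }

      @Test
      public void testUnderPolicy() {
        // Start a minicluster with conf and exercise the policy-sensitive path.
      }
    }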
@@ -19,7 +19,9 @@ package org.apache.hadoop.hbase;
 import com.google.common.collect.Sets;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.MemStoreCompactor;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.ToolRunner;
@@ -74,7 +76,7 @@ public class IntegrationTestAcidGuarantees extends IntegrationTestBase {
     // replace the HBaseTestingUtility in the unit test with the integration test's
     // IntegrationTestingUtility
-    tag = new TestAcidGuarantees();
+    tag = new TestAcidGuarantees(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT);
     tag.setHBaseTestingUtil(util);
   }
@@ -51,10 +51,10 @@ public abstract class MemStoreSegmentsIterator implements Iterator<Cell> {
     // list of Scanners of segments in the pipeline, when compaction starts
     List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>();

-    // create the list of scanners with the smallest read point, meaning that
-    // only relevant KVs are going to be returned by the pipeline traversing
-    for (Segment segment : segments) {
-      scanners.add(segment.getScanner(store.getSmallestReadPoint()));
+    // create the list of scanners to traverse over all the data
+    // no dirty reads here as these are immutable segments
+    for (ImmutableSegment segment : segments) {
+      scanners.add(segment.getScanner(Integer.MAX_VALUE));
     }

     scanner = new MemStoreScanner(comparator, scanners, true);
@@ -39,16 +39,21 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
+import org.apache.hadoop.hbase.regionserver.MemStoreLAB;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

 import com.google.common.collect.Lists;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;

 /**
  * Test case that uses multiple threads to read and write multifamily rows
@@ -58,7 +63,12 @@ import com.google.common.collect.Lists;
  * a real cluster (eg for testing with failures, region movement, etc)
  */
 @Category({FlakeyTests.class, MediumTests.class})
+@RunWith(Parameterized.class)
 public class TestAcidGuarantees implements Tool {
+  @Parameterized.Parameters
+  public static Object[] data() {
+    return new Object[] { "NONE", "BASIC", "EAGER" };
+  }
   protected static final Log LOG = LogFactory.getLog(TestAcidGuarantees.class);
   public static final TableName TABLE_NAME = TableName.valueOf("TestAcidGuarantees");
   public static final byte [] FAMILY_A = Bytes.toBytes("A");
@@ -93,16 +103,19 @@ public class TestAcidGuarantees implements Tool {
     }
   }

-  public TestAcidGuarantees() {
+  public TestAcidGuarantees(String compType) {
     // Set small flush size for minicluster so we exercise reseeking scanners
     Configuration conf = HBaseConfiguration.create();
-    conf.set(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, String.valueOf(128*1024));
+    conf.set(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, String.valueOf(128 * 1024));
     // prevent aggressive region split
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
-      ConstantSizeRegionSplitPolicy.class.getName());
+        ConstantSizeRegionSplitPolicy.class.getName());
     conf.setInt("hfile.format.version", 3); // for mob tests
-    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-      String.valueOf(MemoryCompactionPolicy.NONE));
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, compType);
+    if(MemoryCompactionPolicy.valueOf(compType) == MemoryCompactionPolicy.EAGER) {
+      conf.setBoolean(MemStoreLAB.USEMSLAB_KEY, false);
+      conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.9);
+    }
     util = new HBaseTestingUtility(conf);
   }
@@ -390,70 +403,50 @@ public class TestAcidGuarantees implements Tool {
     }
   }

+  @Before
+  public void setUp() throws Exception {
+    util.startMiniCluster(1);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    util.shutdownMiniCluster();
+  }
+
   @Test
   public void testGetAtomicity() throws Exception {
-    util.startMiniCluster(1);
-    try {
-      runTestAtomicity(20000, 5, 5, 0, 3);
-    } finally {
-      util.shutdownMiniCluster();
-    }
+    runTestAtomicity(20000, 5, 5, 0, 3);
   }

   @Test
   public void testScanAtomicity() throws Exception {
-    util.startMiniCluster(1);
-    try {
-      runTestAtomicity(20000, 5, 0, 5, 3);
-    } finally {
-      util.shutdownMiniCluster();
-    }
+    runTestAtomicity(20000, 5, 0, 5, 3);
   }

   @Test
   public void testMixedAtomicity() throws Exception {
-    util.startMiniCluster(1);
-    try {
-      runTestAtomicity(20000, 5, 2, 2, 3);
-    } finally {
-      util.shutdownMiniCluster();
-    }
+    runTestAtomicity(20000, 5, 2, 2, 3);
   }

   @Test
   public void testMobGetAtomicity() throws Exception {
-    util.startMiniCluster(1);
-    try {
-      boolean systemTest = false;
-      boolean useMob = true;
-      runTestAtomicity(20000, 5, 5, 0, 3, systemTest, useMob);
-    } finally {
-      util.shutdownMiniCluster();
-    }
+    boolean systemTest = false;
+    boolean useMob = true;
+    runTestAtomicity(20000, 5, 5, 0, 3, systemTest, useMob);
   }

   @Test
   public void testMobScanAtomicity() throws Exception {
-    util.startMiniCluster(1);
-    try {
-      boolean systemTest = false;
-      boolean useMob = true;
-      runTestAtomicity(20000, 5, 0, 5, 3, systemTest, useMob);
-    } finally {
-      util.shutdownMiniCluster();
-    }
+    boolean systemTest = false;
+    boolean useMob = true;
+    runTestAtomicity(20000, 5, 0, 5, 3, systemTest, useMob);
   }

   @Test
   public void testMobMixedAtomicity() throws Exception {
-    util.startMiniCluster(1);
-    try {
-      boolean systemTest = false;
-      boolean useMob = true;
-      runTestAtomicity(20000, 5, 2, 2, 3, systemTest, useMob);
-    } finally {
-      util.shutdownMiniCluster();
-    }
+    boolean systemTest = false;
+    boolean useMob = true;
+    runTestAtomicity(20000, 5, 2, 2, 3, systemTest, useMob);
   }

   ////////////////////////////////////////////////////////////////////////////
@@ -488,7 +481,8 @@ public class TestAcidGuarantees implements Tool {
     Configuration c = HBaseConfiguration.create();
     int status;
     try {
-      TestAcidGuarantees test = new TestAcidGuarantees();
+      TestAcidGuarantees test = new TestAcidGuarantees(CompactingMemStore
+          .COMPACTING_MEMSTORE_TYPE_DEFAULT);
       status = ToolRunner.run(c, test, args);
     } catch (Exception e) {
       LOG.error("Exiting due to error", e);
@@ -223,7 +223,9 @@ public class TestIOFencing {
    */
   @Test
   public void testFencingAroundCompaction() throws Exception {
-    doTest(BlockCompactionsInPrepRegion.class);
+    for(MemoryCompactionPolicy policy : MemoryCompactionPolicy.values()) {
+      doTest(BlockCompactionsInPrepRegion.class, policy);
+    }
   }

   /**
@@ -234,10 +236,12 @@ public class TestIOFencing {
    */
   @Test
   public void testFencingAroundCompactionAfterWALSync() throws Exception {
-    doTest(BlockCompactionsInCompletionRegion.class);
+    for(MemoryCompactionPolicy policy : MemoryCompactionPolicy.values()) {
+      doTest(BlockCompactionsInCompletionRegion.class, policy);
+    }
   }

-  public void doTest(Class<?> regionClass) throws Exception {
+  public void doTest(Class<?> regionClass, MemoryCompactionPolicy policy) throws Exception {
     Configuration c = TEST_UTIL.getConfiguration();
     // Insert our custom region
     c.setClass(HConstants.REGION_IMPL, regionClass, HRegion.class);
@@ -250,8 +254,7 @@ public class TestIOFencing {
     c.setLong("hbase.hstore.blockingStoreFiles", 1000);
     // Compact quickly after we tell it to!
     c.setInt("hbase.regionserver.thread.splitcompactcheckfrequency", 1000);
-    c.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-      String.valueOf(MemoryCompactionPolicy.NONE));
+    c.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(policy));
     LOG.info("Starting mini cluster");
     TEST_UTIL.startMiniCluster(1);
     CompactionBlockerRegion compactingRegion = null;
@@ -344,7 +347,11 @@ public class TestIOFencing {
         Thread.sleep(1000);
         assertTrue("New region never compacted", System.currentTimeMillis() - startWaitTime < 180000);
       }
-      assertEquals(FIRST_BATCH_COUNT + SECOND_BATCH_COUNT, TEST_UTIL.countRows(table));
+      if(policy == MemoryCompactionPolicy.EAGER) {
+        assertTrue(FIRST_BATCH_COUNT + SECOND_BATCH_COUNT >= TEST_UTIL.countRows(table));
+      } else {
+        assertEquals(FIRST_BATCH_COUNT + SECOND_BATCH_COUNT, TEST_UTIL.countRows(table));
+      }
     } finally {
       if (compactingRegion != null) {
         compactingRegion.allowCompactions();
@@ -89,9 +89,6 @@ public class TestHFileArchiving {
     // prevent aggressive region split
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
       ConstantSizeRegionSplitPolicy.class.getName());
-    // no memory compaction
-    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-      String.valueOf(MemoryCompactionPolicy.NONE));
   }

   @After
@@ -20,10 +20,7 @@ package org.apache.hadoop.hbase.client;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.MemoryCompactionPolicy;
 import org.apache.hadoop.hbase.mob.MobConstants;
-import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -53,8 +50,6 @@ public class TestMobSnapshotFromClient extends TestSnapshotFromClient {
   protected static void setupConf(Configuration conf) {
     TestSnapshotFromClient.setupConf(conf);
     conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
-    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-      String.valueOf(MemoryCompactionPolicy.NONE));
   }

   @Override
@@ -24,14 +24,19 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.After;
@@ -112,8 +117,6 @@ public class TestSnapshotCloneIndependence {
     // will even trigger races between creating the directory containing back references and
     // the back reference itself.
     conf.setInt("hbase.master.hfilecleaner.ttl", CLEANER_INTERVAL);
-    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-      String.valueOf(MemoryCompactionPolicy.NONE));
   }

   @Before
@@ -30,9 +30,12 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -93,8 +96,6 @@ public class TestSnapshotFromClient {
     conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
       ConstantSizeRegionSplitPolicy.class.getName());
-    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-      String.valueOf(MemoryCompactionPolicy.NONE));

   }

@@ -27,12 +27,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.MemoryCompactionPolicy;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.procedure.TestMasterProcedureScheduler.TestTableProcedure;
 import org.apache.hadoop.hbase.procedure2.Procedure;
-import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -54,8 +51,6 @@ public class TestMasterProcedureSchedulerConcurrency {
   @Before
   public void setUp() throws IOException {
     conf = HBaseConfiguration.create();
-    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-      String.valueOf(MemoryCompactionPolicy.NONE));
     queue = new MasterProcedureScheduler(conf);
     queue.start();
   }
@@ -43,7 +43,13 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.ChoreService;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
@@ -102,8 +108,6 @@ public class TestCompaction {
     conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100);
     conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY,
       NoLimitThroughputController.class.getName());
-    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-      String.valueOf(MemoryCompactionPolicy.NONE));
     compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);

     secondRowBytes = START_KEY_BYTES.clone();
@@ -37,7 +37,12 @@ import java.util.Map.Entry;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
@@ -60,14 +65,20 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;

+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;

 /**
  * Test major compactions
  */
 @Category({RegionServerTests.class, MediumTests.class})
+@RunWith(Parameterized.class)
 public class TestMajorCompaction {
-  @Rule public TestName name = new TestName();
+  @Parameterized.Parameters
+  public static Object[] data() {
+    return new Object[] { "NONE", "BASIC", "EAGER" };
+  }
+  @Rule public TestName name;
   private static final Log LOG = LogFactory.getLog(TestMajorCompaction.class.getName());
   private static final HBaseTestingUtility UTIL = HBaseTestingUtility.createLocalHTU();
   protected Configuration conf = UTIL.getConfiguration();
@@ -82,15 +93,14 @@ public class TestMajorCompaction {
   private static final long MAX_FILES_TO_COMPACT = 10;

   /** constructor */
-  public TestMajorCompaction() {
+  public TestMajorCompaction(String compType) {
     super();
+    name = new TestName();
     // Set cache flush size to 1MB
     conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
     conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100);
     compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
-    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-      String.valueOf(MemoryCompactionPolicy.NONE));
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compType));

     secondRowBytes = START_KEY_BYTES.clone();
     // Increment the least significant character so we get to next row.
@@ -101,7 +111,7 @@ public class TestMajorCompaction {

   @Before
   public void setUp() throws Exception {
-    this.htd = UTIL.createTableDescriptor(name.getMethodName());
+    this.htd = UTIL.createTableDescriptor(name.getMethodName().replace('[','i').replace(']','i'));
     this.r = UTIL.createLocalHRegion(htd, null, null);
   }
@@ -22,7 +22,16 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -74,12 +83,6 @@ public class TestPerColumnFamilyFlush {

   public static final byte[] FAMILY3 = FAMILIES[2];

-  @Before
-  public void setUp() throws IOException {
-    TEST_UTIL.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-      String.valueOf(MemoryCompactionPolicy.NONE));
-  }
-
   private HRegion initHRegion(String callingMethod, Configuration conf) throws IOException {
     HTableDescriptor htd = new HTableDescriptor(TABLENAME);
     for (byte[] family : FAMILIES) {
@@ -127,8 +130,6 @@ public class TestPerColumnFamilyFlush {
     conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushAllLargeStoresPolicy.class.getName());
     conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
       40 * 1024);
-    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-      String.valueOf(MemoryCompactionPolicy.NONE));
     // Intialize the region
     Region region = initHRegion("testSelectiveFlushWithDataCompaction", conf);
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
@@ -28,7 +28,17 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MemoryCompactionPolicy;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -60,13 +70,20 @@ public class TestRecoveredEdits {
    * made it in.
    * @throws IOException
    */
-  @Test (timeout=60000)
-  public void testReplayWorksThoughLotsOfFlushing() throws IOException {
+  @Test (timeout=180000)
+  public void testReplayWorksThoughLotsOfFlushing() throws
+      IOException {
+    for(MemoryCompactionPolicy policy : MemoryCompactionPolicy.values()) {
+      testReplayWorksWithMemoryCompactionPolicy(policy);
+    }
+  }
+
+  private void testReplayWorksWithMemoryCompactionPolicy(MemoryCompactionPolicy policy) throws
+      IOException {
     Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
     // Set it so we flush every 1M or so. Thats a lot.
     conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
-    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-      String.valueOf(MemoryCompactionPolicy.NONE));
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(policy));
     // The file of recovered edits has a column family of 'meta'. Also has an encoded regionname
     // of 4823016d8fca70b25503ee07f4c6d79f which needs to match on replay.
     final String encodedRegionName = "4823016d8fca70b25503ee07f4c6d79f";
@@ -122,7 +139,11 @@ public class TestRecoveredEdits {
     // Our 0000000000000016310 is 10MB. Most of the edits are for one region. Lets assume that if
     // we flush at 1MB, that there are at least 3 flushed files that are there because of the
     // replay of edits.
-    assertTrue("Files count=" + storeFiles.size(), storeFiles.size() > 10);
+    if(policy == MemoryCompactionPolicy.EAGER) {
+      assertTrue("Files count=" + storeFiles.size(), storeFiles.size() >= 1);
+    } else {
+      assertTrue("Files count=" + storeFiles.size(), storeFiles.size() > 10);
+    }
     // Now verify all edits made it into the region.
     int count = verifyAllEditsMadeItIn(fs, conf, recoveredEditsFile, region);
     LOG.info("Checked " + count + " edits made it in");
@@ -27,13 +27,19 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.Store;
@@ -107,9 +113,6 @@ public abstract class AbstractTestLogRolling {
     // Reduce thread wake frequency so that other threads can get
     // a chance to run.
     conf.setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000);
-
-    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-      String.valueOf(MemoryCompactionPolicy.NONE));
   }

   @Before
@@ -22,10 +22,8 @@ import static org.junit.Assert.assertEquals;
 import java.io.IOException;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
 import org.apache.hadoop.hbase.wal.AsyncFSWALProvider;
@@ -34,13 +34,17 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.SnapshotType;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.client.SnapshotDescription;
@@ -100,8 +104,6 @@ public class TestFlushSnapshotFromClient {
     conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
       ConstantSizeRegionSplitPolicy.class.getName());
-    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-      String.valueOf(MemoryCompactionPolicy.NONE));
   }

   @Before