HBASE-19493 Make TestWALMonotonicallyIncreasingSeqId also work with AsyncFSWAL

zhangduo 2017-12-12 11:33:02 +08:00
parent 3aa56b3932
commit 46d9b4cf08
1 changed file with 104 additions and 77 deletions


@@ -18,80 +18,101 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.*;
-import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
-import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALFactory;
-import org.junit.Assert;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.CountDownLatch;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
 
 /**
- * Test for HBASE-17471
- * MVCCPreAssign is added by HBASE-16698, but pre-assign mvcc is only used in put/delete
- * path. Other write paths like increment/append still assign mvcc in ringbuffer's consumer
- * thread. If put and increment are used parallel. Then seqid in WAL may not increase monotonically
- * Disorder in wals will lead to data loss.
- * This case use two thread to put and increment at the same time in a single region.
- * Then check the seqid in WAL. If seqid is wal is not monotonically increasing, this case will fail
- *
+ * Test for HBASE-17471.
+ * <p>
+ * MVCCPreAssign is added by HBASE-16698, but pre-assign mvcc is only used in put/delete path. Other
+ * write paths like increment/append still assign mvcc in ringbuffer's consumer thread. If put and
+ * increment are used parallel. Then seqid in WAL may not increase monotonically Disorder in wals
+ * will lead to data loss.
+ * <p>
+ * This case use two thread to put and increment at the same time in a single region. Then check the
+ * seqid in WAL. If seqid is wal is not monotonically increasing, this case will fail
  */
-@Category({RegionServerTests.class, SmallTests.class})
+@RunWith(Parameterized.class)
+@Category({ RegionServerTests.class, SmallTests.class })
 public class TestWALMonotonicallyIncreasingSeqId {
-  final Log LOG = LogFactory.getLog(getClass());
+  private final Log LOG = LogFactory.getLog(getClass());
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private static Path testDir = TEST_UTIL.getDataTestDir("TestWALMonotonicallyIncreasingSeqId");
   private WALFactory wals;
   private FileSystem fileSystem;
   private Configuration walConf;
+  private HRegion region;
 
-  public static final String KEY_SEED = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
-  private static final int KEY_SEED_LEN = KEY_SEED.length();
-  private static final char[] KEY_SEED_CHARS = KEY_SEED.toCharArray();
+  @Parameter
+  public String walProvider;
 
   @Rule
   public TestName name = new TestName();
 
-  private HTableDescriptor getTableDesc(TableName tableName, byte[]... families) {
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    for (byte[] family : families) {
-      HColumnDescriptor hcd = new HColumnDescriptor(family);
-      // Set default to be three versions.
-      hcd.setMaxVersions(Integer.MAX_VALUE);
-      htd.addFamily(hcd);
-    }
-    return htd;
+  @Parameters(name = "{index}: wal={0}")
+  public static List<Object[]> data() {
+    return Arrays.asList(new Object[] { "asyncfs" }, new Object[] { "filesystem" });
   }
 
-  private Region initHRegion(HTableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId)
+  private TableDescriptor getTableDesc(TableName tableName, byte[]... families) {
+    TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
+    Arrays.stream(families).map(
+      f -> ColumnFamilyDescriptorBuilder.newBuilder(f).setMaxVersions(Integer.MAX_VALUE).build())
+        .forEachOrdered(builder::addColumnFamily);
+    return builder.build();
+  }
+
+  private HRegion initHRegion(TableDescriptor htd, byte[] startKey, byte[] stopKey, int replicaId)
       throws IOException {
     Configuration conf = TEST_UTIL.getConfiguration();
+    conf.set("hbase.wal.provider", walProvider);
     conf.setBoolean("hbase.hregion.mvcc.preassign", false);
     Path tableDir = FSUtils.getTableDir(testDir, htd.getTableName());
-    HRegionInfo info = new HRegionInfo(htd.getTableName(), startKey, stopKey, false, 0, replicaId);
+    RegionInfo info = RegionInfoBuilder.newBuilder(htd.getTableName()).setStartKey(startKey)
+        .setEndKey(stopKey).setReplicaId(replicaId).setRegionId(0).build();
     fileSystem = tableDir.getFileSystem(conf);
-    HRegionFileSystem fs = new HRegionFileSystem(conf, fileSystem, tableDir, info);
     final Configuration walConf = new Configuration(conf);
     FSUtils.setRootDir(walConf, tableDir);
     this.walConf = walConf;
@@ -103,8 +124,10 @@ public class TestWALMonotonicallyIncreasingSeqId {
   }
 
   CountDownLatch latch = new CountDownLatch(1);
+
   public class PutThread extends Thread {
     HRegion region;
+
     public PutThread(HRegion region) {
       this.region = region;
     }
@@ -112,102 +135,106 @@ public class TestWALMonotonicallyIncreasingSeqId {
     @Override
     public void run() {
       try {
-        for(int i = 0; i < 100; i++) {
+        for (int i = 0; i < 100; i++) {
           byte[] row = Bytes.toBytes("putRow" + i);
           Put put = new Put(row);
           put.addColumn("cf".getBytes(), Bytes.toBytes(0), Bytes.toBytes(""));
-          //put.setDurability(Durability.ASYNC_WAL);
           latch.await();
-          region.batchMutate(new Mutation[]{put});
+          region.batchMutate(new Mutation[] { put });
           Thread.sleep(10);
         }
       } catch (Throwable t) {
         LOG.warn("Error happend when Increment: ", t);
       }
     }
   }
 
   public class IncThread extends Thread {
     HRegion region;
 
     public IncThread(HRegion region) {
       this.region = region;
     }
 
     @Override
     public void run() {
       try {
-        for(int i = 0; i < 100; i++) {
+        for (int i = 0; i < 100; i++) {
           byte[] row = Bytes.toBytes("incrementRow" + i);
           Increment inc = new Increment(row);
           inc.addColumn("cf".getBytes(), Bytes.toBytes(0), 1);
-          //inc.setDurability(Durability.ASYNC_WAL);
+          // inc.setDurability(Durability.ASYNC_WAL);
           region.increment(inc);
           latch.countDown();
           Thread.sleep(10);
         }
       } catch (Throwable t) {
         LOG.warn("Error happend when Put: ", t);
       }
     }
   }
 
+  @Before
+  public void setUp() throws IOException {
+    byte[][] families = new byte[][] { Bytes.toBytes("cf") };
+    TableDescriptor htd = getTableDesc(
+      TableName.valueOf(name.getMethodName().replaceAll("[^0-9A-Za-z_]", "_")), families);
+    region = initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0);
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    if (region != null) {
+      region.close();
+    }
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws IOException {
+    TEST_UTIL.cleanupTestDir();
+  }
+
+  private WAL.Reader createReader(Path logPath, Path oldWalsDir) throws IOException {
+    try {
+      return wals.createReader(fileSystem, logPath);
+    } catch (IOException e) {
+      return wals.createReader(fileSystem, new Path(oldWalsDir, logPath.getName()));
+    }
+  }
+
   @Test
-  public void TestWALMonotonicallyIncreasingSeqId() throws Exception {
-    byte[][] families = new byte[][] {Bytes.toBytes("cf")};
-    byte[] qf = Bytes.toBytes("cq");
-    HTableDescriptor htd = getTableDesc(TableName.valueOf(name.getMethodName()), families);
-    HRegion region = (HRegion)initHRegion(htd, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, 0);
+  public void testWALMonotonicallyIncreasingSeqId() throws Exception {
     List<Thread> putThreads = new ArrayList<>();
-    for(int i = 0; i < 1; i++) {
+    for (int i = 0; i < 1; i++) {
       putThreads.add(new PutThread(region));
     }
     IncThread incThread = new IncThread(region);
-    for(int i = 0; i < 1; i++) {
+    for (int i = 0; i < 1; i++) {
       putThreads.get(i).start();
     }
     incThread.start();
     incThread.join();
-    Path logPath = ((FSHLog) region.getWAL()).getCurrentFileName();
+    Path logPath = ((AbstractFSWAL<?>) region.getWAL()).getCurrentFileName();
     region.getWAL().rollWriter();
     Thread.sleep(10);
     Path hbaseDir = new Path(walConf.get(HConstants.HBASE_DIR));
     Path oldWalsDir = new Path(hbaseDir, HConstants.HREGION_OLDLOGDIR_NAME);
-    WAL.Reader reader = null;
-    try {
-      reader = wals.createReader(fileSystem, logPath);
-    } catch (Throwable t) {
-      reader = wals.createReader(fileSystem, new Path(oldWalsDir, logPath.getName()));
-    }
-    WAL.Entry e;
-    try {
+    try (WAL.Reader reader = createReader(logPath, oldWalsDir)) {
       long currentMaxSeqid = 0;
-      while ((e = reader.next()) != null) {
+      for (WAL.Entry e; (e = reader.next()) != null;) {
         if (!WALEdit.isMetaEditFamily(e.getEdit().getCells().get(0))) {
           long currentSeqid = e.getKey().getSequenceId();
-          if(currentSeqid > currentMaxSeqid) {
+          if (currentSeqid > currentMaxSeqid) {
             currentMaxSeqid = currentSeqid;
           } else {
-            Assert.fail("Current max Seqid is " + currentMaxSeqid
-                + ", but the next seqid in wal is smaller:" + currentSeqid);
+            fail("Current max Seqid is " + currentMaxSeqid +
+              ", but the next seqid in wal is smaller:" + currentSeqid);
           }
         }
       }
-    } finally {
-      if(reader != null) {
-        reader.close();
-      }
-      if(region != null) {
-        region.close();
-      }
     }
   }
 }
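
The core of the change, in brief: the test used to cast the region's WAL to FSHLog, which only holds for the default "filesystem" WAL provider; it now casts to AbstractFSWAL<?>, the base class shared by FSHLog and AsyncFSWAL, and the test class is parameterized over the "hbase.wal.provider" setting so the same seqid assertions run against both implementations. Below is a minimal, self-contained sketch of the JUnit 4 parameterization pattern the commit relies on; the class and test method names are hypothetical illustrations, not part of the commit.

import static org.junit.Assert.assertTrue;

import java.util.Arrays;
import java.util.List;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameter;
import org.junit.runners.Parameterized.Parameters;

// Hypothetical example class, for illustration only.
@RunWith(Parameterized.class)
public class WalProviderParameterizationExample {

  // The Parameterized runner injects one value from data() per run.
  @Parameter
  public String walProvider;

  @Parameters(name = "{index}: wal={0}")
  public static List<Object[]> data() {
    // One test instance per WAL implementation, as in the commit.
    return Arrays.asList(new Object[] { "asyncfs" }, new Object[] { "filesystem" });
  }

  @Test
  public void runsOncePerProvider() {
    // In the real test, walProvider is written into the configuration
    // ("hbase.wal.provider") before the region and its WAL are created.
    assertTrue(walProvider.equals("asyncfs") || walProvider.equals("filesystem"));
  }
}

Since getCurrentFileName() is available on AbstractFSWAL, which both WAL classes extend, the cast in the diff works regardless of which provider the current run instantiated.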