HBASE-24131 [Flakey Tests] TestExportSnapshot takes too long; up against 13min max (#1452)

Split TestExportSnapshot and TestVerifyReplication into two smaller tests each,
rather than one big test that can take 13+ minutes when there is contention.

Signed-off-by: Peter Somogyi <psomogyi@apache.org>
Michael Stack 2020-04-08 10:57:18 -07:00 committed by stack
parent ed830222da
commit 082ebdd53c
5 changed files with 501 additions and 289 deletions
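
The split works by widening each suite's shared helpers from private to package-private static so that a new sibling "adjunct" class in the same package can reuse them, with each class then running under its own HBaseClassTestRule time budget. A minimal sketch of the pattern with hypothetical names (the real helpers, such as runVerifyReplication and getLocalDestinationDir, appear in the diff below):

import org.junit.Test;

// TestBigFeature.java -- hypothetical stand-in for the original oversized suite.
// The helper that used to be private is now a package-private static so a
// sibling test class can call it.
public class TestBigFeature {
  static void runJobAndVerify(String[] args, int expectedGoodRows, int expectedBadRows) {
    // Stands in for launching the real MapReduce job and checking its counters.
  }

  @Test
  public void testFastHalf() throws Exception {
    runJobAndVerify(new String[] { "2", "tablename" }, 100, 0);
  }
}

// TestBigFeatureAdjunct.java -- hypothetical adjunct suite in the same package.
// It reuses the static helper but is timed as a separate test class.
public class TestBigFeatureAdjunct {
  @Test
  public void testSlowHalf() throws Exception {
    TestBigFeature.runJobAndVerify(new String[] { "--versions=100", "2", "tablename" }, 0, 1);
  }
}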

View File

@@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.replication;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -69,8 +68,6 @@ import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
@Category({ ReplicationTests.class, LargeTests.class })
public class TestVerifyReplication extends TestReplicationBase {
@@ -108,7 +105,7 @@ public class TestVerifyReplication extends TestReplicationBase {
htable3 = connection2.getTable(peerTableName);
}
private void runVerifyReplication(String[] args, int expectedGoodRows, int expectedBadRows)
static void runVerifyReplication(String[] args, int expectedGoodRows, int expectedBadRows)
throws IOException, InterruptedException, ClassNotFoundException {
Job job = new VerifyReplication().createSubmittableJob(new Configuration(CONF1), args);
if (job == null) {
@@ -240,185 +237,7 @@ public class TestVerifyReplication extends TestReplicationBase {
}
}
// VerifyReplication should honor versions option
@Test
public void testHBase14905() throws Exception {
// normal Batch tests
byte[] qualifierName = Bytes.toBytes("f1");
Put put = new Put(Bytes.toBytes("r1"));
long ts = System.currentTimeMillis();
put.addColumn(famName, qualifierName, ts + 1, Bytes.toBytes("v1002"));
htable1.put(put);
put.addColumn(famName, qualifierName, ts + 2, Bytes.toBytes("v1001"));
htable1.put(put);
put.addColumn(famName, qualifierName, ts + 3, Bytes.toBytes("v1112"));
htable1.put(put);
Scan scan = new Scan();
scan.readVersions(100);
ResultScanner scanner1 = htable1.getScanner(scan);
Result[] res1 = scanner1.next(1);
scanner1.close();
assertEquals(1, res1.length);
assertEquals(3, res1[0].getColumnCells(famName, qualifierName).size());
for (int i = 0; i < NB_RETRIES; i++) {
scan = new Scan();
scan.readVersions(100);
scanner1 = htable2.getScanner(scan);
res1 = scanner1.next(1);
scanner1.close();
if (res1.length != 1) {
LOG.info("Only got " + res1.length + " rows");
Thread.sleep(SLEEP_TIME);
} else {
int cellNumber = res1[0].getColumnCells(famName, Bytes.toBytes("f1")).size();
if (cellNumber != 3) {
LOG.info("Only got " + cellNumber + " cells");
Thread.sleep(SLEEP_TIME);
} else {
break;
}
}
if (i == NB_RETRIES - 1) {
fail("Waited too much time for normal batch replication");
}
}
put.addColumn(famName, qualifierName, ts + 4, Bytes.toBytes("v1111"));
htable2.put(put);
put.addColumn(famName, qualifierName, ts + 5, Bytes.toBytes("v1112"));
htable2.put(put);
scan = new Scan();
scan.readVersions(100);
scanner1 = htable2.getScanner(scan);
res1 = scanner1.next(NB_ROWS_IN_BATCH);
scanner1.close();
assertEquals(1, res1.length);
assertEquals(5, res1[0].getColumnCells(famName, qualifierName).size());
String[] args = new String[] { "--versions=100", PEER_ID, tableName.getNameAsString() };
runVerifyReplication(args, 0, 1);
}
// VerifyReplication should honor versions option
@Test
public void testVersionMismatchHBase14905() throws Exception {
// normal Batch tests
byte[] qualifierName = Bytes.toBytes("f1");
Put put = new Put(Bytes.toBytes("r1"));
long ts = System.currentTimeMillis();
put.addColumn(famName, qualifierName, ts + 1, Bytes.toBytes("v1"));
htable1.put(put);
put.addColumn(famName, qualifierName, ts + 2, Bytes.toBytes("v2"));
htable1.put(put);
put.addColumn(famName, qualifierName, ts + 3, Bytes.toBytes("v3"));
htable1.put(put);
Scan scan = new Scan();
scan.readVersions(100);
ResultScanner scanner1 = htable1.getScanner(scan);
Result[] res1 = scanner1.next(1);
scanner1.close();
assertEquals(1, res1.length);
assertEquals(3, res1[0].getColumnCells(famName, qualifierName).size());
for (int i = 0; i < NB_RETRIES; i++) {
scan = new Scan();
scan.readVersions(100);
scanner1 = htable2.getScanner(scan);
res1 = scanner1.next(1);
scanner1.close();
if (res1.length != 1) {
LOG.info("Only got " + res1.length + " rows");
Thread.sleep(SLEEP_TIME);
} else {
int cellNumber = res1[0].getColumnCells(famName, Bytes.toBytes("f1")).size();
if (cellNumber != 3) {
LOG.info("Only got " + cellNumber + " cells");
Thread.sleep(SLEEP_TIME);
} else {
break;
}
}
if (i == NB_RETRIES - 1) {
fail("Waited too much time for normal batch replication");
}
}
try {
// Disabling replication and modifying the particular version of the cell to validate the
// feature.
hbaseAdmin.disableReplicationPeer(PEER_ID);
Put put2 = new Put(Bytes.toBytes("r1"));
put2.addColumn(famName, qualifierName, ts + 2, Bytes.toBytes("v99"));
htable2.put(put2);
scan = new Scan();
scan.readVersions(100);
scanner1 = htable2.getScanner(scan);
res1 = scanner1.next(NB_ROWS_IN_BATCH);
scanner1.close();
assertEquals(1, res1.length);
assertEquals(3, res1[0].getColumnCells(famName, qualifierName).size());
String[] args = new String[] { "--versions=100", PEER_ID, tableName.getNameAsString() };
runVerifyReplication(args, 0, 1);
} finally {
hbaseAdmin.enableReplicationPeer(PEER_ID);
}
}
@Test
public void testVerifyReplicationPrefixFiltering() throws Exception {
final byte[] prefixRow = Bytes.toBytes("prefixrow");
final byte[] prefixRow2 = Bytes.toBytes("secondrow");
loadData("prefixrow", prefixRow);
loadData("secondrow", prefixRow2);
loadData("aaa", row);
loadData("zzz", row);
waitForReplication(NB_ROWS_IN_BATCH * 4, NB_RETRIES * 4);
String[] args =
new String[] { "--row-prefixes=prefixrow,secondrow", PEER_ID, tableName.getNameAsString() };
runVerifyReplication(args, NB_ROWS_IN_BATCH * 2, 0);
}
@Test
public void testVerifyReplicationSnapshotArguments() {
String[] args =
new String[] { "--sourceSnapshotName=snapshot1", "2", tableName.getNameAsString() };
assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
args = new String[] { "--sourceSnapshotTmpDir=tmp", "2", tableName.getNameAsString() };
assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
args = new String[] { "--sourceSnapshotName=snapshot1", "--sourceSnapshotTmpDir=tmp", "2",
tableName.getNameAsString() };
assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
args = new String[] { "--peerSnapshotName=snapshot1", "2", tableName.getNameAsString() };
assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
args = new String[] { "--peerSnapshotTmpDir=/tmp/", "2", tableName.getNameAsString() };
assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
args = new String[] { "--peerSnapshotName=snapshot1", "--peerSnapshotTmpDir=/tmp/",
"--peerFSAddress=tempfs", "--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2",
tableName.getNameAsString() };
assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
args = new String[] { "--sourceSnapshotName=snapshot1", "--sourceSnapshotTmpDir=/tmp/",
"--peerSnapshotName=snapshot2", "--peerSnapshotTmpDir=/tmp/", "--peerFSAddress=tempfs",
"--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2", tableName.getNameAsString() };
assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
}
private void checkRestoreTmpDir(Configuration conf, String restoreTmpDir, int expectedCount)
static void checkRestoreTmpDir(Configuration conf, String restoreTmpDir, int expectedCount)
throws IOException {
FileSystem fs = FileSystem.get(conf);
FileStatus[] subDirectories = fs.listStatus(new Path(restoreTmpDir));
@@ -429,67 +248,6 @@ public class TestVerifyReplication extends TestReplicationBase {
}
}
@Test
public void testVerifyReplicationWithSnapshotSupport() throws Exception {
// Populate the tables; at the same time this guarantees that the tables are
// identical, since it does the check
runSmallBatchTest();
// Take snapshots of the source and target tables
Path rootDir = FSUtils.getRootDir(CONF1);
FileSystem fs = rootDir.getFileSystem(CONF1);
String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true);
// Take target snapshot
Path peerRootDir = FSUtils.getRootDir(CONF2);
FileSystem peerFs = peerRootDir.getFileSystem(CONF2);
String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName,
Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true);
String peerFSAddress = peerFs.getUri().toString();
String temPath1 = UTIL1.getRandomDir().toString();
String temPath2 = "/tmp" + System.currentTimeMillis();
String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName,
"--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName,
"--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress,
"--peerHBaseRootAddress=" + FSUtils.getRootDir(CONF2), "2", tableName.getNameAsString() };
runVerifyReplication(args, NB_ROWS_IN_BATCH, 0);
checkRestoreTmpDir(CONF1, temPath1, 1);
checkRestoreTmpDir(CONF2, temPath2, 1);
Scan scan = new Scan();
ResultScanner rs = htable2.getScanner(scan);
Put put = null;
for (Result result : rs) {
put = new Put(result.getRow());
Cell firstVal = result.rawCells()[0];
put.addColumn(CellUtil.cloneFamily(firstVal), CellUtil.cloneQualifier(firstVal),
Bytes.toBytes("diff data"));
htable2.put(put);
}
Delete delete = new Delete(put.getRow());
htable2.delete(delete);
sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true);
peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName,
Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true);
args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName,
"--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName,
"--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress,
"--peerHBaseRootAddress=" + FSUtils.getRootDir(CONF2), "2", tableName.getNameAsString() };
runVerifyReplication(args, 0, NB_ROWS_IN_BATCH);
checkRestoreTmpDir(CONF1, temPath1, 2);
checkRestoreTmpDir(CONF2, temPath2, 2);
}
@Test
public void testVerifyRepJobWithQuorumAddress() throws Exception {
@@ -580,7 +338,7 @@ public class TestVerifyReplication extends TestReplicationBase {
checkRestoreTmpDir(CONF2, tmpPath2, 2);
}
private static void runBatchCopyTest() throws Exception {
static void runBatchCopyTest() throws Exception {
// normal Batch tests for htable1
loadData("", row, noRepfamName);

View File

@@ -0,0 +1,345 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.replication;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.ReplicationTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
/**
* We moved some of {@link TestVerifyReplication}'s tests here because the combined suite could
* take too long to complete. This class holds the miscellaneous remainder.
*/
@Category({ ReplicationTests.class, LargeTests.class })
public class TestVerifyReplicationAdjunct extends TestReplicationBase {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestVerifyReplicationAdjunct.class);
private static final Logger LOG = LoggerFactory.getLogger(TestVerifyReplicationAdjunct.class);
private static final String PEER_ID = "2";
private static final TableName peerTableName = TableName.valueOf("peerTest");
private static Table htable3;
@Rule
public TestName name = new TestName();
@Before
public void setUp() throws Exception {
cleanUp();
UTIL2.deleteTableData(peerTableName);
}
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TestReplicationBase.setUpBeforeClass();
TableDescriptor peerTable = TableDescriptorBuilder.newBuilder(peerTableName).setColumnFamily(
ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100)
.build()).build();
Connection connection2 = ConnectionFactory.createConnection(CONF2);
try (Admin admin2 = connection2.getAdmin()) {
admin2.createTable(peerTable, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE);
}
htable3 = connection2.getTable(peerTableName);
}
// VerifyReplication should honor versions option
@Test
public void testHBase14905() throws Exception {
// normal Batch tests
byte[] qualifierName = Bytes.toBytes("f1");
Put put = new Put(Bytes.toBytes("r1"));
long ts = System.currentTimeMillis();
put.addColumn(famName, qualifierName, ts + 1, Bytes.toBytes("v1002"));
htable1.put(put);
put.addColumn(famName, qualifierName, ts + 2, Bytes.toBytes("v1001"));
htable1.put(put);
put.addColumn(famName, qualifierName, ts + 3, Bytes.toBytes("v1112"));
htable1.put(put);
Scan scan = new Scan();
scan.readVersions(100);
ResultScanner scanner1 = htable1.getScanner(scan);
Result[] res1 = scanner1.next(1);
scanner1.close();
assertEquals(1, res1.length);
assertEquals(3, res1[0].getColumnCells(famName, qualifierName).size());
for (int i = 0; i < NB_RETRIES; i++) {
scan = new Scan();
scan.readVersions(100);
scanner1 = htable2.getScanner(scan);
res1 = scanner1.next(1);
scanner1.close();
if (res1.length != 1) {
LOG.info("Only got " + res1.length + " rows");
Thread.sleep(SLEEP_TIME);
} else {
int cellNumber = res1[0].getColumnCells(famName, Bytes.toBytes("f1")).size();
if (cellNumber != 3) {
LOG.info("Only got " + cellNumber + " cells");
Thread.sleep(SLEEP_TIME);
} else {
break;
}
}
if (i == NB_RETRIES - 1) {
fail("Waited too much time for normal batch replication");
}
}
put.addColumn(famName, qualifierName, ts + 4, Bytes.toBytes("v1111"));
htable2.put(put);
put.addColumn(famName, qualifierName, ts + 5, Bytes.toBytes("v1112"));
htable2.put(put);
scan = new Scan();
scan.readVersions(100);
scanner1 = htable2.getScanner(scan);
res1 = scanner1.next(NB_ROWS_IN_BATCH);
scanner1.close();
assertEquals(1, res1.length);
assertEquals(5, res1[0].getColumnCells(famName, qualifierName).size());
String[] args = new String[] { "--versions=100", PEER_ID, tableName.getNameAsString() };
TestVerifyReplication.runVerifyReplication(args, 0, 1);
}
// VerifyReplication should honor versions option
@Test
public void testVersionMismatchHBase14905() throws Exception {
// normal Batch tests
byte[] qualifierName = Bytes.toBytes("f1");
Put put = new Put(Bytes.toBytes("r1"));
long ts = System.currentTimeMillis();
put.addColumn(famName, qualifierName, ts + 1, Bytes.toBytes("v1"));
htable1.put(put);
put.addColumn(famName, qualifierName, ts + 2, Bytes.toBytes("v2"));
htable1.put(put);
put.addColumn(famName, qualifierName, ts + 3, Bytes.toBytes("v3"));
htable1.put(put);
Scan scan = new Scan();
scan.readVersions(100);
ResultScanner scanner1 = htable1.getScanner(scan);
Result[] res1 = scanner1.next(1);
scanner1.close();
assertEquals(1, res1.length);
assertEquals(3, res1[0].getColumnCells(famName, qualifierName).size());
for (int i = 0; i < NB_RETRIES; i++) {
scan = new Scan();
scan.readVersions(100);
scanner1 = htable2.getScanner(scan);
res1 = scanner1.next(1);
scanner1.close();
if (res1.length != 1) {
LOG.info("Only got " + res1.length + " rows");
Thread.sleep(SLEEP_TIME);
} else {
int cellNumber = res1[0].getColumnCells(famName, Bytes.toBytes("f1")).size();
if (cellNumber != 3) {
LOG.info("Only got " + cellNumber + " cells");
Thread.sleep(SLEEP_TIME);
} else {
break;
}
}
if (i == NB_RETRIES - 1) {
fail("Waited too much time for normal batch replication");
}
}
try {
// Disabling replication and modifying the particular version of the cell to validate the
// feature.
hbaseAdmin.disableReplicationPeer(PEER_ID);
Put put2 = new Put(Bytes.toBytes("r1"));
put2.addColumn(famName, qualifierName, ts + 2, Bytes.toBytes("v99"));
htable2.put(put2);
scan = new Scan();
scan.readVersions(100);
scanner1 = htable2.getScanner(scan);
res1 = scanner1.next(NB_ROWS_IN_BATCH);
scanner1.close();
assertEquals(1, res1.length);
assertEquals(3, res1[0].getColumnCells(famName, qualifierName).size());
String[] args = new String[] { "--versions=100", PEER_ID, tableName.getNameAsString() };
TestVerifyReplication.runVerifyReplication(args, 0, 1);
} finally {
hbaseAdmin.enableReplicationPeer(PEER_ID);
}
}
@Test
public void testVerifyReplicationPrefixFiltering() throws Exception {
final byte[] prefixRow = Bytes.toBytes("prefixrow");
final byte[] prefixRow2 = Bytes.toBytes("secondrow");
loadData("prefixrow", prefixRow);
loadData("secondrow", prefixRow2);
loadData("aaa", row);
loadData("zzz", row);
waitForReplication(NB_ROWS_IN_BATCH * 4, NB_RETRIES * 4);
String[] args =
new String[] { "--row-prefixes=prefixrow,secondrow", PEER_ID, tableName.getNameAsString() };
TestVerifyReplication.runVerifyReplication(args, NB_ROWS_IN_BATCH * 2, 0);
}
@Test
public void testVerifyReplicationSnapshotArguments() {
String[] args =
new String[] { "--sourceSnapshotName=snapshot1", "2", tableName.getNameAsString() };
assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
args = new String[] { "--sourceSnapshotTmpDir=tmp", "2", tableName.getNameAsString() };
assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
args = new String[] { "--sourceSnapshotName=snapshot1", "--sourceSnapshotTmpDir=tmp", "2",
tableName.getNameAsString() };
assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
args = new String[] { "--peerSnapshotName=snapshot1", "2", tableName.getNameAsString() };
assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
args = new String[] { "--peerSnapshotTmpDir=/tmp/", "2", tableName.getNameAsString() };
assertFalse(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
args = new String[] { "--peerSnapshotName=snapshot1", "--peerSnapshotTmpDir=/tmp/",
"--peerFSAddress=tempfs", "--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2",
tableName.getNameAsString() };
assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
args = new String[] { "--sourceSnapshotName=snapshot1", "--sourceSnapshotTmpDir=/tmp/",
"--peerSnapshotName=snapshot2", "--peerSnapshotTmpDir=/tmp/", "--peerFSAddress=tempfs",
"--peerHBaseRootAddress=hdfs://tempfs:50070/hbase/", "2", tableName.getNameAsString() };
assertTrue(Lists.newArrayList(args).toString(), new VerifyReplication().doCommandLine(args));
}
@Test
public void testVerifyReplicationWithSnapshotSupport() throws Exception {
// Populate the tables; at the same time this guarantees that the tables are
// identical, since it does the check
runSmallBatchTest();
// Take snapshots of the source and target tables
Path rootDir = FSUtils.getRootDir(CONF1);
FileSystem fs = rootDir.getFileSystem(CONF1);
String sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true);
// Take target snapshot
Path peerRootDir = FSUtils.getRootDir(CONF2);
FileSystem peerFs = peerRootDir.getFileSystem(CONF2);
String peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName,
Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true);
String peerFSAddress = peerFs.getUri().toString();
String temPath1 = UTIL1.getRandomDir().toString();
String temPath2 = "/tmp" + System.currentTimeMillis();
String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName,
"--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName,
"--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress,
"--peerHBaseRootAddress=" + FSUtils.getRootDir(CONF2), "2", tableName.getNameAsString() };
TestVerifyReplication.runVerifyReplication(args, NB_ROWS_IN_BATCH, 0);
TestVerifyReplication.checkRestoreTmpDir(CONF1, temPath1, 1);
TestVerifyReplication.checkRestoreTmpDir(CONF2, temPath2, 1);
Scan scan = new Scan();
ResultScanner rs = htable2.getScanner(scan);
Put put = null;
for (Result result : rs) {
put = new Put(result.getRow());
Cell firstVal = result.rawCells()[0];
put.addColumn(CellUtil.cloneFamily(firstVal), CellUtil.cloneQualifier(firstVal),
Bytes.toBytes("diff data"));
htable2.put(put);
}
Delete delete = new Delete(put.getRow());
htable2.delete(delete);
sourceSnapshotName = "sourceSnapshot-" + System.currentTimeMillis();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName,
Bytes.toString(famName), sourceSnapshotName, rootDir, fs, true);
peerSnapshotName = "peerSnapshot-" + System.currentTimeMillis();
SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), tableName,
Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true);
args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName,
"--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName,
"--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress,
"--peerHBaseRootAddress=" + FSUtils.getRootDir(CONF2), "2", tableName.getNameAsString() };
TestVerifyReplication.runVerifyReplication(args, 0, NB_ROWS_IN_BATCH);
TestVerifyReplication.checkRestoreTmpDir(CONF1, temPath1, 2);
TestVerifyReplication.checkRestoreTmpDir(CONF2, temPath2, 2);
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
htable3.close();
TestReplicationBase.tearDownAfterClass();
}
}

View File

@@ -114,7 +114,7 @@ public class TestExportSnapshot {
emptySnapshotName = "emptySnaptb0-" + testName.getMethodName();
// create Table
createTable();
createTable(this.tableName);
// Take an empty snapshot
admin.snapshot(emptySnapshotName, tableName);
@@ -127,7 +127,7 @@
admin.snapshot(snapshotName, tableName);
}
protected void createTable() throws Exception {
protected void createTable(TableName tableName) throws Exception {
SnapshotTestingUtils.createPreSplitTable(TEST_UTIL, tableName, 2, FAMILY);
}
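
The parameterized createTable above is a template method: subclasses override it to vary how the table is built, which is how TestMobExportSnapshot (the last file in this diff) swaps in a MOB table. A minimal sketch of that shape with hypothetical names:

// BaseExportTest.java -- hypothetical base class: it owns the setup flow and
// delegates table creation to an overridable hook.
public class BaseExportTest {
  protected void createTable(String tableName) throws Exception {
    // Base flavor: create a plain pre-split table.
  }

  public void setUpTable(String tableName) throws Exception {
    createTable(tableName); // a subclass override is picked up here
  }
}

// MobExportTest.java -- hypothetical subclass: only the hook changes.
public class MobExportTest extends BaseExportTest {
  @Override
  protected void createTable(String tableName) throws Exception {
    // MOB flavor: create a pre-split MOB table instead.
  }
}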
@@ -171,7 +171,7 @@
@Test
public void testConsecutiveExports() throws Exception {
Path copyDir = getLocalDestinationDir();
Path copyDir = getLocalDestinationDir(TEST_UTIL);
testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles, copyDir, false);
testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles, copyDir, true);
removeExportDir(copyDir);
@@ -253,36 +253,6 @@
assertEquals(filesExpected, snapshotFiles.size());
}
/**
* Check that ExportSnapshot will succeed if something fails but the retry succeeds.
*/
@Test
public void testExportRetry() throws Exception {
Path copyDir = getLocalDestinationDir();
Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
conf.setBoolean(ExportSnapshot.Testing.CONF_TEST_FAILURE, true);
conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 2);
conf.setInt("mapreduce.map.maxattempts", 3);
testExportFileSystemState(conf, tableName, snapshotName, snapshotName, tableNumFiles,
TEST_UTIL.getDefaultRootDirPath(), copyDir, true, getBypassRegionPredicate(), true);
}
/**
* Check that ExportSnapshot will fail if we inject failures more times than MR will retry.
*/
@Test
public void testExportFailure() throws Exception {
Path copyDir = getLocalDestinationDir();
FileSystem fs = FileSystem.get(copyDir.toUri(), new Configuration());
copyDir = copyDir.makeQualified(fs);
Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
conf.setBoolean(ExportSnapshot.Testing.CONF_TEST_FAILURE, true);
conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 4);
conf.setInt("mapreduce.map.maxattempts", 3);
testExportFileSystemState(conf, tableName, snapshotName, snapshotName, tableNumFiles,
TEST_UTIL.getDefaultRootDirPath(), copyDir, true, getBypassRegionPredicate(), false);
}
/*
* Verify that the snapshot folder on file-system 1 matches the one on file-system 2
*/
@@ -291,12 +261,6 @@
assertEquals(listFiles(fs1, root1, root1), listFiles(fs2, root2, root2));
}
protected Set<String> verifySnapshot(final FileSystem fs, final Path rootDir,
final TableName tableName, final String snapshotName) throws IOException {
return verifySnapshot(TEST_UTIL.getConfiguration(), fs, rootDir, tableName,
snapshotName, getBypassRegionPredicate());
}
/*
* Verify that the files exist
*/
@@ -364,10 +328,10 @@
return path;
}
private Path getLocalDestinationDir() {
Path path = TEST_UTIL.getDataTestDir("local-export-" + System.currentTimeMillis());
static Path getLocalDestinationDir(HBaseTestingUtility htu) {
Path path = htu.getDataTestDir("local-export-" + System.currentTimeMillis());
try {
FileSystem fs = FileSystem.getLocal(TEST_UTIL.getConfiguration());
FileSystem fs = FileSystem.getLocal(htu.getConfiguration());
LOG.info("Local export destination path: " + path);
return path.makeQualified(fs.getUri(), fs.getWorkingDirectory());
} catch (IOException ioe) {

View File

@@ -0,0 +1,144 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.snapshot;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Tests that are adjunct to {@link TestExportSnapshot}. They used to be in the same test suite,
* but that suite ran too close to the maximum time limit, so we split these out. Uses
* facilities from TestExportSnapshot where possible.
* @see TestExportSnapshot
*/
@Category({VerySlowMapReduceTests.class, LargeTests.class})
public class TestExportSnapshotAdjunct {
private static final Logger LOG = LoggerFactory.getLogger(TestExportSnapshotAdjunct.class);
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestExportSnapshotAdjunct.class);
@Rule
public final TestName testName = new TestName();
protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
protected TableName tableName;
private String emptySnapshotName;
private String snapshotName;
private int tableNumFiles;
private Admin admin;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TestExportSnapshot.setUpBaseConf(TEST_UTIL.getConfiguration());
TEST_UTIL.startMiniCluster(3);
TEST_UTIL.startMiniMapReduceCluster();
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniMapReduceCluster();
TEST_UTIL.shutdownMiniCluster();
}
/**
* Create a table and take a snapshot of the table used by the export test.
*/
@Before
public void setUp() throws Exception {
this.admin = TEST_UTIL.getAdmin();
tableName = TableName.valueOf("testtb-" + testName.getMethodName());
snapshotName = "snaptb0-" + testName.getMethodName();
emptySnapshotName = "emptySnaptb0-" + testName.getMethodName();
// Create Table
SnapshotTestingUtils.createPreSplitTable(TEST_UTIL, tableName, 2, TestExportSnapshot.FAMILY);
// Take an empty snapshot
admin.snapshot(emptySnapshotName, tableName);
// Add some rows
SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50,
TestExportSnapshot.FAMILY);
tableNumFiles = admin.getRegions(tableName).size();
// take a snapshot
admin.snapshot(snapshotName, tableName);
}
@After
public void tearDown() throws Exception {
TEST_UTIL.deleteTable(tableName);
SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getAdmin());
SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
}
/**
* Check that ExportSnapshot will succeed if something fails but the retry succeeds: two injected
* failures fit within the three configured map attempts.
*/
@Test
public void testExportRetry() throws Exception {
Path copyDir = TestExportSnapshot.getLocalDestinationDir(TEST_UTIL);
Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
conf.setBoolean(ExportSnapshot.Testing.CONF_TEST_FAILURE, true);
conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 2);
conf.setInt("mapreduce.map.maxattempts", 3);
TestExportSnapshot.testExportFileSystemState(conf, tableName,
Bytes.toBytes(snapshotName), Bytes.toBytes(snapshotName),
tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true,
null, true);
}
/**
* Check that ExportSnapshot will fail if we inject failures more times than MR will retry: four
* injected failures exceed the three configured map attempts.
*/
@Test
public void testExportFailure() throws Exception {
Path copyDir = TestExportSnapshot.getLocalDestinationDir(TEST_UTIL);
FileSystem fs = FileSystem.get(copyDir.toUri(), new Configuration());
copyDir = copyDir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
conf.setBoolean(ExportSnapshot.Testing.CONF_TEST_FAILURE, true);
conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 4);
conf.setInt("mapreduce.map.maxattempts", 3);
TestExportSnapshot.testExportFileSystemState(conf, tableName,
Bytes.toBytes(snapshotName), Bytes.toBytes(snapshotName),
tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, null, false);
}
}

View File

@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.snapshot;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.mob.MobUtils;
@@ -51,7 +52,7 @@ public class TestMobExportSnapshot extends TestExportSnapshot {
}
@Override
protected void createTable() throws Exception {
protected void createTable(TableName tableName) throws Exception {
MobSnapshotTestingUtils.createPreSplitMobTable(TEST_UTIL, tableName, 2, FAMILY);
}