HBASE-14466 Remove duplicated code from MOB snapshot tests

Matteo Bertozzi 2015-09-23 10:43:31 -07:00
parent 5aab961a72
commit 5e26ae043d
18 changed files with 318 additions and 2490 deletions

View File: TestCloneSnapshotFromClient.java

@ -46,21 +46,20 @@ import org.junit.experimental.categories.Category;
public class TestCloneSnapshotFromClient {
private static final Log LOG = LogFactory.getLog(TestCloneSnapshotFromClient.class);
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final byte[] FAMILY = Bytes.toBytes("cf");
protected final byte[] FAMILY = Bytes.toBytes("cf");
private byte[] emptySnapshot;
private byte[] snapshotName0;
private byte[] snapshotName1;
private byte[] snapshotName2;
private int snapshot0Rows;
private int snapshot1Rows;
private TableName tableName;
private Admin admin;
protected byte[] emptySnapshot;
protected byte[] snapshotName0;
protected byte[] snapshotName1;
protected byte[] snapshotName2;
protected TableName tableName;
protected int snapshot0Rows;
protected int snapshot1Rows;
protected Admin admin;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
protected static void setupConfiguration() {
TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
@ -69,6 +68,11 @@ public class TestCloneSnapshotFromClient {
TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
TEST_UTIL.getConfiguration().setBoolean(
"hbase.master.enabletable.roundrobin", true);
}
@BeforeClass
public static void setUpBeforeClass() throws Exception {
setupConfiguration();
TEST_UTIL.startMiniCluster(3);
}
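
Note on the split above: HBaseTestingUtility consumes its Configuration when the mini cluster starts, so any setting a subclass needs must be in place before startMiniCluster(3) runs. Pulling the settings into a static setupConfiguration() hook, and leaving only the ordering in @BeforeClass, is what makes that possible. A minimal sketch of how a subclass is expected to plug in, mirroring the pattern the MOB subclass uses later in this diff (the extra property key here is a hypothetical placeholder, not part of the patch):

protected static void setupConfiguration() {
  TestCloneSnapshotFromClient.setupConfiguration();        // shared settings first
  TEST_UTIL.getConfiguration().setInt("my.extra.key", 1);  // hypothetical subclass-only setting
}

@BeforeClass
public static void setUpBeforeClass() throws Exception {
  setupConfiguration();          // resolves to the subclass copy (static methods are hidden, not overridden)
  TEST_UTIL.startMiniCluster(3); // the Configuration is read here, so it must be complete by now
}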
@ -93,6 +97,10 @@ public class TestCloneSnapshotFromClient {
snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
snapshotName2 = Bytes.toBytes("snaptb2-" + tid);
createTableAndSnapshots();
}
protected void createTableAndSnapshots() throws Exception {
// create Table and disable it
SnapshotTestingUtils.createTable(TEST_UTIL, tableName, getNumReplicas(), FAMILY);
admin.disableTable(tableName);
@ -164,7 +172,7 @@ public class TestCloneSnapshotFromClient {
int snapshotRows) throws IOException, InterruptedException {
// create a new table from snapshot
admin.cloneSnapshot(snapshotName, tableName);
SnapshotTestingUtils.verifyRowCount(TEST_UTIL, tableName, snapshotRows);
verifyRowCount(TEST_UTIL, tableName, snapshotRows);
verifyReplicasCameOnline(tableName);
TEST_UTIL.deleteTable(tableName);
@ -193,7 +201,7 @@ public class TestCloneSnapshotFromClient {
// Clone a table from the first snapshot
TableName clonedTableName = TableName.valueOf("clonedtb1-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName0, clonedTableName);
SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
// Take a snapshot of this cloned table.
admin.disableTable(clonedTableName);
@ -202,7 +210,7 @@ public class TestCloneSnapshotFromClient {
// Clone the snapshot of the cloned table
TableName clonedTableName2 = TableName.valueOf("clonedtb2-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName2, clonedTableName2);
SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows);
verifyRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows);
admin.disableTable(clonedTableName2);
// Remove the original table
@ -211,11 +219,11 @@ public class TestCloneSnapshotFromClient {
// Verify the first cloned table
admin.enableTable(clonedTableName);
SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
// Verify the second cloned table
admin.enableTable(clonedTableName2);
SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows);
verifyRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows);
admin.disableTable(clonedTableName2);
// Delete the first cloned table
@ -224,12 +232,12 @@ public class TestCloneSnapshotFromClient {
// Verify the second cloned table
admin.enableTable(clonedTableName2);
SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows);
verifyRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows);
// Clone a new table from cloned
TableName clonedTableName3 = TableName.valueOf("clonedtb3-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName2, clonedTableName3);
SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName3, snapshot0Rows);
verifyRowCount(TEST_UTIL, clonedTableName3, snapshot0Rows);
// Delete the cloned tables
TEST_UTIL.deleteTable(clonedTableName2);
@ -244,4 +252,9 @@ public class TestCloneSnapshotFromClient {
private void waitCleanerRun() throws InterruptedException {
TEST_UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().choreForTesting();
}
protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
long expectedRows) throws IOException {
SnapshotTestingUtils.verifyRowCount(util, tableName, expectedRows);
}
}
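
Taken together, these hunks turn TestCloneSnapshotFromClient into a small template: the protected fields and the handful of methods below are the only extension points a subclass needs, while the @Test bodies are shared unchanged. A condensed summary, reconstructed from the diff above (method bodies elided where the hunks already show them):

public class TestCloneSnapshotFromClient {
  protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  protected final byte[] FAMILY = Bytes.toBytes("cf");
  protected byte[] emptySnapshot, snapshotName0, snapshotName1, snapshotName2;
  protected TableName tableName;
  protected int snapshot0Rows, snapshot1Rows;
  protected Admin admin;

  // Subclasses layer extra settings here, then shadow setUpBeforeClass() to start the cluster.
  protected static void setupConfiguration() { /* shared conf, see hunk above */ }

  // Table creation and snapshot bootstrap; the MOB subclass swaps in a MOB table.
  protected void createTableAndSnapshots() throws Exception { /* see hunk above */ }

  // Row verification; the MOB subclass delegates to MobSnapshotTestingUtils instead.
  protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
      long expectedRows) throws IOException {
    SnapshotTestingUtils.verifyRowCount(util, tableName, expectedRows);
  }

  protected int getNumReplicas() {
    return 1;
  }
  // ... the @Test methods run against whatever table the subclass created ...
}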

View File: TestMobCloneSnapshotFromClient.java

@ -22,80 +22,35 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* Test clone snapshots from the client
*/
@Category({LargeTests.class, ClientTests.class})
public class TestMobCloneSnapshotFromClient {
final Log LOG = LogFactory.getLog(getClass());
public class TestMobCloneSnapshotFromClient extends TestCloneSnapshotFromClient {
private static final Log LOG = LogFactory.getLog(TestMobCloneSnapshotFromClient.class);
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final byte[] FAMILY = Bytes.toBytes("cf");
private byte[] emptySnapshot;
private byte[] snapshotName0;
private byte[] snapshotName1;
private byte[] snapshotName2;
private int snapshot0Rows;
private int snapshot1Rows;
private TableName tableName;
private Admin admin;
protected static void setupConfiguration() {
TestCloneSnapshotFromClient.setupConfiguration();
TEST_UTIL.getConfiguration().setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
}
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
TEST_UTIL.getConfiguration().setBoolean(
"hbase.master.enabletable.roundrobin", true);
TEST_UTIL.getConfiguration().setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
setupConfiguration();
TEST_UTIL.startMiniCluster(3);
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
/**
* Initialize the tests with a table filled with some data
* and two snapshots (snapshotName0, snapshotName1) of different states.
* The tableName, snapshotNames and the number of rows in the snapshot are initialized.
*/
@Before
public void setup() throws Exception {
this.admin = TEST_UTIL.getHBaseAdmin();
long tid = System.currentTimeMillis();
tableName = TableName.valueOf("testtb-" + tid);
emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid);
snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
snapshotName2 = Bytes.toBytes("snaptb2-" + tid);
@Override
protected void createTableAndSnapshots() throws Exception {
// create Table and disable it
MobSnapshotTestingUtils.createMobTable(TEST_UTIL, tableName, getNumReplicas(), FAMILY);
admin.disableTable(tableName);
@ -131,122 +86,9 @@ public class TestMobCloneSnapshotFromClient {
}
}
protected int getNumReplicas() {
return 1;
}
@After
public void tearDown() throws Exception {
if (admin.tableExists(tableName)) {
TEST_UTIL.deleteTable(tableName);
}
SnapshotTestingUtils.deleteAllSnapshots(admin);
SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
}
@Test(expected=SnapshotDoesNotExistException.class)
public void testCloneNonExistentSnapshot() throws IOException, InterruptedException {
String snapshotName = "random-snapshot-" + System.currentTimeMillis();
TableName tableName = TableName.valueOf("random-table-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName, tableName);
}
@Test(expected = NamespaceNotFoundException.class)
public void testCloneOnMissingNamespace() throws IOException, InterruptedException {
TableName clonedTableName = TableName.valueOf("unknownNS:clonetb");
admin.cloneSnapshot(snapshotName1, clonedTableName);
}
@Test
public void testCloneSnapshot() throws IOException, InterruptedException {
TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis());
testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows);
testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows);
testCloneSnapshot(clonedTableName, emptySnapshot, 0);
}
private void testCloneSnapshot(final TableName tableName, final byte[] snapshotName,
int snapshotRows) throws IOException, InterruptedException {
// create a new table from snapshot
admin.cloneSnapshot(snapshotName, tableName);
MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, tableName, snapshotRows);
verifyReplicasCameOnline(tableName);
TEST_UTIL.deleteTable(tableName);
}
protected void verifyReplicasCameOnline(TableName tableName) throws IOException {
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
}
@Test
public void testCloneSnapshotCrossNamespace() throws IOException, InterruptedException {
String nsName = "testCloneSnapshotCrossNamespace";
admin.createNamespace(NamespaceDescriptor.create(nsName).build());
TableName clonedTableName =
TableName.valueOf(nsName, "clonedtb-" + System.currentTimeMillis());
testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows);
testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows);
testCloneSnapshot(clonedTableName, emptySnapshot, 0);
}
/**
* Verify that tables created from the snapshot are still alive after source table deletion.
*/
@Test
public void testCloneLinksAfterDelete() throws IOException, InterruptedException {
// Clone a table from the first snapshot
TableName clonedTableName = TableName.valueOf("clonedtb1-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName0, clonedTableName);
MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
// Take a snapshot of this cloned table.
admin.disableTable(clonedTableName);
admin.snapshot(snapshotName2, clonedTableName);
// Clone the snapshot of the cloned table
TableName clonedTableName2 = TableName.valueOf("clonedtb2-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName2, clonedTableName2);
MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows);
admin.disableTable(clonedTableName2);
// Remove the original table
TEST_UTIL.deleteTable(tableName);
waitCleanerRun();
// Verify the first cloned table
admin.enableTable(clonedTableName);
MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
// Verify the second cloned table
admin.enableTable(clonedTableName2);
MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows);
admin.disableTable(clonedTableName2);
// Delete the first cloned table
TEST_UTIL.deleteTable(clonedTableName);
waitCleanerRun();
// Verify the second cloned table
admin.enableTable(clonedTableName2);
MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows);
// Clone a new table from cloned
TableName clonedTableName3 = TableName.valueOf("clonedtb3-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName2, clonedTableName3);
MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, clonedTableName3, snapshot0Rows);
// Delete the cloned tables
TEST_UTIL.deleteTable(clonedTableName2);
TEST_UTIL.deleteTable(clonedTableName3);
admin.deleteSnapshot(snapshotName2);
}
// ==========================================================================
// Helpers
// ==========================================================================
private void waitCleanerRun() throws InterruptedException {
TEST_UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().choreForTesting();
@Override
protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
long expectedRows) throws IOException {
MobSnapshotTestingUtils.verifyMobRowCount(util, tableName, expectedRows);
}
}
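
One subtlety in the hunks above: setupConfiguration() is static, and static methods in Java are hidden rather than overridden, so the subclass also redefines setUpBeforeClass() to make sure its own setupConfiguration() (and with it the MOB file cache setting) actually runs before the cluster starts. A self-contained illustration of the hiding behavior, with names that are purely illustrative and not from the patch:

public class StaticHidingDemo {
  static class Base {
    static String setup() { return "base"; }
    static String boot()  { return setup(); }   // always binds to Base.setup()
  }

  static class Sub extends Base {
    static String setup() { return "sub"; }     // hides Base.setup(), does not override it
    static String boot()  { return setup(); }   // binds to Sub.setup() only because boot() is redefined too
  }

  public static void main(String[] args) {
    System.out.println(Base.boot()); // prints "base"
    System.out.println(Sub.boot());  // prints "sub"
  }
}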

View File: TestMobRestoreSnapshotFromClient.java

@ -27,280 +27,59 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* Test restore snapshots from the client
*/
@Category({ClientTests.class, LargeTests.class})
public class TestMobRestoreSnapshotFromClient {
final Log LOG = LogFactory.getLog(getClass());
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final byte[] FAMILY = Bytes.toBytes("cf");
private byte[] emptySnapshot;
private byte[] snapshotName0;
private byte[] snapshotName1;
private byte[] snapshotName2;
private int snapshot0Rows;
private int snapshot1Rows;
private TableName tableName;
private Admin admin;
public class TestMobRestoreSnapshotFromClient extends TestRestoreSnapshotFromClient {
private static final Log LOG = LogFactory.getLog(TestMobRestoreSnapshotFromClient.class);
@BeforeClass
public static void setUpBeforeClass() throws Exception {
TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
TEST_UTIL.getConfiguration().setBoolean(
"hbase.master.enabletable.roundrobin", true);
TEST_UTIL.getConfiguration().setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
public static void setupCluster() throws Exception {
setupConf(TEST_UTIL.getConfiguration());
TEST_UTIL.startMiniCluster(3);
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
protected static void setupConf(Configuration conf) {
TestRestoreSnapshotFromClient.setupConf(conf);
TEST_UTIL.getConfiguration().setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
}
/**
* Initialize the tests with a table filled with some data
* and two snapshots (snapshotName0, snapshotName1) of different states.
* The tableName, snapshotNames and the number of rows in the snapshot are initialized.
*/
@Before
public void setup() throws Exception {
this.admin = TEST_UTIL.getHBaseAdmin();
long tid = System.currentTimeMillis();
tableName =
TableName.valueOf("testtb-" + tid);
emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid);
snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
snapshotName2 = Bytes.toBytes("snaptb2-" + tid);
// create Table and disable it
@Override
protected void createTable() throws Exception {
MobSnapshotTestingUtils.createMobTable(TEST_UTIL, tableName, getNumReplicas(), FAMILY);
admin.disableTable(tableName);
// take an empty snapshot
admin.snapshot(emptySnapshot, tableName);
Table table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())
.getTable(tableName);
// enable table and insert data
admin.enableTable(tableName);
SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 500, FAMILY);
snapshot0Rows = MobSnapshotTestingUtils.countMobRows(table);
admin.disableTable(tableName);
// take a snapshot
admin.snapshot(snapshotName0, tableName);
// enable table and insert more data
admin.enableTable(tableName);
SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 500, FAMILY);
snapshot1Rows = MobSnapshotTestingUtils.countMobRows(table);
table.close();
}
@After
public void tearDown() throws Exception {
TEST_UTIL.deleteTable(tableName);
SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getHBaseAdmin());
SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
}
@Test
public void testRestoreSnapshot() throws IOException {
MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, tableName, snapshot1Rows);
admin.disableTable(tableName);
admin.snapshot(snapshotName1, tableName);
// Restore from snapshot-0
admin.restoreSnapshot(snapshotName0);
admin.enableTable(tableName);
MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, tableName, snapshot0Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
// Restore from emptySnapshot
admin.disableTable(tableName);
admin.restoreSnapshot(emptySnapshot);
admin.enableTable(tableName);
MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, tableName, 0);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
// Restore from snapshot-1
admin.disableTable(tableName);
admin.restoreSnapshot(snapshotName1);
admin.enableTable(tableName);
MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, tableName, snapshot1Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
// Restore from snapshot-1
TEST_UTIL.deleteTable(tableName);
admin.restoreSnapshot(snapshotName1);
MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, tableName, snapshot1Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
}
protected int getNumReplicas() {
return 1;
}
@Test
public void testRestoreSchemaChange() throws Exception {
byte[] TEST_FAMILY2 = Bytes.toBytes("cf2");
Table table = ConnectionFactory.createConnection(TEST_UTIL.getConfiguration())
.getTable(tableName);
// Add one column family and put some data in it
admin.disableTable(tableName);
@Override
protected HColumnDescriptor getTestRestoreSchemaChangeHCD() {
HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY2);
hcd.setMobEnabled(true);
hcd.setMobThreshold(3L);
admin.addColumn(tableName, hcd);
admin.enableTable(tableName);
assertEquals(2, table.getTableDescriptor().getFamilies().size());
HTableDescriptor htd = admin.getTableDescriptor(tableName);
assertEquals(2, htd.getFamilies().size());
SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 500, TEST_FAMILY2);
long snapshot2Rows = snapshot1Rows + 500;
assertEquals(snapshot2Rows, MobSnapshotTestingUtils.countMobRows(table));
assertEquals(500, MobSnapshotTestingUtils.countMobRows(table, TEST_FAMILY2));
Set<String> fsFamilies = getFamiliesFromFS(tableName);
assertEquals(2, fsFamilies.size());
// Take a snapshot
admin.disableTable(tableName);
admin.snapshot(snapshotName2, tableName);
// Restore the snapshot (without the cf)
admin.restoreSnapshot(snapshotName0);
admin.enableTable(tableName);
assertEquals(1, table.getTableDescriptor().getFamilies().size());
try {
MobSnapshotTestingUtils.countMobRows(table, TEST_FAMILY2);
fail("family '" + Bytes.toString(TEST_FAMILY2) + "' should not exists");
} catch (NoSuchColumnFamilyException e) {
// expected
}
assertEquals(snapshot0Rows, MobSnapshotTestingUtils.countMobRows(table));
htd = admin.getTableDescriptor(tableName);
assertEquals(1, htd.getFamilies().size());
fsFamilies = getFamiliesFromFS(tableName);
assertEquals(1, fsFamilies.size());
// Restore back the snapshot (with the cf)
admin.disableTable(tableName);
admin.restoreSnapshot(snapshotName2);
admin.enableTable(tableName);
htd = admin.getTableDescriptor(tableName);
assertEquals(2, htd.getFamilies().size());
assertEquals(2, table.getTableDescriptor().getFamilies().size());
assertEquals(500, MobSnapshotTestingUtils.countMobRows(table, TEST_FAMILY2));
assertEquals(snapshot2Rows, MobSnapshotTestingUtils.countMobRows(table));
fsFamilies = getFamiliesFromFS(tableName);
assertEquals(2, fsFamilies.size());
table.close();
return hcd;
}
@Test
public void testCloneSnapshotOfCloned() throws IOException, InterruptedException {
TableName clonedTableName =
TableName.valueOf("clonedtb-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName0, clonedTableName);
MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(clonedTableName, admin, getNumReplicas());
admin.disableTable(clonedTableName);
admin.snapshot(snapshotName2, clonedTableName);
TEST_UTIL.deleteTable(clonedTableName);
waitCleanerRun();
admin.cloneSnapshot(snapshotName2, clonedTableName);
MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(clonedTableName, admin, getNumReplicas());
TEST_UTIL.deleteTable(clonedTableName);
@Override
protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
long expectedRows) throws IOException {
MobSnapshotTestingUtils.verifyMobRowCount(util, tableName, expectedRows);
}
@Test
public void testCloneAndRestoreSnapshot() throws IOException, InterruptedException {
TEST_UTIL.deleteTable(tableName);
waitCleanerRun();
admin.cloneSnapshot(snapshotName0, tableName);
MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, tableName, snapshot0Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
waitCleanerRun();
admin.disableTable(tableName);
admin.restoreSnapshot(snapshotName0);
admin.enableTable(tableName);
MobSnapshotTestingUtils.verifyMobRowCount(TEST_UTIL, tableName, snapshot0Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
}
@Test
public void testCorruptedSnapshot() throws IOException, InterruptedException {
SnapshotTestingUtils.corruptSnapshot(TEST_UTIL, Bytes.toString(snapshotName0));
TableName cloneName = TableName.valueOf("corruptedClone-" + System.currentTimeMillis());
try {
admin.cloneSnapshot(snapshotName0, cloneName);
fail("Expected CorruptedSnapshotException, got succeeded cloneSnapshot()");
} catch (CorruptedSnapshotException e) {
// Got the expected corruption exception.
// check for no references of the cloned table.
assertFalse(admin.tableExists(cloneName));
} catch (Exception e) {
fail("Expected CorruptedSnapshotException got: " + e);
}
}
// ==========================================================================
// Helpers
// ==========================================================================
private void waitCleanerRun() throws InterruptedException {
TEST_UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().choreForTesting();
}
private Set<String> getFamiliesFromFS(final TableName tableName) throws IOException {
MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
Set<String> families = new HashSet<String>();
Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName);
for (Path regionDir: FSUtils.getRegionDirs(mfs.getFileSystem(), tableDir)) {
for (Path familyDir: FSUtils.getFamilyDirs(mfs.getFileSystem(), regionDir)) {
families.add(familyDir.getName());
}
}
return families;
@Override
protected int countRows(final Table table, final byte[]... families) throws IOException {
return MobSnapshotTestingUtils.countMobRows(table, families);
}
}
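
The schema-change test is reused by replacing only the column descriptor it adds: the base class supplies a plain family, while the override above enables MOB on it, so the same assertions exercise the MOB data path. A condensed view of the two pieces as they fit together (reconstructed from the hunks in this diff; the surrounding test body is unchanged):

// TestRestoreSnapshotFromClient (base): the hook and its call site.
protected HColumnDescriptor getTestRestoreSchemaChangeHCD() {
  return new HColumnDescriptor(TEST_FAMILY2);
}
// ... inside testRestoreSchemaChange():
//   admin.addColumnFamily(tableName, getTestRestoreSchemaChangeHCD());

// TestMobRestoreSnapshotFromClient: same test, MOB-enabled family.
@Override
protected HColumnDescriptor getTestRestoreSchemaChangeHCD() {
  HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY2);
  hcd.setMobEnabled(true);
  hcd.setMobThreshold(3L); // cells larger than 3 bytes are written as MOB cells
  return hcd;
}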

View File: TestMobSnapshotCloneIndependence.java

@ -23,48 +23,20 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
import org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner;
import org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* Test to verify that the cloned table is independent of the table from which it was cloned
*/
@Category(LargeTests.class)
public class TestMobSnapshotCloneIndependence {
private static final Log LOG = LogFactory.getLog(TestSnapshotCloneIndependence.class);
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static final int NUM_RS = 2;
private static final String STRING_TABLE_NAME = "test";
private static final String TEST_FAM_STR = "fam";
private static final byte[] TEST_FAM = Bytes.toBytes(TEST_FAM_STR);
private static final byte[] TABLE_NAME = Bytes.toBytes(STRING_TABLE_NAME);
public class TestMobSnapshotCloneIndependence extends TestSnapshotCloneIndependence {
private static final Log LOG = LogFactory.getLog(TestMobSnapshotCloneIndependence.class);
/**
* Setup the config for the cluster and start it
@ -76,360 +48,23 @@ public class TestMobSnapshotCloneIndependence {
UTIL.startMiniCluster(NUM_RS);
}
private static void setupConf(Configuration conf) {
// enable snapshot support
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
// disable the ui
conf.setInt("hbase.regionsever.info.port", -1);
// change the flush size to a small amount, regulating number of store files
conf.setInt("hbase.hregion.memstore.flush.size", 25000);
// so make sure we get a compaction when doing a load, but keep around
// some files in the store
conf.setInt("hbase.hstore.compaction.min", 10);
conf.setInt("hbase.hstore.compactionThreshold", 10);
// block writes if we get to 12 store files
conf.setInt("hbase.hstore.blockingStoreFiles", 12);
conf.setInt("hbase.regionserver.msginterval", 100);
conf.setBoolean("hbase.master.enabletable.roundrobin", true);
// Avoid potentially aggressive splitting which would cause snapshot to fail
conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
ConstantSizeRegionSplitPolicy.class.getName());
protected static void setupConf(Configuration conf) {
TestSnapshotCloneIndependence.setupConf(conf);
conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, SnapshotHFileCleaner.class.getName() + ","
+ HFileLinkCleaner.class.getName());
}
@Before
public void setup() throws Exception {
MobSnapshotTestingUtils.createMobTable(UTIL, TableName.valueOf(STRING_TABLE_NAME), TEST_FAM);
@Override
protected Table createTable(final TableName table, byte[] family) throws Exception {
return MobSnapshotTestingUtils.createMobTable(UTIL, table, family);
}
@After
public void tearDown() throws Exception {
UTIL.deleteTable(TABLE_NAME);
SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
@Override
public void loadData(final Table table, byte[]... families) throws Exception {
SnapshotTestingUtils.loadData(UTIL, table.getName(), 1000, families);
}
@AfterClass
public static void cleanupTest() throws Exception {
try {
UTIL.shutdownMiniCluster();
} catch (Exception e) {
LOG.warn("failure shutting down cluster", e);
}
}
/**
* Verify that adding data to the cloned table will not affect the original, and vice-versa when
* it is taken as an online snapshot.
*/
@Test (timeout=300000)
public void testOnlineSnapshotAppendIndependent() throws Exception {
runTestSnapshotAppendIndependent(true);
}
/**
* Verify that adding data to the cloned table will not affect the original, and vice-versa when
* it is taken as an offline snapshot.
*/
@Test (timeout=300000)
public void testOfflineSnapshotAppendIndependent() throws Exception {
runTestSnapshotAppendIndependent(false);
}
/**
* Verify that adding metadata to the cloned table will not affect the original, and vice-versa
* when it is taken as an online snapshot.
*/
@Test (timeout=300000)
public void testOnlineSnapshotMetadataChangesIndependent() throws Exception {
runTestSnapshotMetadataChangesIndependent(true);
}
/**
* Verify that adding metadata to the cloned table will not affect the original, and vice-versa
* when it is taken as an offline snapshot.
*/
@Test (timeout=300000)
public void testOfflineSnapshotMetadataChangesIndependent() throws Exception {
runTestSnapshotMetadataChangesIndependent(false);
}
/**
* Verify that region operations, in this case splitting a region, are independent between the
* cloned table and the original.
*/
@Test (timeout=300000)
public void testOfflineSnapshotRegionOperationsIndependent() throws Exception {
runTestRegionOperationsIndependent(false);
}
/**
* Verify that region operations, in this case splitting a region, are independent between the
* cloned table and the original.
*/
@Test (timeout=300000)
public void testOnlineSnapshotRegionOperationsIndependent() throws Exception {
runTestRegionOperationsIndependent(true);
}
/**
* Verify the mob cells still exist after the table to be cloned is deleted.
*/
@Test (timeout=300000)
public void testDeleteTableToBeCloned() throws Exception {
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
TableName tn = TableName.valueOf("testDeleteTableToBeCloned");
byte[] qf = Bytes.toBytes("qf");
MobSnapshotTestingUtils.createMobTable(UTIL, tn, TEST_FAM);
String row = "row";
String value = "value";
Put put = new Put(Bytes.toBytes(row));
put.addColumn(TEST_FAM, qf, Bytes.toBytes(value));
Admin admin = UTIL.getHBaseAdmin();
BufferedMutator mutator = UTIL.getConnection().getBufferedMutator(tn);
mutator.mutate(put);
mutator.flush();
admin.flush(tn);
// Take a snapshot
final String snapshotNameAsString = "snapshot_" + tn;
byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(tn);
Table clonedTable = null;
try {
SnapshotTestingUtils.createSnapshotAndValidate(admin, tn, TEST_FAM_STR, snapshotNameAsString,
rootDir, fs, true);
TableName cloneTableName = TableName.valueOf("test-clone-" + tn);
admin.cloneSnapshot(snapshotName, cloneTableName);
clonedTable = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(
cloneTableName);
admin.deleteSnapshot(snapshotName);
admin.disableTable(tn);
admin.deleteTable(tn);
// run the cleaner
UTIL.getHBaseCluster().getMaster().getHFileCleaner().choreForTesting();
// make sure the mob cell exists
Scan scan = new Scan();
ResultScanner scanner = clonedTable.getScanner(scan);
Result rs = scanner.next();
Cell cell = rs.getColumnLatestCell(TEST_FAM, qf);
Assert.assertEquals(value, Bytes.toString(CellUtil.cloneValue(cell)));
Assert.assertNull(scanner.next());
} finally {
table.close();
if (clonedTable != null) {
clonedTable.close();
}
}
}
private static void waitOnSplit(Connection c, final Table t, int originalCount) throws Exception {
for (int i = 0; i < 200; i++) {
try {
Thread.sleep(50);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
}
try (RegionLocator locator = c.getRegionLocator(t.getName())) {
if (locator.getAllRegionLocations().size() > originalCount) {
return;
}
}
}
throw new Exception("Split did not increase the number of regions");
}
/*
* Take a snapshot of a table, add data, and verify that this only
* affects one table
* @param online - Whether the table is online or not during the snapshot
*/
private void runTestSnapshotAppendIndependent(boolean online) throws Exception {
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
Admin admin = UTIL.getHBaseAdmin();
final long startTime = System.currentTimeMillis();
final TableName localTableName =
TableName.valueOf(STRING_TABLE_NAME + startTime);
Table original = MobSnapshotTestingUtils.createMobTable(UTIL, localTableName, TEST_FAM);
try {
SnapshotTestingUtils.loadData(UTIL, localTableName, 500, TEST_FAM);
final int origTableRowCount = MobSnapshotTestingUtils.countMobRows(original);
// Take a snapshot
final String snapshotNameAsString = "snapshot_" + localTableName;
byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, TEST_FAM_STR,
snapshotNameAsString, rootDir, fs, online);
if (!online) {
admin.enableTable(localTableName);
}
TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
admin.cloneSnapshot(snapshotName, cloneTableName);
Table clonedTable = ConnectionFactory.createConnection(UTIL.getConfiguration())
.getTable(cloneTableName);
try {
final int clonedTableRowCount = MobSnapshotTestingUtils.countMobRows(clonedTable);
Assert.assertEquals(
"The line counts of original and cloned tables do not match after clone. ",
origTableRowCount, clonedTableRowCount);
// Attempt to add data to the test
final String rowKey = "new-row-" + System.currentTimeMillis();
Put p = new Put(Bytes.toBytes(rowKey));
p.add(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString"));
original.put(p);
// Verify that it is not present in the original table
Assert.assertEquals("The row count of the original table was not modified by the put",
origTableRowCount + 1, MobSnapshotTestingUtils.countMobRows(original));
Assert.assertEquals(
"The row count of the cloned table changed as a result of addition to the original",
clonedTableRowCount, MobSnapshotTestingUtils.countMobRows(clonedTable));
p = new Put(Bytes.toBytes(rowKey));
p.addColumn(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString"));
clonedTable.put(p);
// Verify that the put to the clone changed only the clone, not the original
Assert.assertEquals(
"The row count of the original table was modified by the put to the clone",
origTableRowCount + 1, MobSnapshotTestingUtils.countMobRows(original));
Assert.assertEquals("The row count of the cloned table was not modified by the put",
clonedTableRowCount + 1, MobSnapshotTestingUtils.countMobRows(clonedTable));
} finally {
clonedTable.close();
}
} finally {
original.close();
}
}
/*
* Take a snapshot of a table, do a split, and verify that this only affects one table
* @param online - Whether the table is online or not during the snapshot
*/
private void runTestRegionOperationsIndependent(boolean online) throws Exception {
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
// Create a table
Admin admin = UTIL.getHBaseAdmin();
final long startTime = System.currentTimeMillis();
final TableName localTableName =
TableName.valueOf(STRING_TABLE_NAME + startTime);
Table original = MobSnapshotTestingUtils.createMobTable(UTIL, localTableName, TEST_FAM);
SnapshotTestingUtils.loadData(UTIL, localTableName, 500, TEST_FAM);
final int loadedTableCount = MobSnapshotTestingUtils.countMobRows(original);
System.out.println("Original table has: " + loadedTableCount + " rows");
final String snapshotNameAsString = "snapshot_" + localTableName;
// Create a snapshot
SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, TEST_FAM_STR,
snapshotNameAsString, rootDir, fs, online);
if (!online) {
admin.enableTable(localTableName);
}
TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
// Clone the snapshot
byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
admin.cloneSnapshot(snapshotName, cloneTableName);
// Verify that region information is the same pre-split
((HTable)original).clearRegionCache();
List<HRegionInfo> originalTableHRegions = admin.getTableRegions(localTableName);
final int originalRegionCount = originalTableHRegions.size();
final int cloneTableRegionCount = admin.getTableRegions(cloneTableName).size();
Assert.assertEquals(
"The number of regions in the cloned table is different than in the original table.",
originalRegionCount, cloneTableRegionCount);
// Split a region on the parent table
admin.splitRegion(originalTableHRegions.get(0).getRegionName());
waitOnSplit(UTIL.getConnection(), original, originalRegionCount);
// Verify that the cloned table region is not split
final int cloneTableRegionCount2 = admin.getTableRegions(cloneTableName).size();
Assert.assertEquals(
"The number of regions in the cloned table changed though none of its regions were split.",
cloneTableRegionCount, cloneTableRegionCount2);
}
/*
* Take a snapshot of a table, add metadata, and verify that this only
* affects one table
* @param online - Whether the table is online or not during the snapshot
*/
private void runTestSnapshotMetadataChangesIndependent(boolean online) throws Exception {
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
// Create a table
Admin admin = UTIL.getHBaseAdmin();
final long startTime = System.currentTimeMillis();
final TableName localTableName =
TableName.valueOf(STRING_TABLE_NAME + startTime);
Table original = MobSnapshotTestingUtils.createMobTable(UTIL, localTableName, TEST_FAM);
SnapshotTestingUtils.loadData(UTIL, localTableName, 500, TEST_FAM);
final String snapshotNameAsString = "snapshot_" + localTableName;
// Create a snapshot
SnapshotTestingUtils.createSnapshotAndValidate(admin, localTableName, TEST_FAM_STR,
snapshotNameAsString, rootDir, fs, online);
if (!online) {
admin.enableTable(localTableName);
}
TableName cloneTableName = TableName.valueOf("test-clone-" + localTableName);
// Clone the snapshot
byte[] snapshotName = Bytes.toBytes(snapshotNameAsString);
admin.cloneSnapshot(snapshotName, cloneTableName);
// Add a new column family to the original table
byte[] TEST_FAM_2 = Bytes.toBytes("fam2");
HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAM_2);
admin.disableTable(localTableName);
admin.addColumn(localTableName, hcd);
// Verify that it is not in the snapshot
admin.enableTable(localTableName);
// get a description of the cloned table
// get a list of its families
// assert that the family is there
HTableDescriptor originalTableDescriptor = original.getTableDescriptor();
HTableDescriptor clonedTableDescriptor = admin.getTableDescriptor(cloneTableName);
Assert.assertTrue("The original family was not found. There is something wrong. ",
originalTableDescriptor.hasFamily(TEST_FAM));
Assert.assertTrue("The original family was not found in the clone. There is something wrong. ",
clonedTableDescriptor.hasFamily(TEST_FAM));
Assert.assertTrue("The new family was not found. ",
originalTableDescriptor.hasFamily(TEST_FAM_2));
Assert.assertTrue("The new family was not found. ",
!clonedTableDescriptor.hasFamily(TEST_FAM_2));
@Override
protected int countRows(final Table table, final byte[]... families) throws Exception {
return MobSnapshotTestingUtils.countMobRows(table, families);
}
}
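
After this rewrite the MOB variant touches only three seams: how the table is created, how it is loaded, and how rows are counted; every independence scenario (appends, metadata changes, region splits, clone-after-delete) is inherited unchanged. The three overrides, condensed from the hunks above (the base-class signatures are assumed to match):

public class TestMobSnapshotCloneIndependence extends TestSnapshotCloneIndependence {

  @Override
  protected Table createTable(final TableName table, byte[] family) throws Exception {
    return MobSnapshotTestingUtils.createMobTable(UTIL, table, family);
  }

  @Override
  public void loadData(final Table table, byte[]... families) throws Exception {
    SnapshotTestingUtils.loadData(UTIL, table.getName(), 1000, families);
  }

  @Override
  protected int countRows(final Table table, final byte[]... families) throws Exception {
    return MobSnapshotTestingUtils.countMobRows(table, families);
  }
}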

View File: TestMobSnapshotFromClient.java

@ -17,37 +17,14 @@
*/
package org.apache.hadoop.hbase.client;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.snapshot.SnapshotManifestV1;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import com.google.common.collect.Lists;
@ -58,14 +35,8 @@ import com.google.common.collect.Lists;
* This is an end-to-end test for the snapshot utility
*/
@Category({LargeTests.class, ClientTests.class})
public class TestMobSnapshotFromClient {
private static final Log LOG = LogFactory.getLog(TestSnapshotFromClient.class);
protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static final int NUM_RS = 2;
private static final String STRING_TABLE_NAME = "test";
protected static final byte[] TEST_FAM = Bytes.toBytes("fam");
protected static final TableName TABLE_NAME =
TableName.valueOf(STRING_TABLE_NAME);
public class TestMobSnapshotFromClient extends TestSnapshotFromClient {
private static final Log LOG = LogFactory.getLog(TestMobSnapshotFromClient.class);
/**
* Setup the config for the cluster
@ -77,229 +48,13 @@ public class TestMobSnapshotFromClient {
UTIL.startMiniCluster(NUM_RS);
}
private static void setupConf(Configuration conf) {
// disable the ui
conf.setInt("hbase.regionsever.info.port", -1);
// change the flush size to a small amount, regulating number of store files
conf.setInt("hbase.hregion.memstore.flush.size", 25000);
// so make sure we get a compaction when doing a load, but keep around some
// files in the store
conf.setInt("hbase.hstore.compaction.min", 10);
conf.setInt("hbase.hstore.compactionThreshold", 10);
// block writes if we get to 12 store files
conf.setInt("hbase.hstore.blockingStoreFiles", 12);
// Enable snapshot
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
ConstantSizeRegionSplitPolicy.class.getName());
protected static void setupConf(Configuration conf) {
TestSnapshotFromClient.setupConf(conf);
conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
}
@Before
public void setup() throws Exception {
@Override
protected void createTable() throws Exception {
MobSnapshotTestingUtils.createMobTable(UTIL, TABLE_NAME, getNumReplicas(), TEST_FAM);
}
protected int getNumReplicas() {
return 1;
}
@After
public void tearDown() throws Exception {
UTIL.deleteTable(TABLE_NAME);
SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
}
@AfterClass
public static void cleanupTest() throws Exception {
try {
UTIL.shutdownMiniCluster();
} catch (Exception e) {
LOG.warn("failure shutting down cluster", e);
}
}
/**
* Test that snapshotting hbase:meta and -ROOT- is not allowed
* @throws Exception
*/
@Test (timeout=300000)
public void testMetaTablesSnapshot() throws Exception {
Admin admin = UTIL.getHBaseAdmin();
byte[] snapshotName = Bytes.toBytes("metaSnapshot");
try {
admin.snapshot(snapshotName, TableName.META_TABLE_NAME);
fail("taking a snapshot of hbase:meta should not be allowed");
} catch (IllegalArgumentException e) {
// expected
}
}
/**
* Test HBaseAdmin#deleteSnapshots(String) which deletes snapshots whose names match the parameter
*
* @throws Exception
*/
@Test (timeout=300000)
public void testSnapshotDeletionWithRegex() throws Exception {
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
// put some stuff in the table
Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME);
UTIL.loadTable(table, TEST_FAM);
table.close();
byte[] snapshot1 = Bytes.toBytes("TableSnapshot1");
admin.snapshot(snapshot1, TABLE_NAME);
LOG.debug("Snapshot1 completed.");
byte[] snapshot2 = Bytes.toBytes("TableSnapshot2");
admin.snapshot(snapshot2, TABLE_NAME);
LOG.debug("Snapshot2 completed.");
String snapshot3 = "3rdTableSnapshot";
admin.snapshot(Bytes.toBytes(snapshot3), TABLE_NAME);
LOG.debug(snapshot3 + " completed.");
// delete the first two snapshots
admin.deleteSnapshots("TableSnapshot.*");
List<SnapshotDescription> snapshots = admin.listSnapshots();
assertEquals(1, snapshots.size());
assertEquals(snapshots.get(0).getName(), snapshot3);
admin.deleteSnapshot(snapshot3);
admin.close();
}
/**
* Test snapshotting a table that is offline
* @throws Exception
*/
@Test (timeout=300000)
public void testOfflineTableSnapshot() throws Exception {
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
// put some stuff in the table
Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME);
UTIL.loadTable(table, TEST_FAM, false);
LOG.debug("FS state before disable:");
FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
// XXX if this is flakey, might want to consider using the async version and looping as
// disableTable can succeed and still timeout.
admin.disableTable(TABLE_NAME);
LOG.debug("FS state before snapshot:");
FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
// take a snapshot of the disabled table
final String SNAPSHOT_NAME = "offlineTableSnapshot";
byte[] snapshot = Bytes.toBytes(SNAPSHOT_NAME);
SnapshotDescription desc = SnapshotDescription.newBuilder()
.setType(SnapshotDescription.Type.DISABLED)
.setTable(STRING_TABLE_NAME)
.setName(SNAPSHOT_NAME)
.setVersion(SnapshotManifestV1.DESCRIPTOR_VERSION)
.build();
admin.snapshot(desc);
LOG.debug("Snapshot completed.");
// make sure we have the snapshot
List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
snapshot, TABLE_NAME);
// make sure its a valid snapshot
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
LOG.debug("FS state after snapshot:");
FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
admin, fs);
admin.deleteSnapshot(snapshot);
snapshots = admin.listSnapshots();
SnapshotTestingUtils.assertNoSnapshots(admin);
}
@Test (timeout=300000)
public void testSnapshotFailsOnNonExistantTable() throws Exception {
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
String tableName = "_not_a_table";
// make sure the table doesn't exist
boolean fail = false;
do {
try {
admin.getTableDescriptor(TableName.valueOf(tableName));
fail = true;
LOG.error("Table:" + tableName + " already exists, checking a new name");
tableName = tableName+"!";
} catch (TableNotFoundException e) {
fail = false;
}
} while (fail);
// snapshot the non-existant table
try {
admin.snapshot("fail", TableName.valueOf(tableName));
fail("Snapshot succeeded even though there is not table.");
} catch (SnapshotCreationException e) {
LOG.info("Correctly failed to snapshot a non-existant table:" + e.getMessage());
}
}
@Test (timeout=300000)
public void testOfflineTableSnapshotWithEmptyRegions() throws Exception {
// test with an empty table with one region
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
LOG.debug("FS state before disable:");
FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
admin.disableTable(TABLE_NAME);
LOG.debug("FS state before snapshot:");
FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
// take a snapshot of the disabled table
byte[] snapshot = Bytes.toBytes("testOfflineTableSnapshotWithEmptyRegions");
admin.snapshot(snapshot, TABLE_NAME);
LOG.debug("Snapshot completed.");
// make sure we have the snapshot
List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
snapshot, TABLE_NAME);
// make sure its a valid snapshot
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
LOG.debug("FS state after snapshot:");
FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
List<byte[]> emptyCfs = Lists.newArrayList(TEST_FAM); // no file in the region
List<byte[]> nonEmptyCfs = Lists.newArrayList();
SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, nonEmptyCfs, emptyCfs,
rootDir, admin, fs);
admin.deleteSnapshot(snapshot);
snapshots = admin.listSnapshots();
SnapshotTestingUtils.assertNoSnapshots(admin);
}
}
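
TestMobSnapshotFromClient now contributes only the MOB file cache setting; everything else flows in from the parent's setupConf. A condensed view of the configuration chain, assuming the parent's body mirrors the duplicated block deleted above and that setupCluster passes UTIL.getConfiguration() as in the other tests of this patch:

// TestSnapshotFromClient (parent) - shared knobs for the snapshot client tests.
protected static void setupConf(Configuration conf) {
  conf.setInt("hbase.regionsever.info.port", -1);           // disable the info UI
  conf.setInt("hbase.hregion.memstore.flush.size", 25000);  // small flushes, more store files
  conf.setInt("hbase.hstore.compaction.min", 10);
  conf.setInt("hbase.hstore.compactionThreshold", 10);
  conf.setInt("hbase.hstore.blockingStoreFiles", 12);
  conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
  conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
      ConstantSizeRegionSplitPolicy.class.getName());
}

// TestMobSnapshotFromClient (child) - adds the single MOB-specific knob.
protected static void setupConf(Configuration conf) {
  TestSnapshotFromClient.setupConf(conf);
  conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
}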

View File: TestRestoreSnapshotFromClient.java

@ -27,6 +27,7 @@ import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@ -56,21 +57,27 @@ import org.junit.experimental.categories.Category;
public class TestRestoreSnapshotFromClient {
private static final Log LOG = LogFactory.getLog(TestRestoreSnapshotFromClient.class);
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final byte[] FAMILY = Bytes.toBytes("cf");
protected final byte[] FAMILY = Bytes.toBytes("cf");
protected final byte[] TEST_FAMILY2 = Bytes.toBytes("cf2");
protected TableName tableName;
private byte[] emptySnapshot;
private byte[] snapshotName0;
private byte[] snapshotName1;
private byte[] snapshotName2;
private int snapshot0Rows;
private int snapshot1Rows;
private TableName tableName;
private Admin admin;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
public static void setupCluster() throws Exception {
setupConf(TEST_UTIL.getConfiguration());
TEST_UTIL.startMiniCluster(3);
}
protected static void setupConf(Configuration conf) {
TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
@ -79,7 +86,6 @@ public class TestRestoreSnapshotFromClient {
TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
TEST_UTIL.getConfiguration().setBoolean(
"hbase.master.enabletable.roundrobin", true);
TEST_UTIL.startMiniCluster(3);
}
@AfterClass
@ -105,7 +111,7 @@ public class TestRestoreSnapshotFromClient {
snapshotName2 = Bytes.toBytes("snaptb2-" + tid);
// create Table and disable it
SnapshotTestingUtils.createTable(TEST_UTIL, tableName, getNumReplicas(), FAMILY);
createTable();
admin.disableTable(tableName);
// take an empty snapshot
@ -115,7 +121,7 @@ public class TestRestoreSnapshotFromClient {
admin.enableTable(tableName);
SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 500, FAMILY);
try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
snapshot0Rows = TEST_UTIL.countRows(table);
snapshot0Rows = countRows(table);
}
admin.disableTable(tableName);
@ -126,10 +132,14 @@ public class TestRestoreSnapshotFromClient {
admin.enableTable(tableName);
SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 500, FAMILY);
try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
snapshot1Rows = TEST_UTIL.countRows(table);
snapshot1Rows = countRows(table);
}
}
protected void createTable() throws Exception {
SnapshotTestingUtils.createTable(TEST_UTIL, tableName, getNumReplicas(), FAMILY);
}
@After
public void tearDown() throws Exception {
TEST_UTIL.deleteTable(tableName);
@ -139,33 +149,33 @@ public class TestRestoreSnapshotFromClient {
@Test
public void testRestoreSnapshot() throws IOException {
SnapshotTestingUtils.verifyRowCount(TEST_UTIL, tableName, snapshot1Rows);
verifyRowCount(TEST_UTIL, tableName, snapshot1Rows);
admin.disableTable(tableName);
admin.snapshot(snapshotName1, tableName);
// Restore from snapshot-0
admin.restoreSnapshot(snapshotName0);
admin.enableTable(tableName);
SnapshotTestingUtils.verifyRowCount(TEST_UTIL, tableName, snapshot0Rows);
verifyRowCount(TEST_UTIL, tableName, snapshot0Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
// Restore from emptySnapshot
admin.disableTable(tableName);
admin.restoreSnapshot(emptySnapshot);
admin.enableTable(tableName);
SnapshotTestingUtils.verifyRowCount(TEST_UTIL, tableName, 0);
verifyRowCount(TEST_UTIL, tableName, 0);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
// Restore from snapshot-1
admin.disableTable(tableName);
admin.restoreSnapshot(snapshotName1);
admin.enableTable(tableName);
SnapshotTestingUtils.verifyRowCount(TEST_UTIL, tableName, snapshot1Rows);
verifyRowCount(TEST_UTIL, tableName, snapshot1Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
// Restore from snapshot-1
TEST_UTIL.deleteTable(tableName);
admin.restoreSnapshot(snapshotName1);
SnapshotTestingUtils.verifyRowCount(TEST_UTIL, tableName, snapshot1Rows);
verifyRowCount(TEST_UTIL, tableName, snapshot1Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
}
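
Every assertion in this test now funnels through verifyRowCount, so the MOB subclass swaps the counting mechanism without touching the restore scenario itself. The two delegates, condensed from this patch:

// TestRestoreSnapshotFromClient (base)
protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
    long expectedRows) throws IOException {
  SnapshotTestingUtils.verifyRowCount(util, tableName, expectedRows);
}

// TestMobRestoreSnapshotFromClient (override)
@Override
protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
    long expectedRows) throws IOException {
  MobSnapshotTestingUtils.verifyMobRowCount(util, tableName, expectedRows);
}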
@ -173,23 +183,25 @@ public class TestRestoreSnapshotFromClient {
return 1;
}
protected HColumnDescriptor getTestRestoreSchemaChangeHCD() {
return new HColumnDescriptor(TEST_FAMILY2);
}
@Test
public void testRestoreSchemaChange() throws Exception {
byte[] TEST_FAMILY2 = Bytes.toBytes("cf2");
Table table = TEST_UTIL.getConnection().getTable(tableName);
// Add one column family and put some data in it
admin.disableTable(tableName);
admin.addColumnFamily(tableName, new HColumnDescriptor(TEST_FAMILY2));
admin.addColumnFamily(tableName, getTestRestoreSchemaChangeHCD());
admin.enableTable(tableName);
assertEquals(2, table.getTableDescriptor().getFamilies().size());
HTableDescriptor htd = admin.getTableDescriptor(tableName);
assertEquals(2, htd.getFamilies().size());
SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 500, TEST_FAMILY2);
long snapshot2Rows = snapshot1Rows + 500;
assertEquals(snapshot2Rows, TEST_UTIL.countRows(table));
assertEquals(500, TEST_UTIL.countRows(table, TEST_FAMILY2));
assertEquals(snapshot2Rows, countRows(table));
assertEquals(500, countRows(table, TEST_FAMILY2));
Set<String> fsFamilies = getFamiliesFromFS(tableName);
assertEquals(2, fsFamilies.size());
@ -202,12 +214,12 @@ public class TestRestoreSnapshotFromClient {
admin.enableTable(tableName);
assertEquals(1, table.getTableDescriptor().getFamilies().size());
try {
TEST_UTIL.countRows(table, TEST_FAMILY2);
countRows(table, TEST_FAMILY2);
fail("family '" + Bytes.toString(TEST_FAMILY2) + "' should not exists");
} catch (NoSuchColumnFamilyException e) {
// expected
}
assertEquals(snapshot0Rows, TEST_UTIL.countRows(table));
assertEquals(snapshot0Rows, countRows(table));
htd = admin.getTableDescriptor(tableName);
assertEquals(1, htd.getFamilies().size());
fsFamilies = getFamiliesFromFS(tableName);
@ -220,8 +232,8 @@ public class TestRestoreSnapshotFromClient {
htd = admin.getTableDescriptor(tableName);
assertEquals(2, htd.getFamilies().size());
assertEquals(2, table.getTableDescriptor().getFamilies().size());
assertEquals(500, TEST_UTIL.countRows(table, TEST_FAMILY2));
assertEquals(snapshot2Rows, TEST_UTIL.countRows(table));
assertEquals(500, countRows(table, TEST_FAMILY2));
assertEquals(snapshot2Rows, countRows(table));
fsFamilies = getFamiliesFromFS(tableName);
assertEquals(2, fsFamilies.size());
table.close();
@ -232,7 +244,7 @@ public class TestRestoreSnapshotFromClient {
TableName clonedTableName =
TableName.valueOf("clonedtb-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName0, clonedTableName);
SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(clonedTableName, admin, getNumReplicas());
admin.disableTable(clonedTableName);
admin.snapshot(snapshotName2, clonedTableName);
@ -240,7 +252,7 @@ public class TestRestoreSnapshotFromClient {
waitCleanerRun();
admin.cloneSnapshot(snapshotName2, clonedTableName);
SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(clonedTableName, admin, getNumReplicas());
TEST_UTIL.deleteTable(clonedTableName);
}
@ -251,14 +263,14 @@ public class TestRestoreSnapshotFromClient {
waitCleanerRun();
admin.cloneSnapshot(snapshotName0, tableName);
SnapshotTestingUtils.verifyRowCount(TEST_UTIL, tableName, snapshot0Rows);
verifyRowCount(TEST_UTIL, tableName, snapshot0Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
waitCleanerRun();
admin.disableTable(tableName);
admin.restoreSnapshot(snapshotName0);
admin.enableTable(tableName);
SnapshotTestingUtils.verifyRowCount(TEST_UTIL, tableName, snapshot0Rows);
verifyRowCount(TEST_UTIL, tableName, snapshot0Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
}
@ -296,4 +308,13 @@ public class TestRestoreSnapshotFromClient {
}
return families;
}
protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
long expectedRows) throws IOException {
SnapshotTestingUtils.verifyRowCount(util, tableName, expectedRows);
}
protected int countRows(final Table table, final byte[]... families) throws IOException {
return TEST_UTIL.countRows(table, families);
}
}
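The protected createTable(), verifyRowCount() and countRows() hooks added above are what let the MOB-flavored tests elsewhere in this commit drop their copied bodies and override only the pieces that differ. As a minimal sketch of that pattern (illustrative only; the actual MOB subclass in this commit, and the exact import paths and field visibility it relies on, may differ slightly), a MOB restore variant would look roughly like:
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;

public class TestMobRestoreSnapshotFromClient extends TestRestoreSnapshotFromClient {
  // Sketch: build a MOB table instead of a plain one
  // (assumes TEST_UTIL, tableName and FAMILY are visible to subclasses)
  @Override
  protected void createTable() throws Exception {
    MobSnapshotTestingUtils.createMobTable(TEST_UTIL, tableName, getNumReplicas(), FAMILY);
  }

  // Sketch: route row counting through the MOB-aware helpers
  @Override
  protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
      long expectedRows) throws IOException {
    MobSnapshotTestingUtils.verifyMobRowCount(util, tableName, expectedRows);
  }

  @Override
  protected int countRows(final Table table, final byte[]... families) throws IOException {
    return MobSnapshotTestingUtils.countMobRows(table, families);
  }
}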

View File

@ -51,13 +51,13 @@ import org.junit.experimental.categories.Category;
public class TestSnapshotCloneIndependence {
private static final Log LOG = LogFactory.getLog(TestSnapshotCloneIndependence.class);
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static final int NUM_RS = 2;
protected static final int NUM_RS = 2;
private static final String STRING_TABLE_NAME = "test";
private static final String TEST_FAM_STR = "fam";
private static final byte[] TEST_FAM = Bytes.toBytes(TEST_FAM_STR);
private static final TableName TABLE_NAME = TableName.valueOf(STRING_TABLE_NAME);
protected static final byte[] TEST_FAM = Bytes.toBytes(TEST_FAM_STR);
protected static final TableName TABLE_NAME = TableName.valueOf(STRING_TABLE_NAME);
private static final int CLEANER_INTERVAL = 10;
/**
@ -70,7 +70,7 @@ public class TestSnapshotCloneIndependence {
UTIL.startMiniCluster(NUM_RS);
}
private static void setupConf(Configuration conf) {
protected static void setupConf(Configuration conf) {
// enable snapshot support
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
// disable the ui
@ -99,7 +99,7 @@ public class TestSnapshotCloneIndependence {
@Before
public void setup() throws Exception {
UTIL.createTable(TABLE_NAME, TEST_FAM);
createTable(TABLE_NAME, TEST_FAM);
}
@After
@ -213,9 +213,9 @@ public class TestSnapshotCloneIndependence {
final TableName localTableName =
TableName.valueOf(STRING_TABLE_NAME + startTime);
try (Table original = UTIL.createTable(localTableName, TEST_FAM)) {
UTIL.loadTable(original, TEST_FAM);
final int origTableRowCount = UTIL.countRows(original);
try (Table original = createTable(localTableName, TEST_FAM)) {
loadData(original, TEST_FAM);
final int origTableRowCount = countRows(original);
// Take a snapshot
final String snapshotNameAsString = "snapshot_" + localTableName;
@ -231,7 +231,7 @@ public class TestSnapshotCloneIndependence {
admin.cloneSnapshot(snapshotName, cloneTableName);
try (Table clonedTable = UTIL.getConnection().getTable(cloneTableName)) {
final int clonedTableRowCount = UTIL.countRows(clonedTable);
final int clonedTableRowCount = countRows(clonedTable);
Assert.assertEquals(
"The line counts of original and cloned tables do not match after clone. ",
@ -246,10 +246,10 @@ public class TestSnapshotCloneIndependence {
// Verify that it is not present in the original table
Assert.assertEquals("The row count of the original table was not modified by the put",
origTableRowCount + 1, UTIL.countRows(original));
origTableRowCount + 1, countRows(original));
Assert.assertEquals(
"The row count of the cloned table changed as a result of addition to the original",
clonedTableRowCount, UTIL.countRows(clonedTable));
clonedTableRowCount, countRows(clonedTable));
p = new Put(Bytes.toBytes(rowKey));
p.add(TEST_FAM, Bytes.toBytes("someQualifier"), Bytes.toBytes("someString"));
@ -258,9 +258,9 @@ public class TestSnapshotCloneIndependence {
// Verify that the new family is not in the restored table's description
Assert.assertEquals(
"The row count of the original table was modified by the put to the clone",
origTableRowCount + 1, UTIL.countRows(original));
origTableRowCount + 1, countRows(original));
Assert.assertEquals("The row count of the cloned table was not modified by the put",
clonedTableRowCount + 1, UTIL.countRows(clonedTable));
clonedTableRowCount + 1, countRows(clonedTable));
}
}
}
@ -278,9 +278,9 @@ public class TestSnapshotCloneIndependence {
final long startTime = System.currentTimeMillis();
final TableName localTableName =
TableName.valueOf(STRING_TABLE_NAME + startTime);
Table original = UTIL.createTable(localTableName, TEST_FAM);
UTIL.loadTable(original, TEST_FAM);
final int loadedTableCount = UTIL.countRows(original);
Table original = createTable(localTableName, TEST_FAM);
loadData(original, TEST_FAM);
final int loadedTableCount = countRows(original);
System.out.println("Original table has: " + loadedTableCount + " rows");
final String snapshotNameAsString = "snapshot_" + localTableName;
@ -334,8 +334,8 @@ public class TestSnapshotCloneIndependence {
final long startTime = System.currentTimeMillis();
final TableName localTableName =
TableName.valueOf(STRING_TABLE_NAME + startTime);
Table original = UTIL.createTable(localTableName, TEST_FAM);
UTIL.loadTable(original, TEST_FAM);
Table original = createTable(localTableName, TEST_FAM);
loadData(original, TEST_FAM);
final String snapshotNameAsString = "snapshot_" + localTableName;
@ -393,8 +393,8 @@ public class TestSnapshotCloneIndependence {
final TableName localTableName =
TableName.valueOf(STRING_TABLE_NAME + startTime);
try (Table original = UTIL.createTable(localTableName, TEST_FAM)) {
UTIL.loadTable(original, TEST_FAM);
try (Table original = createTable(localTableName, TEST_FAM)) {
loadData(original, TEST_FAM);
}
// Take a snapshot
@ -422,10 +422,22 @@ public class TestSnapshotCloneIndependence {
try (Table original = UTIL.getConnection().getTable(localTableName)) {
try (Table clonedTable = UTIL.getConnection().getTable(cloneTableName)) {
// Verify that all regions of both tables are readable
final int origTableRowCount = UTIL.countRows(original);
final int clonedTableRowCount = UTIL.countRows(clonedTable);
final int origTableRowCount = countRows(original);
final int clonedTableRowCount = countRows(clonedTable);
Assert.assertEquals(origTableRowCount, clonedTableRowCount);
}
}
}
protected Table createTable(final TableName table, byte[] family) throws Exception {
return UTIL.createTable(table, family);
}
protected void loadData(final Table table, byte[]... families) throws Exception {
UTIL.loadTable(table, families);
}
protected int countRows(final Table table, final byte[]... families) throws Exception {
return UTIL.countRows(table, families);
}
}

View File

@ -63,9 +63,10 @@ import com.google.common.collect.Lists;
@Category({LargeTests.class, ClientTests.class})
public class TestSnapshotFromClient {
private static final Log LOG = LogFactory.getLog(TestSnapshotFromClient.class);
protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static final int NUM_RS = 2;
private static final String STRING_TABLE_NAME = "test";
protected static final int NUM_RS = 2;
protected static final String STRING_TABLE_NAME = "test";
protected static final byte[] TEST_FAM = Bytes.toBytes("fam");
protected static final TableName TABLE_NAME =
TableName.valueOf(STRING_TABLE_NAME);
@ -80,7 +81,7 @@ public class TestSnapshotFromClient {
UTIL.startMiniCluster(NUM_RS);
}
private static void setupConf(Configuration conf) {
protected static void setupConf(Configuration conf) {
// disable the ui
conf.setInt("hbase.regionsever.info.port", -1);
// change the flush size to a small amount, regulating number of store files
@ -99,6 +100,10 @@ public class TestSnapshotFromClient {
@Before
public void setup() throws Exception {
createTable();
}
protected void createTable() throws Exception {
HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
htd.setRegionReplication(getNumReplicas());
UTIL.createTable(htd, new byte[][]{TEST_FAM}, null);

View File

@ -148,143 +148,12 @@ public class MobSnapshotTestingUtils {
// ==========================================================================
// Snapshot Mock
// ==========================================================================
public static class SnapshotMock {
private final static String TEST_FAMILY = "cf";
public final static int TEST_NUM_REGIONS = 4;
private final Configuration conf;
private final FileSystem fs;
private final Path rootDir;
static class RegionData {
public HRegionInfo hri;
public Path tableDir;
public Path[] files;
public RegionData(final Path tableDir, final HRegionInfo hri,
final int nfiles) {
this.tableDir = tableDir;
this.hri = hri;
this.files = new Path[nfiles];
}
}
public static class SnapshotBuilder {
private final RegionData[] tableRegions;
private final SnapshotDescription desc;
private final HTableDescriptor htd;
private final Configuration conf;
private final FileSystem fs;
private final Path rootDir;
private Path snapshotDir;
private int snapshotted = 0;
public SnapshotBuilder(final Configuration conf, final FileSystem fs,
final Path rootDir, final HTableDescriptor htd,
final SnapshotDescription desc, final RegionData[] tableRegions)
throws IOException {
this.fs = fs;
this.conf = conf;
this.rootDir = rootDir;
this.htd = htd;
this.desc = desc;
this.tableRegions = tableRegions;
this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc,
rootDir);
new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(
snapshotDir, new TableDescriptor(htd), false);
}
public HTableDescriptor getTableDescriptor() {
return this.htd;
}
public SnapshotDescription getSnapshotDescription() {
return this.desc;
}
public Path getSnapshotsDir() {
return this.snapshotDir;
}
public Path[] addRegion() throws IOException {
return addRegion(desc);
}
public Path[] addRegionV1() throws IOException {
return addRegion(desc.toBuilder()
.setVersion(SnapshotManifestV1.DESCRIPTOR_VERSION).build());
}
public Path[] addRegionV2() throws IOException {
return addRegion(desc.toBuilder()
.setVersion(SnapshotManifestV2.DESCRIPTOR_VERSION).build());
}
private Path[] addRegion(final SnapshotDescription desc)
throws IOException {
if (this.snapshotted == tableRegions.length) {
throw new UnsupportedOperationException(
"No more regions in the table");
}
RegionData regionData = tableRegions[this.snapshotted++];
ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(
desc.getName());
SnapshotManifest manifest = SnapshotManifest.create(conf, fs,
snapshotDir, desc, monitor);
manifest.addRegion(regionData.tableDir, regionData.hri);
return regionData.files;
}
public Path commit() throws IOException {
ForeignExceptionDispatcher monitor = new ForeignExceptionDispatcher(
desc.getName());
SnapshotManifest manifest = SnapshotManifest.create(conf, fs,
snapshotDir, desc, monitor);
manifest.addTableDescriptor(htd);
manifest.consolidate();
SnapshotDescriptionUtils.completeSnapshot(desc, rootDir, snapshotDir,
fs);
snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(desc,
rootDir);
return snapshotDir;
}
}
public SnapshotMock(final Configuration conf, final FileSystem fs,
final Path rootDir) {
this.fs = fs;
this.conf = conf;
this.rootDir = rootDir;
}
public SnapshotBuilder createSnapshotV1(final String snapshotName)
throws IOException {
return createSnapshot(snapshotName, SnapshotManifestV1.DESCRIPTOR_VERSION);
}
public SnapshotBuilder createSnapshotV2(final String snapshotName)
throws IOException {
return createSnapshot(snapshotName, SnapshotManifestV2.DESCRIPTOR_VERSION);
}
private SnapshotBuilder createSnapshot(final String snapshotName,
final int version) throws IOException {
HTableDescriptor htd = createHtd(snapshotName);
RegionData[] regions = createTable(htd, TEST_NUM_REGIONS);
SnapshotDescription desc = SnapshotDescription.newBuilder()
.setTable(htd.getNameAsString()).setName(snapshotName)
.setVersion(version).build();
Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc,
rootDir);
SnapshotDescriptionUtils.writeSnapshotInfo(desc, workingDir, fs);
return new SnapshotBuilder(conf, fs, rootDir, htd, desc, regions);
public static class SnapshotMock extends SnapshotTestingUtils.SnapshotMock {
public SnapshotMock(final Configuration conf, final FileSystem fs, final Path rootDir) {
super(conf, fs, rootDir);
}
@Override
public HTableDescriptor createHtd(final String tableName) {
HTableDescriptor htd = new HTableDescriptor(tableName);
HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
@ -293,55 +162,5 @@ public class MobSnapshotTestingUtils {
htd.addFamily(hcd);
return htd;
}
private RegionData[] createTable(final HTableDescriptor htd,
final int nregions) throws IOException {
Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName());
new FSTableDescriptors(conf).createTableDescriptorForTableDirectory(
tableDir, new TableDescriptor(htd), false);
assertTrue(nregions % 2 == 0);
RegionData[] regions = new RegionData[nregions];
for (int i = 0; i < regions.length; i += 2) {
byte[] startKey = Bytes.toBytes(0 + i * 2);
byte[] endKey = Bytes.toBytes(1 + i * 2);
// First region, simple with one plain hfile.
HRegionInfo hri = new HRegionInfo(htd.getTableName(), startKey, endKey);
HRegionFileSystem rfs = HRegionFileSystem.createRegionOnFileSystem(
conf, fs, tableDir, hri);
regions[i] = new RegionData(tableDir, hri, 3);
for (int j = 0; j < regions[i].files.length; ++j) {
Path storeFile = createStoreFile(rfs.createTempName());
regions[i].files[j] = rfs.commitStoreFile(TEST_FAMILY, storeFile);
}
// Second region, used to test the split case.
// This region contains a reference to the hfile in the first region.
startKey = Bytes.toBytes(2 + i * 2);
endKey = Bytes.toBytes(3 + i * 2);
hri = new HRegionInfo(htd.getTableName());
rfs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir,
hri);
regions[i + 1] = new RegionData(tableDir, hri, regions[i].files.length);
for (int j = 0; j < regions[i].files.length; ++j) {
String refName = regions[i].files[j].getName() + '.'
+ regions[i].hri.getEncodedName();
Path refFile = createStoreFile(new Path(rootDir, refName));
regions[i + 1].files[j] = rfs.commitStoreFile(TEST_FAMILY, refFile);
}
}
return regions;
}
private Path createStoreFile(final Path storeFile) throws IOException {
FSDataOutputStream out = fs.create(storeFile);
try {
out.write(Bytes.toBytes(storeFile.toString()));
} finally {
out.close();
}
return storeFile;
}
}
}

View File

@ -448,7 +448,7 @@ public class SnapshotTestingUtils {
// Snapshot Mock
// ==========================================================================
public static class SnapshotMock {
private final static String TEST_FAMILY = "cf";
protected final static String TEST_FAMILY = "cf";
public final static int TEST_NUM_REGIONS = 4;
private final Configuration conf;

View File

@ -68,12 +68,12 @@ public class TestExportSnapshot {
protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final static byte[] FAMILY = Bytes.toBytes("cf");
protected final static byte[] FAMILY = Bytes.toBytes("cf");
protected TableName tableName;
private byte[] emptySnapshotName;
private byte[] snapshotName;
private int tableNumFiles;
private TableName tableName;
private Admin admin;
public static void setUpBaseConf(Configuration conf) {
@ -111,7 +111,7 @@ public class TestExportSnapshot {
emptySnapshotName = Bytes.toBytes("emptySnaptb0-" + tid);
// create Table
SnapshotTestingUtils.createTable(TEST_UTIL, tableName, FAMILY);
createTable();
// Take an empty snapshot
admin.snapshot(emptySnapshotName, tableName);
@ -124,6 +124,10 @@ public class TestExportSnapshot {
admin.snapshot(snapshotName, tableName);
}
protected void createTable() throws Exception {
SnapshotTestingUtils.createTable(TEST_UTIL, tableName, FAMILY);
}
@After
public void tearDown() throws Exception {
TEST_UTIL.deleteTable(tableName);
@ -356,6 +360,10 @@ public class TestExportSnapshot {
assertEquals(listFiles(fs1, root1, root1), listFiles(fs2, root2, root2));
}
protected boolean bypassRegion(HRegionInfo regionInfo) {
return false;
}
/*
* Verify that the files exist
*/
@ -370,6 +378,9 @@ public class TestExportSnapshot {
@Override
public void storeFile(final HRegionInfo regionInfo, final String family,
final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
if (bypassRegion(regionInfo))
return;
String hfile = storeFile.getName();
snapshotFiles.add(hfile);
if (storeFile.hasReference()) {

View File

@ -67,28 +67,20 @@ import org.junit.experimental.categories.Category;
@Category({RegionServerTests.class, LargeTests.class})
public class TestFlushSnapshotFromClient {
private static final Log LOG = LogFactory.getLog(TestFlushSnapshotFromClient.class);
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static final int NUM_RS = 2;
private static final byte[] TEST_FAM = Bytes.toBytes("fam");
private static final TableName TABLE_NAME = TableName.valueOf("test");
private final int DEFAULT_NUM_ROWS = 100;
/**
* Setup the config for the cluster
* @throws Exception on failure
*/
protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
protected static final int NUM_RS = 2;
protected static final byte[] TEST_FAM = Bytes.toBytes("fam");
protected static final TableName TABLE_NAME = TableName.valueOf("test");
protected final int DEFAULT_NUM_ROWS = 100;
@BeforeClass
public static void setupCluster() throws Exception {
// Uncomment the following lines if more verbosity is needed for
// debugging (see HBASE-12285 for details).
//((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
//((Log4JLogger)AbstractRpcClient.LOG).getLogger().setLevel(Level.ALL);
//((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
setupConf(UTIL.getConfiguration());
UTIL.startMiniCluster(NUM_RS);
}
private static void setupConf(Configuration conf) {
protected static void setupConf(Configuration conf) {
// disable the ui
conf.setInt("hbase.regionsever.info.port", -1);
// change the flush size to a small amount, regulating number of store files
@ -107,6 +99,10 @@ public class TestFlushSnapshotFromClient {
@Before
public void setup() throws Exception {
createTable();
}
protected void createTable() throws Exception {
SnapshotTestingUtils.createTable(UTIL, TABLE_NAME, TEST_FAM);
}
@ -344,9 +340,9 @@ public class TestFlushSnapshotFromClient {
admin.cloneSnapshot(snapshotBeforeMergeName, cloneAfterMergeName);
SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneAfterMergeName);
SnapshotTestingUtils.verifyRowCount(UTIL, TABLE_NAME, numRows);
SnapshotTestingUtils.verifyRowCount(UTIL, cloneBeforeMergeName, numRows);
SnapshotTestingUtils.verifyRowCount(UTIL, cloneAfterMergeName, numRows);
verifyRowCount(UTIL, TABLE_NAME, numRows);
verifyRowCount(UTIL, cloneBeforeMergeName, numRows);
verifyRowCount(UTIL, cloneAfterMergeName, numRows);
// test that we can delete the snapshot
UTIL.deleteTable(cloneAfterMergeName);
@ -390,8 +386,8 @@ public class TestFlushSnapshotFromClient {
admin.cloneSnapshot(snapshotName, cloneName);
SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneName);
SnapshotTestingUtils.verifyRowCount(UTIL, TABLE_NAME, numRows);
SnapshotTestingUtils.verifyRowCount(UTIL, cloneName, numRows);
verifyRowCount(UTIL, TABLE_NAME, numRows);
verifyRowCount(UTIL, cloneName, numRows);
// test that we can delete the snapshot
UTIL.deleteTable(cloneName);
@ -539,4 +535,14 @@ public class TestFlushSnapshotFromClient {
}
SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TABLE_NAME);
}
protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
long expectedRows) throws IOException {
SnapshotTestingUtils.verifyRowCount(util, tableName, expectedRows);
}
protected int countRows(final Table table, final byte[]... families) throws IOException {
return UTIL.countRows(table, families);
}
}

View File

@ -18,69 +18,30 @@
package org.apache.hadoop.hbase.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* Test Export Snapshot Tool
*/
@Category(MediumTests.class)
public class TestMobExportSnapshot {
public class TestMobExportSnapshot extends TestExportSnapshot {
private final Log LOG = LogFactory.getLog(getClass());
protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final static byte[] FAMILY = Bytes.toBytes("cf");
private byte[] emptySnapshotName;
private byte[] snapshotName;
private int tableNumFiles;
private TableName tableName;
private Admin admin;
public static void setUpBaseConf(Configuration conf) {
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
conf.setInt("hbase.regionserver.msginterval", 100);
conf.setInt("hbase.client.pause", 250);
conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
conf.setBoolean("hbase.master.enabletable.roundrobin", true);
conf.setInt("mapreduce.map.maxattempts", 10);
TestExportSnapshot.setUpBaseConf(conf);
conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
}
@ -91,342 +52,13 @@ public class TestMobExportSnapshot {
TEST_UTIL.startMiniMapReduceCluster();
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniMapReduceCluster();
TEST_UTIL.shutdownMiniCluster();
}
/**
* Create a table and take a snapshot of the table used by the export test.
*/
@Before
public void setUp() throws Exception {
this.admin = TEST_UTIL.getHBaseAdmin();
long tid = System.currentTimeMillis();
tableName = TableName.valueOf("testtb-" + tid);
snapshotName = Bytes.toBytes("snaptb0-" + tid);
emptySnapshotName = Bytes.toBytes("emptySnaptb0-" + tid);
// create Table
MobSnapshotTestingUtils.createMobTable(TEST_UTIL, tableName, 1, FAMILY);
// Take an empty snapshot
admin.snapshot(emptySnapshotName, tableName);
// Add some rows
SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, FAMILY);
tableNumFiles = admin.getTableRegions(tableName).size();
// take a snapshot
admin.snapshot(snapshotName, tableName);
}
@After
public void tearDown() throws Exception {
TEST_UTIL.deleteTable(tableName);
SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getHBaseAdmin());
SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
}
/**
* Verify the result of the getBalancedSplits() method.
* The result is a set of groups of files, used as the input list for the "export" mappers.
* All the groups should hold a similar amount of data.
*
* The input list is a pair of file path and length.
* getBalancedSplits() sorts the pairs by length and assigns a file to each group,
* going back and forth through the groups (a standalone sketch of this strategy follows verifyBalanceSplit() below).
*/
@Test
public void testBalanceSplit() throws Exception {
// Create a list of files
List<Pair<SnapshotFileInfo, Long>> files = new ArrayList<Pair<SnapshotFileInfo, Long>>();
for (long i = 0; i <= 20; i++) {
SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder()
.setType(SnapshotFileInfo.Type.HFILE)
.setHfile("file-" + i)
.build();
files.add(new Pair<SnapshotFileInfo, Long>(fileInfo, i));
}
// Create 5 groups (total size 210)
// group 0: 20, 11, 10, 1, 0 (total size: 42)
// group 1: 19, 12, 9, 2 (total size: 42)
// group 2: 18, 13, 8, 3 (total size: 42)
// group 3: 17, 14, 7, 4 (total size: 42)
// group 4: 16, 15, 6, 5 (total size: 42)
List<List<Pair<SnapshotFileInfo, Long>>> splits = ExportSnapshot.getBalancedSplits(files, 5);
assertEquals(5, splits.size());
String[] split0 = new String[] {"file-20", "file-11", "file-10", "file-1", "file-0"};
verifyBalanceSplit(splits.get(0), split0, 42);
String[] split1 = new String[] {"file-19", "file-12", "file-9", "file-2"};
verifyBalanceSplit(splits.get(1), split1, 42);
String[] split2 = new String[] {"file-18", "file-13", "file-8", "file-3"};
verifyBalanceSplit(splits.get(2), split2, 42);
String[] split3 = new String[] {"file-17", "file-14", "file-7", "file-4"};
verifyBalanceSplit(splits.get(3), split3, 42);
String[] split4 = new String[] {"file-16", "file-15", "file-6", "file-5"};
verifyBalanceSplit(splits.get(4), split4, 42);
}
private void verifyBalanceSplit(final List<Pair<SnapshotFileInfo, Long>> split,
final String[] expected, final long expectedSize) {
assertEquals(expected.length, split.size());
long totalSize = 0;
for (int i = 0; i < expected.length; ++i) {
Pair<SnapshotFileInfo, Long> fileInfo = split.get(i);
assertEquals(expected[i], fileInfo.getFirst().getHfile());
totalSize += fileInfo.getSecond();
}
assertEquals(expectedSize, totalSize);
}
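The Javadoc above only describes the balancing strategy in words. Below is a small, self-contained illustration of that back-and-forth (snake) assignment; it is not the actual ExportSnapshot.getBalancedSplits() code, just a simplified model that works on file sizes alone. Fed the sizes 0..20 with 5 groups, it reproduces the five groups of total size 42 asserted in testBalanceSplit().
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class BalancedSplitsSketch {
  // Sketch: spread file sizes across nGroups so that the group totals stay similar.
  // Sort descending, then deal the sizes to the groups back and forth (snake order).
  static List<List<Long>> balance(List<Long> fileSizes, int nGroups) {
    List<Long> sorted = new ArrayList<Long>(fileSizes);
    Collections.sort(sorted, Collections.reverseOrder());
    List<List<Long>> groups = new ArrayList<List<Long>>();
    for (int i = 0; i < nGroups; i++) {
      groups.add(new ArrayList<Long>());
    }
    int g = 0;
    int dir = 1;
    for (Long size : sorted) {
      groups.get(g).add(size);
      if (g + dir < 0 || g + dir >= nGroups) {
        dir = -dir;   // bounce at either end, reusing the edge group once
      } else {
        g += dir;
      }
    }
    return groups;
  }
}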
/**
* Verify that the exported snapshot and the copied files match the original ones.
*/
@Test
public void testExportFileSystemState() throws Exception {
testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles);
}
@Test
public void testExportFileSystemStateWithSkipTmp() throws Exception {
TEST_UTIL.getConfiguration().setBoolean(ExportSnapshot.CONF_SKIP_TMP, true);
testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles);
}
@Test
public void testEmptyExportFileSystemState() throws Exception {
testExportFileSystemState(tableName, emptySnapshotName, emptySnapshotName, 0);
}
@Test
public void testConsecutiveExports() throws Exception {
Path copyDir = getLocalDestinationDir();
testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles, copyDir, false);
testExportFileSystemState(tableName, snapshotName, snapshotName, tableNumFiles, copyDir, true);
removeExportDir(copyDir);
}
@Test
public void testExportWithTargetName() throws Exception {
final byte[] targetName = Bytes.toBytes("testExportWithTargetName");
testExportFileSystemState(tableName, snapshotName, targetName, tableNumFiles);
}
/**
* Mock a snapshot with files in the archive dir,
* two regions, and one reference file.
*/
@Test
public void testSnapshotWithRefsExportFileSystemState() throws Exception {
Configuration conf = TEST_UTIL.getConfiguration();
Path rootDir = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
FileSystem fs = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
SnapshotMock snapshotMock = new SnapshotMock(TEST_UTIL.getConfiguration(), fs, rootDir);
SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2("tableWithRefsV1");
testSnapshotWithRefsExportFileSystemState(builder);
snapshotMock = new SnapshotMock(TEST_UTIL.getConfiguration(), fs, rootDir);
builder = snapshotMock.createSnapshotV2("tableWithRefsV2");
testSnapshotWithRefsExportFileSystemState(builder);
}
/**
* Generates a couple of regions for the specified SnapshotMock,
* and then runs the export and verification.
*/
private void testSnapshotWithRefsExportFileSystemState(SnapshotMock.SnapshotBuilder builder)
throws Exception {
Path[] r1Files = builder.addRegion();
Path[] r2Files = builder.addRegion();
builder.commit();
int snapshotFilesCount = r1Files.length + r2Files.length;
byte[] snapshotName = Bytes.toBytes(builder.getSnapshotDescription().getName());
TableName tableName = builder.getTableDescriptor().getTableName();
testExportFileSystemState(tableName, snapshotName, snapshotName, snapshotFilesCount);
}
private void testExportFileSystemState(final TableName tableName, final byte[] snapshotName,
final byte[] targetName, int filesExpected) throws Exception {
Path copyDir = getHdfsDestinationDir();
testExportFileSystemState(tableName, snapshotName, targetName, filesExpected, copyDir, false);
removeExportDir(copyDir);
}
/**
* Test ExportSnapshot
*/
private void testExportFileSystemState(final TableName tableName, final byte[] snapshotName,
final byte[] targetName, int filesExpected, Path copyDir, boolean overwrite)
throws Exception {
URI hdfsUri = FileSystem.get(TEST_UTIL.getConfiguration()).getUri();
FileSystem fs = FileSystem.get(copyDir.toUri(), new Configuration());
copyDir = copyDir.makeQualified(fs);
List<String> opts = new ArrayList<String>();
opts.add("-snapshot");
opts.add(Bytes.toString(snapshotName));
opts.add("-copy-to");
opts.add(copyDir.toString());
if (targetName != snapshotName) {
opts.add("-target");
opts.add(Bytes.toString(targetName));
}
if (overwrite) opts.add("-overwrite");
// Export Snapshot
int res = ExportSnapshot.innerMain(TEST_UTIL.getConfiguration(),
opts.toArray(new String[opts.size()]));
assertEquals(0, res);
// Verify File-System state
FileStatus[] rootFiles = fs.listStatus(copyDir);
assertEquals(filesExpected > 0 ? 2 : 1, rootFiles.length);
for (FileStatus fileStatus: rootFiles) {
String name = fileStatus.getPath().getName();
assertTrue(fileStatus.isDirectory());
assertTrue(name.equals(HConstants.SNAPSHOT_DIR_NAME) ||
name.equals(HConstants.HFILE_ARCHIVE_DIRECTORY));
}
// compare the snapshot metadata and verify the hfiles
final FileSystem hdfs = FileSystem.get(hdfsUri, TEST_UTIL.getConfiguration());
final Path snapshotDir = new Path(HConstants.SNAPSHOT_DIR_NAME, Bytes.toString(snapshotName));
final Path targetDir = new Path(HConstants.SNAPSHOT_DIR_NAME, Bytes.toString(targetName));
verifySnapshotDir(hdfs, new Path(TEST_UTIL.getDefaultRootDirPath(), snapshotDir),
fs, new Path(copyDir, targetDir));
Set<String> snapshotFiles = verifySnapshot(fs, copyDir, tableName, Bytes.toString(targetName));
assertEquals(filesExpected, snapshotFiles.size());
}
/**
* Check that ExportSnapshot will return a failure if something fails.
*/
@Test
public void testExportFailure() throws Exception {
assertEquals(1, runExportAndInjectFailures(snapshotName, false));
}
/**
* Check that ExportSnapshot succeeds if something fails but the retry succeeds.
*/
@Test
public void testExportRetry() throws Exception {
assertEquals(0, runExportAndInjectFailures(snapshotName, true));
}
/*
* Execute the ExportSnapshot job injecting failures
*/
private int runExportAndInjectFailures(final byte[] snapshotName, boolean retry)
throws Exception {
Path copyDir = getLocalDestinationDir();
URI hdfsUri = FileSystem.get(TEST_UTIL.getConfiguration()).getUri();
FileSystem fs = FileSystem.get(copyDir.toUri(), new Configuration());
copyDir = copyDir.makeQualified(fs);
Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
conf.setBoolean(ExportSnapshot.CONF_TEST_FAILURE, true);
conf.setBoolean(ExportSnapshot.CONF_TEST_RETRY, retry);
// Export Snapshot
Path sourceDir = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
int res = ExportSnapshot.innerMain(conf, new String[] {
"-snapshot", Bytes.toString(snapshotName),
"-copy-from", sourceDir.toString(),
"-copy-to", copyDir.toString()
});
return res;
}
/*
* Verify that the snapshot folder on file-system 1 matches the one on file-system 2
*/
private void verifySnapshotDir(final FileSystem fs1, final Path root1,
final FileSystem fs2, final Path root2) throws IOException {
assertEquals(listFiles(fs1, root1, root1), listFiles(fs2, root2, root2));
}
/*
* Verify that the files exist
*/
private Set<String> verifySnapshot(final FileSystem fs, final Path rootDir,
final TableName tableName, final String snapshotName) throws IOException {
final Path exportedSnapshot = new Path(rootDir,
new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName));
final Set<String> snapshotFiles = new HashSet<String>();
final Path exportedArchive = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
SnapshotReferenceUtil.visitReferencedFiles(TEST_UTIL.getConfiguration(), fs, exportedSnapshot,
new SnapshotReferenceUtil.SnapshotVisitor() {
@Override
public void storeFile(final HRegionInfo regionInfo, final String family,
final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
if(MobUtils.isMobRegionInfo(regionInfo))
return;
String hfile = storeFile.getName();
snapshotFiles.add(hfile);
if (storeFile.hasReference()) {
// Nothing to do here, we have already the reference embedded
} else {
verifyNonEmptyFile(new Path(exportedArchive,
new Path(FSUtils.getTableDir(new Path("./"), tableName),
new Path(regionInfo.getEncodedName(), new Path(family, hfile)))));
}
protected void createTable() throws Exception {
MobSnapshotTestingUtils.createMobTable(TEST_UTIL, tableName, 1, FAMILY);
}
private void verifyNonEmptyFile(final Path path) throws IOException {
assertTrue(path + " should exists", fs.exists(path));
assertTrue(path + " should not be empty", fs.getFileStatus(path).getLen() > 0);
}
});
// Verify Snapshot description
SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, exportedSnapshot);
assertTrue(desc.getName().equals(snapshotName));
assertTrue(desc.getTable().equals(tableName.getNameAsString()));
return snapshotFiles;
}
private Set<String> listFiles(final FileSystem fs, final Path root, final Path dir)
throws IOException {
Set<String> files = new HashSet<String>();
int rootPrefix = root.toString().length();
FileStatus[] list = FSUtils.listStatus(fs, dir);
if (list != null) {
for (FileStatus fstat: list) {
LOG.debug(fstat.getPath());
if (fstat.isDirectory()) {
files.addAll(listFiles(fs, root, fstat.getPath()));
} else {
files.add(fstat.getPath().toString().substring(rootPrefix));
}
}
}
return files;
}
private Path getHdfsDestinationDir() {
Path rootDir = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
Path path = new Path(new Path(rootDir, "export-test"), "export-" + System.currentTimeMillis());
LOG.info("HDFS export destination path: " + path);
return path;
}
private Path getLocalDestinationDir() {
Path path = TEST_UTIL.getDataTestDir("local-export-" + System.currentTimeMillis());
LOG.info("Local export destination path: " + path);
return path;
}
private void removeExportDir(final Path path) throws IOException {
FileSystem fs = FileSystem.get(path.toUri(), new Configuration());
fs.delete(path, true);
@Override
protected boolean bypassRegion(HRegionInfo regionInfo) {
return MobUtils.isMobRegionInfo(regionInfo);
}
}

View File

@ -17,44 +17,19 @@
*/
package org.apache.hadoop.hbase.snapshot;
import static org.junit.Assert.*;
import java.io.IOException;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
@ -66,482 +41,33 @@ import org.junit.experimental.categories.Category;
* because there will be a few more flavors of snapshots that need to run these tests.
*/
@Category({ClientTests.class, LargeTests.class})
public class TestMobFlushSnapshotFromClient {
public class TestMobFlushSnapshotFromClient extends TestFlushSnapshotFromClient {
private static final Log LOG = LogFactory.getLog(TestFlushSnapshotFromClient.class);
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
private static final int NUM_RS = 2;
private static final String STRING_TABLE_NAME = "test";
private static final byte[] TEST_FAM = Bytes.toBytes("fam");
private static final TableName TABLE_NAME =
TableName.valueOf(STRING_TABLE_NAME);
private final int DEFAULT_NUM_ROWS = 100;
/**
* Setup the config for the cluster
* @throws Exception on failure
*/
@BeforeClass
public static void setupCluster() throws Exception {
((Log4JLogger)RpcServer.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
setupConf(UTIL.getConfiguration());
UTIL.startMiniCluster(NUM_RS);
UTIL.startMiniCluster(3);
}
private static void setupConf(Configuration conf) {
// disable the ui
conf.setInt("hbase.regionsever.info.port", -1);
// change the flush size to a small amount, regulating number of store files
conf.setInt("hbase.hregion.memstore.flush.size", 25000);
// so make sure we get a compaction when doing a load, but keep around some
// files in the store
conf.setInt("hbase.hstore.compaction.min", 10);
conf.setInt("hbase.hstore.compactionThreshold", 10);
// block writes if we get to 12 store files
conf.setInt("hbase.hstore.blockingStoreFiles", 12);
// Enable snapshot
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
ConstantSizeRegionSplitPolicy.class.getName());
conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
}
@Before
public void setup() throws Exception {
MobSnapshotTestingUtils.createMobTable(UTIL, TABLE_NAME, 1, TEST_FAM);
}
@After
public void tearDown() throws Exception {
UTIL.deleteTable(TABLE_NAME);
SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
}
@AfterClass
public static void cleanupTest() throws Exception {
try {
UTIL.shutdownMiniCluster();
} catch (Exception e) {
LOG.warn("failure shutting down cluster", e);
}
}
/**
* Test simple flush snapshotting a table that is online
* @throws Exception
*/
@Test (timeout=300000)
public void testFlushTableSnapshot() throws Exception {
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
// put some stuff in the table
Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME);
SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
LOG.debug("FS state before snapshot:");
FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
// take a snapshot of the enabled table
String snapshotString = "offlineTableSnapshot";
byte[] snapshot = Bytes.toBytes(snapshotString);
admin.snapshot(snapshotString, TABLE_NAME, SnapshotDescription.Type.FLUSH);
LOG.debug("Snapshot completed.");
// make sure we have the snapshot
List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
snapshot, TABLE_NAME);
// make sure its a valid snapshot
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
LOG.debug("FS state after snapshot:");
FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
admin, fs);
}
/**
* Test snapshotting a table that is online without flushing
* @throws Exception
*/
@Test(timeout=30000)
public void testSkipFlushTableSnapshot() throws Exception {
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
// put some stuff in the table
Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME);
UTIL.loadTable(table, TEST_FAM);
LOG.debug("FS state before snapshot:");
FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
// take a snapshot of the enabled table
String snapshotString = "skipFlushTableSnapshot";
byte[] snapshot = Bytes.toBytes(snapshotString);
admin.snapshot(snapshotString, TABLE_NAME, SnapshotDescription.Type.SKIPFLUSH);
LOG.debug("Snapshot completed.");
// make sure we have the snapshot
List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
snapshot, TABLE_NAME);
// make sure its a valid snapshot
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
LOG.debug("FS state after snapshot:");
FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
admin, fs);
admin.deleteSnapshot(snapshot);
snapshots = admin.listSnapshots();
SnapshotTestingUtils.assertNoSnapshots(admin);
}
/**
* Test simple flush snapshotting a table that is online
* @throws Exception
*/
@Test (timeout=300000)
public void testFlushTableSnapshotWithProcedure() throws Exception {
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
// put some stuff in the table
Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(TABLE_NAME);
SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
LOG.debug("FS state before snapshot:");
FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
// take a snapshot of the enabled table
String snapshotString = "offlineTableSnapshot";
byte[] snapshot = Bytes.toBytes(snapshotString);
Map<String, String> props = new HashMap<String, String>();
props.put("table", TABLE_NAME.getNameAsString());
admin.execProcedure(SnapshotManager.ONLINE_SNAPSHOT_CONTROLLER_DESCRIPTION,
snapshotString, props);
LOG.debug("Snapshot completed.");
// make sure we have the snapshot
List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
snapshot, TABLE_NAME);
// make sure its a valid snapshot
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
LOG.debug("FS state after snapshot:");
FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
admin, fs);
}
@Test (timeout=300000)
public void testSnapshotFailsOnNonExistantTable() throws Exception {
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
TableName tableName = TableName.valueOf("_not_a_table");
// make sure the table doesn't exist
boolean fail = false;
do {
try {
admin.getTableDescriptor(tableName);
fail = true;
LOG.error("Table:" + tableName + " already exists, checking a new name");
tableName = TableName.valueOf(tableName+"!");
} catch (TableNotFoundException e) {
fail = false;
}
} while (fail);
// snapshot the non-existent table
try {
admin.snapshot("fail", tableName, SnapshotDescription.Type.FLUSH);
fail("Snapshot succeeded even though there is not table.");
} catch (SnapshotCreationException e) {
LOG.info("Correctly failed to snapshot a non-existant table:" + e.getMessage());
}
}
@Test(timeout = 300000)
public void testAsyncFlushSnapshot() throws Exception {
Admin admin = UTIL.getHBaseAdmin();
SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("asyncSnapshot")
.setTable(TABLE_NAME.getNameAsString())
.setType(SnapshotDescription.Type.FLUSH)
.build();
// take the snapshot async
admin.takeSnapshotAsync(snapshot);
// constantly loop, looking for the snapshot to complete
HMaster master = UTIL.getMiniHBaseCluster().getMaster();
SnapshotTestingUtils.waitForSnapshotToComplete(master, snapshot, 200);
LOG.info(" === Async Snapshot Completed ===");
FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
// make sure we get the snapshot
SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot);
}
@Test (timeout=300000)
public void testSnapshotStateAfterMerge() throws Exception {
int numRows = DEFAULT_NUM_ROWS;
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
// load the table so we have some data
SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, numRows, TEST_FAM);
// Take a snapshot
String snapshotBeforeMergeName = "snapshotBeforeMerge";
admin.snapshot(snapshotBeforeMergeName, TABLE_NAME, SnapshotDescription.Type.FLUSH);
// Clone the table
TableName cloneBeforeMergeName = TableName.valueOf("cloneBeforeMerge");
admin.cloneSnapshot(snapshotBeforeMergeName, cloneBeforeMergeName);
SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneBeforeMergeName);
// Merge two regions
List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
Collections.sort(regions, new Comparator<HRegionInfo>() {
public int compare(HRegionInfo r1, HRegionInfo r2) {
return Bytes.compareTo(r1.getStartKey(), r2.getStartKey());
}
});
int numRegions = admin.getTableRegions(TABLE_NAME).size();
int numRegionsAfterMerge = numRegions - 2;
admin.mergeRegions(regions.get(1).getEncodedNameAsBytes(),
regions.get(2).getEncodedNameAsBytes(), true);
admin.mergeRegions(regions.get(5).getEncodedNameAsBytes(),
regions.get(6).getEncodedNameAsBytes(), true);
// Verify that there's one region less
waitRegionsAfterMerge(numRegionsAfterMerge);
assertEquals(numRegionsAfterMerge, admin.getTableRegions(TABLE_NAME).size());
// Clone the table
TableName cloneAfterMergeName = TableName.valueOf("cloneAfterMerge");
admin.cloneSnapshot(snapshotBeforeMergeName, cloneAfterMergeName);
SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneAfterMergeName);
MobSnapshotTestingUtils.verifyMobRowCount(UTIL, TABLE_NAME, numRows);
MobSnapshotTestingUtils.verifyMobRowCount(UTIL, cloneBeforeMergeName, numRows);
MobSnapshotTestingUtils.verifyMobRowCount(UTIL, cloneAfterMergeName, numRows);
// test that we can delete the snapshot
UTIL.deleteTable(cloneAfterMergeName);
UTIL.deleteTable(cloneBeforeMergeName);
}
@Test (timeout=300000)
public void testTakeSnapshotAfterMerge() throws Exception {
int numRows = DEFAULT_NUM_ROWS;
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
// load the table so we have some data
SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, numRows, TEST_FAM);
// Merge two regions
List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
Collections.sort(regions, new Comparator<HRegionInfo>() {
public int compare(HRegionInfo r1, HRegionInfo r2) {
return Bytes.compareTo(r1.getStartKey(), r2.getStartKey());
}
});
int numRegions = admin.getTableRegions(TABLE_NAME).size();
int numRegionsAfterMerge = numRegions - 2;
admin.mergeRegions(regions.get(1).getEncodedNameAsBytes(),
regions.get(2).getEncodedNameAsBytes(), true);
admin.mergeRegions(regions.get(5).getEncodedNameAsBytes(),
regions.get(6).getEncodedNameAsBytes(), true);
waitRegionsAfterMerge(numRegionsAfterMerge);
assertEquals(numRegionsAfterMerge, admin.getTableRegions(TABLE_NAME).size());
// Take a snapshot
String snapshotName = "snapshotAfterMerge";
SnapshotTestingUtils.snapshot(admin, snapshotName, TABLE_NAME.getNameAsString(),
SnapshotDescription.Type.FLUSH, 3);
// Clone the table
TableName cloneName = TableName.valueOf("cloneMerge");
admin.cloneSnapshot(snapshotName, cloneName);
SnapshotTestingUtils.waitForTableToBeOnline(UTIL, cloneName);
MobSnapshotTestingUtils.verifyMobRowCount(UTIL, TABLE_NAME, numRows);
MobSnapshotTestingUtils.verifyMobRowCount(UTIL, cloneName, numRows);
// test that we can delete the snapshot
UTIL.deleteTable(cloneName);
}
/**
* Basic end-to-end test of simple-flush-based snapshots
*/
@Test (timeout=300000)
public void testFlushCreateListDestroy() throws Exception {
LOG.debug("------- Starting Snapshot test -------------");
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
// load the table so we have some data
SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
String snapshotName = "flushSnapshotCreateListDestroy";
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
SnapshotTestingUtils.createSnapshotAndValidate(admin, TABLE_NAME, Bytes.toString(TEST_FAM),
snapshotName, rootDir, fs, true);
}
/**
* Demonstrate that we reject a snapshot request when a snapshot is already running on the
* same table, and that concurrent snapshots on different tables can both
* succeed concurrently.
*/
@Test(timeout=300000)
public void testConcurrentSnapshottingAttempts() throws IOException, InterruptedException {
final String STRING_TABLE2_NAME = STRING_TABLE_NAME + "2";
final TableName TABLE2_NAME =
TableName.valueOf(STRING_TABLE2_NAME);
int ssNum = 20;
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
// create second testing table
SnapshotTestingUtils.createTable(UTIL, TABLE2_NAME, TEST_FAM);
// load the table so we have some data
SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
SnapshotTestingUtils.loadData(UTIL, TABLE2_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
final CountDownLatch toBeSubmitted = new CountDownLatch(ssNum);
// We'll have one of these per thread
class SSRunnable implements Runnable {
SnapshotDescription ss;
SSRunnable(SnapshotDescription ss) {
this.ss = ss;
protected static void setupConf(Configuration conf) {
TestFlushSnapshotFromClient.setupConf(conf);
UTIL.getConfiguration().setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
}
@Override
public void run() {
try {
Admin admin = UTIL.getHBaseAdmin();
LOG.info("Submitting snapshot request: " + ClientSnapshotDescriptionUtils.toString(ss));
admin.takeSnapshotAsync(ss);
} catch (Exception e) {
LOG.info("Exception during snapshot request: " + ClientSnapshotDescriptionUtils.toString(
ss)
+ ". This is ok, we expect some", e);
}
LOG.info("Submitted snapshot request: " + ClientSnapshotDescriptionUtils.toString(ss));
toBeSubmitted.countDown();
}
};
// build descriptions
SnapshotDescription[] descs = new SnapshotDescription[ssNum];
for (int i = 0; i < ssNum; i++) {
SnapshotDescription.Builder builder = SnapshotDescription.newBuilder();
builder.setTable(((i % 2) == 0 ? TABLE_NAME : TABLE2_NAME).getNameAsString());
builder.setName("ss"+i);
builder.setType(SnapshotDescription.Type.FLUSH);
descs[i] = builder.build();
protected void createTable() throws Exception {
MobSnapshotTestingUtils.createMobTable(UTIL, TABLE_NAME, 1, TEST_FAM);
}
// kick each off its own thread
for (int i=0 ; i < ssNum; i++) {
new Thread(new SSRunnable(descs[i])).start();
@Override
protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
long expectedRows) throws IOException {
MobSnapshotTestingUtils.verifyMobRowCount(util, tableName, expectedRows);
}
// wait until all have been submitted
toBeSubmitted.await();
// loop until all are done.
while (true) {
int doneCount = 0;
for (SnapshotDescription ss : descs) {
try {
if (admin.isSnapshotFinished(ss)) {
doneCount++;
}
} catch (Exception e) {
LOG.warn("Got an exception when checking for snapshot " + ss.getName(), e);
doneCount++;
}
}
if (doneCount == descs.length) {
break;
}
Thread.sleep(100);
}
// dump for debugging
logFSTree(FSUtils.getRootDir(UTIL.getConfiguration()));
List<SnapshotDescription> taken = admin.listSnapshots();
int takenSize = taken.size();
LOG.info("Taken " + takenSize + " snapshots: " + taken);
assertTrue("We expect at least 1 request to be rejected because of we concurrently" +
" issued many requests", takenSize < ssNum && takenSize > 0);
// Verify that there's at least one snapshot per table
int t1SnapshotsCount = 0;
int t2SnapshotsCount = 0;
for (SnapshotDescription ss : taken) {
if (TableName.valueOf(ss.getTable()).equals(TABLE_NAME)) {
t1SnapshotsCount++;
} else if (TableName.valueOf(ss.getTable()).equals(TABLE2_NAME)) {
t2SnapshotsCount++;
}
}
assertTrue("We expect at least 1 snapshot of table1 ", t1SnapshotsCount > 0);
assertTrue("We expect at least 1 snapshot of table2 ", t2SnapshotsCount > 0);
UTIL.deleteTable(TABLE2_NAME);
}
private void logFSTree(Path root) throws IOException {
FSUtils.logFileSystemState(UTIL.getDFSCluster().getFileSystem(), root, LOG);
}
private void waitRegionsAfterMerge(final long numRegionsAfterMerge)
throws IOException, InterruptedException {
Admin admin = UTIL.getHBaseAdmin();
// Verify that there's one region less
long startTime = System.currentTimeMillis();
while (admin.getTableRegions(TABLE_NAME).size() != numRegionsAfterMerge) {
// This may be flaky... if the merge is not complete after 15sec, give up;
// the assertEquals(numRegionsAfterMerge) that follows will then fail.
if ((System.currentTimeMillis() - startTime) > 15000)
break;
Thread.sleep(100);
}
SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TABLE_NAME);
@Override
protected int countRows(final Table table, final byte[]... families) throws IOException {
return MobSnapshotTestingUtils.countMobRows(table, families);
}
}
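The shape all of these MOB diffs converge on is a plain template method: the base test class owns the scenario and routes table creation and row verification through protected hooks, so the MOB flavor only swaps in the MOB-aware utilities. The sketch below is illustrative only; the class names (BaseSnapshotRowTest, MobSnapshotRowTest) are made up and the print statements stand in for the real table operations.

// Illustrative sketch of the hook pattern; not the actual HBase test classes.
class BaseSnapshotRowTest {
  // The shared scenario lives only here and never mentions MOB.
  public void runScenario() throws Exception {
    createTable();
    long rows = countRows();
    System.out.println("verified " + rows + " rows");
  }

  // Hooks with the default (non-MOB) behavior.
  protected void createTable() throws Exception {
    System.out.println("create a plain table");
  }

  protected long countRows() throws Exception {
    return 500;  // stand-in for a normal scan
  }
}

class MobSnapshotRowTest extends BaseSnapshotRowTest {
  // The MOB flavor overrides only the hooks, never the scenario.
  @Override
  protected void createTable() throws Exception {
    System.out.println("create a MOB-enabled table");
  }

  @Override
  protected long countRows() throws Exception {
    return 500;  // stand-in for a scan that resolves MOB references
  }
}

Because the shared @Test methods live only in the base class, the MOB files shrink to the handful of overrides visible in this diff.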

View File

@ -21,26 +21,15 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
@ -50,162 +39,33 @@ import org.junit.experimental.categories.Category;
* this because there will be a few more flavors of snapshots that need to run these tests.
*/
@Category({ClientTests.class,LargeTests.class})
public class TestMobRestoreFlushSnapshotFromClient {
public class TestMobRestoreFlushSnapshotFromClient extends TestRestoreFlushSnapshotFromClient {
final Log LOG = LogFactory.getLog(getClass());
private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
private final byte[] FAMILY = Bytes.toBytes("cf");
private byte[] snapshotName0;
private byte[] snapshotName1;
private byte[] snapshotName2;
private int snapshot0Rows;
private int snapshot1Rows;
private TableName tableName;
private Admin admin;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
UTIL.getConfiguration().setInt("hbase.client.pause", 250);
UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
UTIL.getConfiguration().setBoolean(
"hbase.master.enabletable.roundrobin", true);
// Enable snapshot
UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
UTIL.getConfiguration().setLong(RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_KEY,
RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_DEFAULT * 2);
UTIL.getConfiguration().setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
public static void setupCluster() throws Exception {
setupConf(UTIL.getConfiguration());
UTIL.startMiniCluster(3);
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
UTIL.shutdownMiniCluster();
protected static void setupConf(Configuration conf) {
TestRestoreFlushSnapshotFromClient.setupConf(conf);
UTIL.getConfiguration().setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
}
/**
* Initialize the tests with a table filled with some data
* and two snapshots (snapshotName0, snapshotName1) of different states.
* The tableName, snapshotNames and the number of rows in the snapshot are initialized.
*/
@Before
public void setup() throws Exception {
this.admin = UTIL.getHBaseAdmin();
long tid = System.currentTimeMillis();
tableName = TableName.valueOf("testtb-" + tid);
snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
snapshotName2 = Bytes.toBytes("snaptb2-" + tid);
// create Table
@Override
protected void createTable() throws Exception {
MobSnapshotTestingUtils.createMobTable(UTIL, tableName, 1, FAMILY);
Table table = ConnectionFactory.createConnection(UTIL.getConfiguration()).getTable(tableName);
SnapshotTestingUtils.loadData(UTIL, tableName, 500, FAMILY);
snapshot0Rows = MobSnapshotTestingUtils.countMobRows(table);
LOG.info("=== before snapshot with 500 rows");
logFSTree();
// take a snapshot
admin.snapshot(Bytes.toString(snapshotName0), tableName,
SnapshotDescription.Type.FLUSH);
LOG.info("=== after snapshot with 500 rows");
logFSTree();
// insert more data
SnapshotTestingUtils.loadData(UTIL, tableName, 500, FAMILY);
snapshot1Rows = MobSnapshotTestingUtils.countMobRows(table);
LOG.info("=== before snapshot with 1000 rows");
logFSTree();
// take a snapshot of the updated table
admin.snapshot(Bytes.toString(snapshotName1), tableName,
SnapshotDescription.Type.FLUSH);
LOG.info("=== after snapshot with 1000 rows");
logFSTree();
table.close();
}
@After
public void tearDown() throws Exception {
SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
@Override
protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
long expectedRows) throws IOException {
MobSnapshotTestingUtils.verifyMobRowCount(util, tableName, expectedRows);
}
@Test
public void testTakeFlushSnapshot() throws IOException {
// taking happens in setup.
}
@Test
public void testRestoreSnapshot() throws IOException {
MobSnapshotTestingUtils.verifyMobRowCount(UTIL, tableName, snapshot1Rows);
// Restore from snapshot-0
admin.disableTable(tableName);
admin.restoreSnapshot(snapshotName0);
logFSTree();
admin.enableTable(tableName);
LOG.info("=== after restore with 500 row snapshot");
logFSTree();
MobSnapshotTestingUtils.verifyMobRowCount(UTIL, tableName, snapshot0Rows);
// Restore from snapshot-1
admin.disableTable(tableName);
admin.restoreSnapshot(snapshotName1);
admin.enableTable(tableName);
MobSnapshotTestingUtils.verifyMobRowCount(UTIL, tableName, snapshot1Rows);
}
@Test(expected=SnapshotDoesNotExistException.class)
public void testCloneNonExistentSnapshot() throws IOException, InterruptedException {
String snapshotName = "random-snapshot-" + System.currentTimeMillis();
TableName tableName = TableName.valueOf("random-table-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName, tableName);
}
@Test
public void testCloneSnapshot() throws IOException, InterruptedException {
TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis());
testCloneSnapshot(clonedTableName, snapshotName0, snapshot0Rows);
testCloneSnapshot(clonedTableName, snapshotName1, snapshot1Rows);
}
private void testCloneSnapshot(final TableName tableName, final byte[] snapshotName,
int snapshotRows) throws IOException, InterruptedException {
// create a new table from snapshot
admin.cloneSnapshot(snapshotName, tableName);
MobSnapshotTestingUtils.verifyMobRowCount(UTIL, tableName, snapshotRows);
UTIL.deleteTable(tableName);
}
@Test
public void testRestoreSnapshotOfCloned() throws IOException, InterruptedException {
TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName0, clonedTableName);
MobSnapshotTestingUtils.verifyMobRowCount(UTIL, clonedTableName, snapshot0Rows);
admin.snapshot(Bytes.toString(snapshotName2), clonedTableName, SnapshotDescription.Type.FLUSH);
UTIL.deleteTable(clonedTableName);
admin.cloneSnapshot(snapshotName2, clonedTableName);
MobSnapshotTestingUtils.verifyMobRowCount(UTIL, clonedTableName, snapshot0Rows);
UTIL.deleteTable(clonedTableName);
}
// ==========================================================================
// Helpers
// ==========================================================================
private void logFSTree() throws IOException {
MasterFileSystem mfs = UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
FSUtils.logFileSystemState(mfs.getFileSystem(), mfs.getRootDir(), LOG);
@Override
protected int countRows(final Table table, final byte[]... families) throws IOException {
return MobSnapshotTestingUtils.countMobRows(table, families);
}
}
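For orientation, the removed test body above drove the snapshot lifecycle purely through the Admin calls that also appear in this diff: a FLUSH-type snapshot, then disableTable, restoreSnapshot, and enableTable. The sketch below strings those same calls together outside the test harness; the class name, table name, and snapshot name are placeholders, and it assumes a reachable cluster configured via hbase-site.xml.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;

public class FlushSnapshotRestoreSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb");   // placeholder table name
    String snapshot = "snaptb0";                     // placeholder snapshot name
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // FLUSH snapshots are taken while the table stays online.
      admin.snapshot(snapshot, table, SnapshotDescription.Type.FLUSH);
      // Restoring rolls the table back to the snapshot; it must be offline first.
      admin.disableTable(table);
      admin.restoreSnapshot(snapshot);
      admin.enableTable(table);
    }
  }
}

The MOB variant of this flow differs only in how the rows are counted afterwards, which is exactly what the verifyRowCount and countRows overrides above capture.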

View File

@ -17,31 +17,14 @@
*/
package org.apache.hadoop.hbase.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils.SnapshotMock;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mockito;
@ -49,112 +32,16 @@ import org.mockito.Mockito;
* Test the restore/clone operation from a file-system point of view.
*/
@Category(SmallTests.class)
public class TestMobRestoreSnapshotHelper {
public class TestMobRestoreSnapshotHelper extends TestRestoreSnapshotHelper {
final Log LOG = LogFactory.getLog(getClass());
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private Configuration conf;
private FileSystem fs;
private Path rootDir;
@Before
public void setup() throws Exception {
rootDir = TEST_UTIL.getDataTestDir("testRestore");
fs = TEST_UTIL.getTestFileSystem();
TEST_UTIL.getConfiguration().setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
conf = TEST_UTIL.getConfiguration();
FSUtils.setRootDir(conf, rootDir);
@Override
protected void setupConf(Configuration conf) {
conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
}
@After
public void tearDown() throws Exception {
fs.delete(TEST_UTIL.getDataTestDir(), true);
}
@Test
public void testRestore() throws IOException {
// Test Rolling-Upgrade like Snapshot.
// half of the machines write using the v1 format and the others use v2.
SnapshotMock snapshotMock = new SnapshotMock(TEST_UTIL.getConfiguration(), fs, rootDir);
SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2("snapshot");
builder.addRegionV1();
builder.addRegionV2();
builder.addRegionV2();
builder.addRegionV1();
Path snapshotDir = builder.commit();
HTableDescriptor htd = builder.getTableDescriptor();
SnapshotDescription desc = builder.getSnapshotDescription();
// Test clone a snapshot
HTableDescriptor htdClone = snapshotMock.createHtd("testtb-clone");
testRestore(snapshotDir, desc, htdClone);
verifyRestore(rootDir, htd, htdClone);
// Test clone a clone ("link to link")
SnapshotDescription cloneDesc = SnapshotDescription.newBuilder()
.setName("cloneSnapshot")
.setTable("testtb-clone")
.build();
Path cloneDir = FSUtils.getTableDir(rootDir, htdClone.getTableName());
HTableDescriptor htdClone2 = snapshotMock.createHtd("testtb-clone2");
testRestore(cloneDir, cloneDesc, htdClone2);
verifyRestore(rootDir, htd, htdClone2);
}
private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd,
final HTableDescriptor htdClone) throws IOException {
String[] files = SnapshotTestingUtils.listHFileNames(fs,
FSUtils.getTableDir(rootDir, htdClone.getTableName()));
assertEquals(12, files.length);
for (int i = 0; i < files.length; i += 2) {
String linkFile = files[i];
String refFile = files[i+1];
assertTrue(linkFile + " should be a HFileLink", HFileLink.isHFileLink(linkFile));
assertTrue(refFile + " should be a Referene", StoreFileInfo.isReference(refFile));
assertEquals(sourceHtd.getTableName(), HFileLink.getReferencedTableName(linkFile));
Path refPath = getReferredToFile(refFile);
LOG.debug("get reference name for file " + refFile + " = " + refPath);
assertTrue(refPath.getName() + " should be a HFileLink",
HFileLink.isHFileLink(refPath.getName()));
assertEquals(linkFile, refPath.getName());
}
}
/**
* Execute the restore operation
* @param snapshotDir The snapshot directory to use as "restore source"
* @param sd The snapshot descriptor
* @param htdClone The HTableDescriptor of the table to restore/clone.
*/
public void testRestore(final Path snapshotDir, final SnapshotDescription sd,
final HTableDescriptor htdClone) throws IOException {
LOG.debug("pre-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
FSUtils.logFileSystemState(fs, rootDir, LOG);
new FSTableDescriptors(conf).createTableDescriptor(htdClone);
RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sd, htdClone);
helper.restoreHdfsRegions();
LOG.debug("post-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
FSUtils.logFileSystemState(fs, rootDir, LOG);
}
/**
* Initialize the restore helper, based on the snapshot and table information provided.
*/
private RestoreSnapshotHelper getRestoreHelper(final Path rootDir, final Path snapshotDir,
final SnapshotDescription sd, final HTableDescriptor htdClone) throws IOException {
ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
MonitoredTask status = Mockito.mock(MonitoredTask.class);
SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd);
return new RestoreSnapshotHelper(conf, fs, manifest,
htdClone, rootDir, monitor, status);
}
private Path getReferredToFile(final String referenceName) {
Path fakeBasePath = new Path(new Path("table", "region"), "cf");
return StoreFileInfo.getReferredToFile(new Path(fakeBasePath, referenceName));
@Override
protected SnapshotMock createSnapshotMock() throws IOException {
return new SnapshotMock(TEST_UTIL.getConfiguration(), fs, rootDir);
}
}

View File

@ -21,6 +21,7 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
@ -51,20 +52,25 @@ import org.junit.experimental.categories.Category;
public class TestRestoreFlushSnapshotFromClient {
private static final Log LOG = LogFactory.getLog(TestRestoreFlushSnapshotFromClient.class);
private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
protected final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
private final byte[] FAMILY = Bytes.toBytes("cf");
protected final byte[] FAMILY = Bytes.toBytes("cf");
private byte[] snapshotName0;
private byte[] snapshotName1;
private byte[] snapshotName2;
private int snapshot0Rows;
private int snapshot1Rows;
private TableName tableName;
private Admin admin;
protected byte[] snapshotName0;
protected byte[] snapshotName1;
protected byte[] snapshotName2;
protected int snapshot0Rows;
protected int snapshot1Rows;
protected TableName tableName;
protected Admin admin;
@BeforeClass
public static void setUpBeforeClass() throws Exception {
public static void setupCluster() throws Exception {
setupConf(UTIL.getConfiguration());
UTIL.startMiniCluster(3);
}
protected static void setupConf(Configuration conf) {
UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
UTIL.getConfiguration().setInt("hbase.client.pause", 250);
@ -76,8 +82,6 @@ public class TestRestoreFlushSnapshotFromClient {
UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
UTIL.getConfiguration().setLong(RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_KEY,
RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_DEFAULT * 2);
UTIL.startMiniCluster(3);
}
@AfterClass
@ -85,6 +89,10 @@ public class TestRestoreFlushSnapshotFromClient {
UTIL.shutdownMiniCluster();
}
protected void createTable() throws Exception {
SnapshotTestingUtils.createTable(UTIL, tableName, FAMILY);
}
/**
* Initialize the tests with a table filled with some data
* and two snapshots (snapshotName0, snapshotName1) of different states.
@ -101,10 +109,10 @@ public class TestRestoreFlushSnapshotFromClient {
snapshotName2 = Bytes.toBytes("snaptb2-" + tid);
// create Table and disable it
SnapshotTestingUtils.createTable(UTIL, tableName, FAMILY);
createTable();
SnapshotTestingUtils.loadData(UTIL, tableName, 500, FAMILY);
Table table = UTIL.getConnection().getTable(tableName);
snapshot0Rows = UTIL.countRows(table);
snapshot0Rows = countRows(table);
LOG.info("=== before snapshot with 500 rows");
logFSTree();
@ -117,7 +125,7 @@ public class TestRestoreFlushSnapshotFromClient {
// insert more data
SnapshotTestingUtils.loadData(UTIL, tableName, 500, FAMILY);
snapshot1Rows = UTIL.countRows(table);
snapshot1Rows = countRows(table);
LOG.info("=== before snapshot with 1000 rows");
logFSTree();
@ -142,7 +150,7 @@ public class TestRestoreFlushSnapshotFromClient {
@Test
public void testRestoreSnapshot() throws IOException {
SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
verifyRowCount(UTIL, tableName, snapshot1Rows);
// Restore from snapshot-0
admin.disableTable(tableName);
@ -151,13 +159,13 @@ public class TestRestoreFlushSnapshotFromClient {
admin.enableTable(tableName);
LOG.info("=== after restore with 500 row snapshot");
logFSTree();
SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot0Rows);
verifyRowCount(UTIL, tableName, snapshot0Rows);
// Restore from snapshot-1
admin.disableTable(tableName);
admin.restoreSnapshot(snapshotName1);
admin.enableTable(tableName);
SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
verifyRowCount(UTIL, tableName, snapshot1Rows);
}
@Test(expected=SnapshotDoesNotExistException.class)
@ -178,7 +186,7 @@ public class TestRestoreFlushSnapshotFromClient {
int snapshotRows) throws IOException, InterruptedException {
// create a new table from snapshot
admin.cloneSnapshot(snapshotName, tableName);
SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshotRows);
verifyRowCount(UTIL, tableName, snapshotRows);
UTIL.deleteTable(tableName);
}
@ -187,12 +195,12 @@ public class TestRestoreFlushSnapshotFromClient {
public void testRestoreSnapshotOfCloned() throws IOException, InterruptedException {
TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName0, clonedTableName);
SnapshotTestingUtils.verifyRowCount(UTIL, clonedTableName, snapshot0Rows);
verifyRowCount(UTIL, clonedTableName, snapshot0Rows);
admin.snapshot(Bytes.toString(snapshotName2), clonedTableName, SnapshotDescription.Type.FLUSH);
UTIL.deleteTable(clonedTableName);
admin.cloneSnapshot(snapshotName2, clonedTableName);
SnapshotTestingUtils.verifyRowCount(UTIL, clonedTableName, snapshot0Rows);
verifyRowCount(UTIL, clonedTableName, snapshot0Rows);
UTIL.deleteTable(clonedTableName);
}
@ -203,4 +211,13 @@ public class TestRestoreFlushSnapshotFromClient {
MasterFileSystem mfs = UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
FSUtils.logFileSystemState(mfs.getFileSystem(), mfs.getRootDir(), LOG);
}
protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,
long expectedRows) throws IOException {
SnapshotTestingUtils.verifyRowCount(util, tableName, expectedRows);
}
protected int countRows(final Table table, final byte[]... families) throws IOException {
return UTIL.countRows(table, families);
}
}
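The other seam introduced here is configuration chaining: setupCluster applies setupConf to the shared Configuration before starting the mini cluster, and a subclass's setupConf first delegates to the parent and then layers its own keys (the MOB tests use it to set the MOB file cache size to 0). A minimal sketch of that layering, using Hadoop's Configuration directly; the property names are placeholders except hbase.client.pause, which appears in the hunk above.

import org.apache.hadoop.conf.Configuration;

public class SetupConfChainSketch {
  // Stand-in for the base class hook: shared defaults for every flavor of the test.
  static void baseSetupConf(Configuration conf) {
    conf.setInt("hbase.client.pause", 250);
  }

  // Stand-in for a subclass hook: parent settings first, then flavor-specific ones.
  static void mobSetupConf(Configuration conf) {
    baseSetupConf(conf);
    conf.setInt("example.mob.file.cache.size", 0);  // placeholder key, not the real constant
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    mobSetupConf(conf);
    // Both the shared and the flavor-specific settings end up on one Configuration.
    System.out.println(conf.getInt("hbase.client.pause", -1));           // 250
    System.out.println(conf.getInt("example.mob.file.cache.size", -1));  // 0
  }
}

Because the hook runs before startMiniCluster, every flavor of the test gets a cluster that already carries its overrides.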

View File

@ -53,13 +53,16 @@ import org.mockito.Mockito;
public class TestRestoreSnapshotHelper {
private static final Log LOG = LogFactory.getLog(TestRestoreSnapshotHelper.class);
private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private final static String TEST_HFILE = "abc";
protected final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
protected final static String TEST_HFILE = "abc";
private Configuration conf;
private Path archiveDir;
private FileSystem fs;
private Path rootDir;
protected Configuration conf;
protected Path archiveDir;
protected FileSystem fs;
protected Path rootDir;
protected void setupConf(Configuration conf) {
}
@Before
public void setup() throws Exception {
@ -67,6 +70,7 @@ public class TestRestoreSnapshotHelper {
archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
fs = TEST_UTIL.getTestFileSystem();
conf = TEST_UTIL.getConfiguration();
setupConf(conf);
FSUtils.setRootDir(conf, rootDir);
}
@ -75,11 +79,15 @@ public class TestRestoreSnapshotHelper {
fs.delete(TEST_UTIL.getDataTestDir(), true);
}
protected SnapshotMock createSnapshotMock() throws IOException {
return new SnapshotMock(TEST_UTIL.getConfiguration(), fs, rootDir);
}
@Test
public void testRestore() throws IOException {
// Test Rolling-Upgrade like Snapshot.
// half of the machines write using the v1 format and the others use v2.
SnapshotMock snapshotMock = new SnapshotMock(TEST_UTIL.getConfiguration(), fs, rootDir);
SnapshotMock snapshotMock = createSnapshotMock();
SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2("snapshot");
builder.addRegionV1();
builder.addRegionV2();
@ -129,7 +137,7 @@ public class TestRestoreSnapshotHelper {
* @param sd The snapshot descriptor
* @param htdClone The HTableDescriptor of the table to restore/clone.
*/
public void testRestore(final Path snapshotDir, final SnapshotDescription sd,
private void testRestore(final Path snapshotDir, final SnapshotDescription sd,
final HTableDescriptor htdClone) throws IOException {
LOG.debug("pre-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
FSUtils.logFileSystemState(fs, rootDir, LOG);