HBASE-9090 cleanup snapshot tests setup/teardown code
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1508801 13f79535-47bb-0310-9956-ffa450edef68
parent b60eb26632
commit 1ed35e4cb6
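This commit removes the copy-pasted createTable/loadData/verifyRowCount helpers and the hand-rolled snapshot/archive cleanup from the individual snapshot tests, replacing them with shared static helpers on SnapshotTestingUtils. A minimal sketch of the teardown pattern the tests converge on (condensed from the diffs below; TEST_UTIL is the test's HBaseTestingUtility):

    @After
    public void tearDown() throws Exception {
      // drop the table under test
      TEST_UTIL.deleteTable(tableName);
      // delete every snapshot and assert none are left behind
      SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getHBaseAdmin());
      // wipe the HFile archive directory so the next test starts clean
      SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
    }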
==== TestCloneSnapshotFromClient.java ====
@@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -96,7 +96,7 @@ public class TestCloneSnapshotFromClient {
     snapshotName2 = Bytes.toBytes("snaptb2-" + tid);

     // create Table and disable it
-    createTable(tableName, FAMILY);
+    SnapshotTestingUtils.createTable(TEST_UTIL, tableName, FAMILY);
     admin.disableTable(tableName);

     // take an empty snapshot
@@ -106,7 +106,7 @@ public class TestCloneSnapshotFromClient {
     try {
       // enable table and insert data
       admin.enableTable(tableName);
-      loadData(table, 500, FAMILY);
+      SnapshotTestingUtils.loadData(TEST_UTIL, table, 500, FAMILY);
       snapshot0Rows = TEST_UTIL.countRows(table);
       admin.disableTable(tableName);

@@ -115,7 +115,7 @@ public class TestCloneSnapshotFromClient {

       // enable table and insert more data
       admin.enableTable(tableName);
-      loadData(table, 500, FAMILY);
+      SnapshotTestingUtils.loadData(TEST_UTIL, table, 500, FAMILY);
       snapshot1Rows = TEST_UTIL.countRows(table);
       admin.disableTable(tableName);

@@ -134,13 +134,8 @@ public class TestCloneSnapshotFromClient {
     if (admin.tableExists(tableName)) {
       TEST_UTIL.deleteTable(tableName);
     }
-    admin.deleteSnapshot(snapshotName0);
-    admin.deleteSnapshot(snapshotName1);
-
-    // Ensure the archiver to be empty
-    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
-    mfs.getFileSystem().delete(
-      new Path(mfs.getRootDir(), HConstants.HFILE_ARCHIVE_DIRECTORY), true);
+    SnapshotTestingUtils.deleteAllSnapshots(admin);
+    SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
   }

   @Test(expected=SnapshotDoesNotExistException.class)
@@ -162,10 +157,9 @@ public class TestCloneSnapshotFromClient {
       int snapshotRows) throws IOException, InterruptedException {
     // create a new table from snapshot
     admin.cloneSnapshot(snapshotName, tableName);
-    verifyRowCount(tableName, snapshotRows);
+    SnapshotTestingUtils.verifyRowCount(TEST_UTIL, tableName, snapshotRows);

-    admin.disableTable(tableName);
-    admin.deleteTable(tableName);
+    TEST_UTIL.deleteTable(tableName);
   }

   /**
@@ -176,7 +170,7 @@ public class TestCloneSnapshotFromClient {
     // Clone a table from the first snapshot
     byte[] clonedTableName = Bytes.toBytes("clonedtb1-" + System.currentTimeMillis());
     admin.cloneSnapshot(snapshotName0, clonedTableName);
-    verifyRowCount(clonedTableName, snapshot0Rows);
+    SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);

     // Take a snapshot of this cloned table.
     admin.disableTable(clonedTableName);
@@ -185,85 +179,45 @@ public class TestCloneSnapshotFromClient {
     // Clone the snapshot of the cloned table
     byte[] clonedTableName2 = Bytes.toBytes("clonedtb2-" + System.currentTimeMillis());
     admin.cloneSnapshot(snapshotName2, clonedTableName2);
-    verifyRowCount(clonedTableName2, snapshot0Rows);
+    SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows);
     admin.disableTable(clonedTableName2);

     // Remove the original table
-    admin.disableTable(tableName);
-    admin.deleteTable(tableName);
+    TEST_UTIL.deleteTable(tableName);
     waitCleanerRun();

     // Verify the first cloned table
     admin.enableTable(clonedTableName);
-    verifyRowCount(clonedTableName, snapshot0Rows);
+    SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);

     // Verify the second cloned table
     admin.enableTable(clonedTableName2);
-    verifyRowCount(clonedTableName2, snapshot0Rows);
+    SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows);
     admin.disableTable(clonedTableName2);

     // Delete the first cloned table
-    admin.disableTable(clonedTableName);
-    admin.deleteTable(clonedTableName);
+    TEST_UTIL.deleteTable(clonedTableName);
     waitCleanerRun();

     // Verify the second cloned table
     admin.enableTable(clonedTableName2);
-    verifyRowCount(clonedTableName2, snapshot0Rows);
+    SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName2, snapshot0Rows);

     // Clone a new table from cloned
     byte[] clonedTableName3 = Bytes.toBytes("clonedtb3-" + System.currentTimeMillis());
     admin.cloneSnapshot(snapshotName2, clonedTableName3);
-    verifyRowCount(clonedTableName3, snapshot0Rows);
+    SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName3, snapshot0Rows);

     // Delete the cloned tables
-    admin.disableTable(clonedTableName2);
-    admin.deleteTable(clonedTableName2);
-    admin.disableTable(clonedTableName3);
-    admin.deleteTable(clonedTableName3);
+    TEST_UTIL.deleteTable(clonedTableName2);
+    TEST_UTIL.deleteTable(clonedTableName3);
     admin.deleteSnapshot(snapshotName2);
   }

   // ==========================================================================
   // Helpers
   // ==========================================================================
-  private void createTable(final byte[] tableName, final byte[]... families) throws IOException {
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    for (byte[] family: families) {
-      HColumnDescriptor hcd = new HColumnDescriptor(family);
-      htd.addFamily(hcd);
-    }
-    byte[][] splitKeys = new byte[16][];
-    byte[] hex = Bytes.toBytes("0123456789abcdef");
-    for (int i = 0; i < 16; ++i) {
-      splitKeys[i] = new byte[] { hex[i] };
-    }
-    admin.createTable(htd, splitKeys);
-  }
-
-  public void loadData(final HTable table, int rows, byte[]... families) throws IOException {
-    byte[] qualifier = Bytes.toBytes("q");
-    table.setAutoFlush(false);
-    while (rows-- > 0) {
-      byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), Bytes.toBytes(rows));
-      byte[] key = Bytes.toBytes(MD5Hash.getMD5AsHex(value));
-      Put put = new Put(key);
-      put.setDurability(Durability.SKIP_WAL);
-      for (byte[] family: families) {
-        put.add(family, qualifier, value);
-      }
-      table.put(put);
-    }
-    table.flushCommits();
-  }
-
   private void waitCleanerRun() throws InterruptedException {
     TEST_UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().choreForTesting();
   }
-
-  private void verifyRowCount(final byte[] tableName, long expectedRows) throws IOException {
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
-    assertEquals(expectedRows, TEST_UTIL.countRows(table));
-    table.close();
-  }
 }
==== TestRestoreSnapshotFromClient.java ====
@@ -35,9 +35,9 @@ import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
+import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -100,7 +100,7 @@ public class TestRestoreSnapshotFromClient {
     snapshotName2 = Bytes.toBytes("snaptb2-" + tid);

     // create Table and disable it
-    createTable(tableName, FAMILY);
+    SnapshotTestingUtils.createTable(TEST_UTIL, tableName, FAMILY);
     admin.disableTable(tableName);

     // take an empty snapshot
@@ -110,7 +110,7 @@ public class TestRestoreSnapshotFromClient {
     try {
       // enable table and insert data
       admin.enableTable(tableName);
-      loadData(table, 500, FAMILY);
+      SnapshotTestingUtils.loadData(TEST_UTIL, table, 500, FAMILY);
       snapshot0Rows = TEST_UTIL.countRows(table);
       admin.disableTable(tableName);

@@ -119,7 +119,7 @@ public class TestRestoreSnapshotFromClient {

       // enable table and insert more data
       admin.enableTable(tableName);
-      loadData(table, 500, FAMILY);
+      SnapshotTestingUtils.loadData(TEST_UTIL, table, 500, FAMILY);
       snapshot1Rows = TEST_UTIL.countRows(table);
       admin.disableTable(tableName);

@@ -135,43 +135,36 @@ public class TestRestoreSnapshotFromClient {

   @After
   public void tearDown() throws Exception {
-    if (admin.tableExists(tableName)) {
-      TEST_UTIL.deleteTable(tableName);
-    }
-    admin.deleteSnapshot(snapshotName0);
-    admin.deleteSnapshot(snapshotName1);
-
-    // Ensure the archiver to be empty
-    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
-    mfs.getFileSystem().delete(
-      new Path(mfs.getRootDir(), HConstants.HFILE_ARCHIVE_DIRECTORY), true);
+    TEST_UTIL.deleteTable(tableName);
+    SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getHBaseAdmin());
+    SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
   }

   @Test
   public void testRestoreSnapshot() throws IOException {
-    verifyRowCount(tableName, snapshot1Rows);
+    SnapshotTestingUtils.verifyRowCount(TEST_UTIL, tableName, snapshot1Rows);

     // Restore from snapshot-0
     admin.disableTable(tableName);
     admin.restoreSnapshot(snapshotName0);
     admin.enableTable(tableName);
-    verifyRowCount(tableName, snapshot0Rows);
+    SnapshotTestingUtils.verifyRowCount(TEST_UTIL, tableName, snapshot0Rows);

     // Restore from emptySnapshot
     admin.disableTable(tableName);
     admin.restoreSnapshot(emptySnapshot);
     admin.enableTable(tableName);
-    verifyRowCount(tableName, 0);
+    SnapshotTestingUtils.verifyRowCount(TEST_UTIL, tableName, 0);

     // Restore from snapshot-1
     admin.disableTable(tableName);
     admin.restoreSnapshot(snapshotName1);
     admin.enableTable(tableName);
-    verifyRowCount(tableName, snapshot1Rows);
+    SnapshotTestingUtils.verifyRowCount(TEST_UTIL, tableName, snapshot1Rows);
   }

   @Test
-  public void testRestoreSchemaChange() throws IOException {
+  public void testRestoreSchemaChange() throws Exception {
     byte[] TEST_FAMILY2 = Bytes.toBytes("cf2");

     HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
@@ -183,7 +176,7 @@ public class TestRestoreSnapshotFromClient {
     assertEquals(2, table.getTableDescriptor().getFamilies().size());
     HTableDescriptor htd = admin.getTableDescriptor(tableName);
     assertEquals(2, htd.getFamilies().size());
-    loadData(table, 500, TEST_FAMILY2);
+    SnapshotTestingUtils.loadData(TEST_UTIL, table, 500, TEST_FAMILY2);
     long snapshot2Rows = snapshot1Rows + 500;
     assertEquals(snapshot2Rows, TEST_UTIL.countRows(table));
     assertEquals(500, TEST_UTIL.countRows(table, TEST_FAMILY2));
@@ -230,51 +223,20 @@ public class TestRestoreSnapshotFromClient {
   public void testRestoreSnapshotOfCloned() throws IOException, InterruptedException {
     byte[] clonedTableName = Bytes.toBytes("clonedtb-" + System.currentTimeMillis());
     admin.cloneSnapshot(snapshotName0, clonedTableName);
-    verifyRowCount(clonedTableName, snapshot0Rows);
+    SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
     admin.disableTable(clonedTableName);
     admin.snapshot(snapshotName2, clonedTableName);
-    admin.deleteTable(clonedTableName);
+    TEST_UTIL.deleteTable(clonedTableName);
     waitCleanerRun();

     admin.cloneSnapshot(snapshotName2, clonedTableName);
-    verifyRowCount(clonedTableName, snapshot0Rows);
-    admin.disableTable(clonedTableName);
-    admin.deleteTable(clonedTableName);
+    SnapshotTestingUtils.verifyRowCount(TEST_UTIL, clonedTableName, snapshot0Rows);
+    TEST_UTIL.deleteTable(clonedTableName);
   }

   // ==========================================================================
   // Helpers
   // ==========================================================================
-  private void createTable(final byte[] tableName, final byte[]... families) throws IOException {
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    for (byte[] family: families) {
-      HColumnDescriptor hcd = new HColumnDescriptor(family);
-      htd.addFamily(hcd);
-    }
-    byte[][] splitKeys = new byte[16][];
-    byte[] hex = Bytes.toBytes("0123456789abcdef");
-    for (int i = 0; i < 16; ++i) {
-      splitKeys[i] = new byte[] { hex[i] };
-    }
-    admin.createTable(htd, splitKeys);
-  }
-
-  public void loadData(final HTable table, int rows, byte[]... families) throws IOException {
-    byte[] qualifier = Bytes.toBytes("q");
-    table.setAutoFlush(false);
-    while (rows-- > 0) {
-      byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), Bytes.toBytes(rows));
-      byte[] key = Bytes.toBytes(MD5Hash.getMD5AsHex(value));
-      Put put = new Put(key);
-      put.setDurability(Durability.SKIP_WAL);
-      for (byte[] family: families) {
-        put.add(family, qualifier, value);
-      }
-      table.put(put);
-    }
-    table.flushCommits();
-  }
-
   private void waitCleanerRun() throws InterruptedException {
     TEST_UTIL.getMiniHBaseCluster().getMaster().getHFileCleaner().choreForTesting();
   }
@@ -290,10 +252,4 @@ public class TestRestoreSnapshotFromClient {
     }
     return families;
   }
-
-  private void verifyRowCount(final byte[] tableName, long expectedRows) throws IOException {
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
-    assertEquals(expectedRows, TEST_UTIL.countRows(table));
-    table.close();
-  }
 }
==== TestSnapshotCloneIndependence.java ====
@@ -97,13 +97,8 @@ public class TestSnapshotCloneIndependence {
   @After
   public void tearDown() throws Exception {
     UTIL.deleteTable(TABLE_NAME);
-    // and cleanup the archive directory
-    try {
-      UTIL.getTestFileSystem().delete(
-        new Path(UTIL.getDefaultRootDirPath(), HConstants.HFILE_ARCHIVE_DIRECTORY), true);
-    } catch (IOException e) {
-      LOG.warn("Failure to delete archive directory", e);
-    }
+    SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+    SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
   }

   @AfterClass
@@ -373,4 +368,4 @@ public class TestSnapshotCloneIndependence {
     Assert.assertTrue("The new family was not found. ",
       !clonedTableDescriptor.hasFamily(TEST_FAM_2));
   }
 }
==== TestSnapshotFromClient.java ====
@@ -98,13 +98,8 @@ public class TestSnapshotFromClient {
   @After
   public void tearDown() throws Exception {
     UTIL.deleteTable(TABLE_NAME);
-    // and cleanup the archive directory
-    try {
-      UTIL.getTestFileSystem().delete(
-        new Path(UTIL.getDefaultRootDirPath(), HConstants.HFILE_ARCHIVE_DIRECTORY), true);
-    } catch (IOException e) {
-      LOG.warn("Failure to delete archive directory", e);
-    }
+    SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+    SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
   }

   @AfterClass
@@ -263,4 +258,4 @@ public class TestSnapshotFromClient {
       LOG.info("Correctly failed to snapshot a non-existant table:" + e.getMessage());
     }
   }
 }
==== TestSnapshotFromMaster.java ====
@@ -134,22 +134,8 @@ public class TestSnapshotFromMaster {
   @After
   public void tearDown() throws Exception {
     UTIL.deleteTable(TABLE_NAME);
-
-    // delete the archive directory, if its exists
-    if (fs.exists(archiveDir)) {
-      if (!fs.delete(archiveDir, true)) {
-        throw new IOException("Couldn't delete archive directory (" + archiveDir
-          + " for an unknown reason");
-      }
-    }
-
-    // delete the snapshot directory, if its exists
-    if (fs.exists(snapshots)) {
-      if (!fs.delete(snapshots, true)) {
-        throw new IOException("Couldn't delete snapshots directory (" + snapshots
-          + " for an unknown reason");
-      }
-    }
+    SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+    SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
   }

   @AfterClass
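Note the behavioral tightening in this teardown: the old code deleted the archive and snapshot directories directly through the FileSystem and tolerated failures, while SnapshotTestingUtils.deleteAllSnapshots goes through the HBaseAdmin API (listSnapshots/deleteSnapshot) and then asserts that no snapshots remain, so a leaked snapshot now fails the test instead of being silently swept away.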
==== SnapshotTestingUtils.java ====
@@ -34,21 +34,29 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableNotEnabledException;
+import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneResponse;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.Assert;

 import com.google.protobuf.ServiceException;
@@ -433,4 +441,102 @@ public class SnapshotTestingUtils {
     SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), tableName, nonEmptyFamilyNames, emptyFamilyNames,
       rootDir, admin, fs, false, new Path(rootDir, HConstants.HREGION_LOGDIR_NAME), null);
   }

+  // ==========================================================================
+  // Table Helpers
+  // ==========================================================================
+  public static void waitForTableToBeOnline(final HBaseTestingUtility util, final byte[] tableName)
+      throws IOException, InterruptedException {
+    HRegionServer rs = util.getRSForFirstRegionInTable(tableName);
+    List<HRegion> onlineRegions = rs.getOnlineRegions(tableName);
+    for (HRegion region : onlineRegions) {
+      region.waitForFlushesAndCompactions();
+    }
+    util.getHBaseAdmin().isTableAvailable(tableName);
+  }
+
+  public static void createTable(final HBaseTestingUtility util, final byte[] tableName,
+      final byte[]... families) throws IOException, InterruptedException {
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    for (byte[] family: families) {
+      HColumnDescriptor hcd = new HColumnDescriptor(family);
+      htd.addFamily(hcd);
+    }
+    byte[][] splitKeys = new byte[14][];
+    byte[] hex = Bytes.toBytes("123456789abcde");
+    for (int i = 0; i < splitKeys.length; ++i) {
+      splitKeys[i] = new byte[] { hex[i] };
+    }
+    util.getHBaseAdmin().createTable(htd, splitKeys);
+    waitForTableToBeOnline(util, tableName);
+    assertEquals(15, util.getHBaseAdmin().getTableRegions(tableName).size());
+  }
+
+  public static void loadData(final HBaseTestingUtility util, final byte[] tableName, int rows,
+      byte[]... families) throws IOException, InterruptedException {
+    loadData(util, new HTable(util.getConfiguration(), tableName), rows, families);
+  }
+
+  public static void loadData(final HBaseTestingUtility util, final HTable table, int rows,
+      byte[]... families) throws IOException, InterruptedException {
+    table.setAutoFlush(false);
+
+    // Ensure one row per region
+    assertTrue(rows >= 16);
+    for (byte k0: Bytes.toBytes("0123456789abcdef")) {
+      byte[] k = new byte[] { k0 };
+      byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), k);
+      byte[] key = Bytes.add(k, Bytes.toBytes(MD5Hash.getMD5AsHex(value)));
+      putData(table, families, key, value);
+      rows--;
+    }
+
+    // Add other extra rows. more rows, more files
+    while (rows-- > 0) {
+      byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), Bytes.toBytes(rows));
+      byte[] key = Bytes.toBytes(MD5Hash.getMD5AsHex(value));
+      putData(table, families, key, value);
+    }
+    table.flushCommits();
+
+    waitForTableToBeOnline(util, table.getTableName());
+  }
+
+  private static void putData(final HTable table, final byte[][] families,
+      final byte[] key, final byte[] value) throws IOException {
+    byte[] q = Bytes.toBytes("q");
+    Put put = new Put(key);
+    put.setDurability(Durability.SKIP_WAL);
+    for (byte[] family: families) {
+      put.add(family, q, value);
+    }
+    table.put(put);
+  }
+
+  public static void deleteAllSnapshots(final HBaseAdmin admin)
+      throws IOException {
+    // Delete all the snapshots
+    for (SnapshotDescription snapshot: admin.listSnapshots()) {
+      admin.deleteSnapshot(snapshot.getName());
+    }
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+  }
+
+  public static void deleteArchiveDirectory(final HBaseTestingUtility util)
+      throws IOException {
+    // Ensure the archiver to be empty
+    MasterFileSystem mfs = util.getMiniHBaseCluster().getMaster().getMasterFileSystem();
+    Path archiveDir = new Path(mfs.getRootDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
+    mfs.getFileSystem().delete(archiveDir, true);
+  }
+
+  public static void verifyRowCount(final HBaseTestingUtility util, final byte[] tableName,
+      long expectedRows) throws IOException {
+    HTable table = new HTable(util.getConfiguration(), tableName);
+    try {
+      assertEquals(expectedRows, util.countRows(table));
+    } finally {
+      table.close();
+    }
+  }
 }
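The new helpers are static and take the HBaseTestingUtility explicitly, so any snapshot test can share them. A sketch of typical usage against a running mini cluster (table and family names here are illustrative):

    byte[] tableName = Bytes.toBytes("testtb");
    byte[] FAMILY = Bytes.toBytes("cf");
    // pre-split table: 14 split keys ("123456789abcde") yield 15 regions
    SnapshotTestingUtils.createTable(TEST_UTIL, tableName, FAMILY);
    // seeds one row per hex prefix first (hence assertTrue(rows >= 16)), then fills the rest
    SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 500, FAMILY);
    SnapshotTestingUtils.verifyRowCount(TEST_UTIL, tableName, 500);

Unlike the old per-test createTable (16 split keys, no wait), the shared version blocks in waitForTableToBeOnline until the table's regions have finished their flushes and compactions and asserts the 15-region layout, which makes the later snapshot verifications deterministic.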
==== TestExportSnapshot.java ====
@@ -119,10 +119,9 @@ public class TestExportSnapshot {

   @After
   public void tearDown() throws Exception {
-    admin.disableTable(tableName);
-    admin.deleteSnapshot(snapshotName);
-    admin.deleteSnapshot(emptySnapshotName);
-    admin.deleteTable(tableName);
+    TEST_UTIL.deleteTable(tableName);
+    SnapshotTestingUtils.deleteAllSnapshots(TEST_UTIL.getHBaseAdmin());
+    SnapshotTestingUtils.deleteArchiveDirectory(TEST_UTIL);
     admin.close();
   }
==== TestFlushSnapshotFromClient.java ====
@@ -42,8 +42,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.hadoop.hbase.ipc.RpcServer;
@@ -56,7 +54,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptio
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -121,23 +118,15 @@ public class TestFlushSnapshotFromClient {

   @Before
   public void setup() throws Exception {
-    createTable(TABLE_NAME, TEST_FAM);
+    SnapshotTestingUtils.createTable(UTIL, TABLE_NAME, TEST_FAM);
   }

   @After
   public void tearDown() throws Exception {
     UTIL.deleteTable(TABLE_NAME);
-    // and cleanup the archive directory
-    try {
-      UTIL.getTestFileSystem().delete(
-        new Path(UTIL.getDefaultRootDirPath(), HConstants.HFILE_ARCHIVE_DIRECTORY), true);
-    } catch (IOException e) {
-      LOG.warn("Failure to delete archive directory", e);
-    }
-    for (SnapshotDescription snapshot: UTIL.getHBaseAdmin().listSnapshots()) {
-      UTIL.getHBaseAdmin().deleteSnapshot(snapshot.getName());
-    }
-    SnapshotTestingUtils.assertNoSnapshots(UTIL.getHBaseAdmin());
+    SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+    SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
   }

   @AfterClass
@@ -161,7 +150,7 @@ public class TestFlushSnapshotFromClient {

     // put some stuff in the table
     HTable table = new HTable(UTIL.getConfiguration(), TABLE_NAME);
-    loadData(table, DEFAULT_NUM_ROWS, TEST_FAM);
+    SnapshotTestingUtils.loadData(UTIL, table, DEFAULT_NUM_ROWS, TEST_FAM);

     // get the name of all the regionservers hosting the snapshotted table
     Set<String> snapshotServers = new HashSet<String>();
@@ -252,9 +241,7 @@ public class TestFlushSnapshotFromClient {
     // make sure we don't fail on listing snapshots
     SnapshotTestingUtils.assertNoSnapshots(admin);
     // load the table so we have some data
-    loadData(new HTable(UTIL.getConfiguration(), TABLE_NAME), numRows, TEST_FAM);
-    // and wait until everything stabilizes
-    waitForTableToBeOnline(TABLE_NAME);
+    SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, numRows, TEST_FAM);

     // Take a snapshot
     String snapshotBeforeMergeName = "snapshotBeforeMerge";
@@ -263,7 +250,7 @@ public class TestFlushSnapshotFromClient {
     // Clone the table
     String cloneBeforeMergeName = "cloneBeforeMerge";
     admin.cloneSnapshot(snapshotBeforeMergeName, cloneBeforeMergeName);
-    waitForTableToBeOnline(Bytes.toBytes(cloneBeforeMergeName));
+    SnapshotTestingUtils.waitForTableToBeOnline(UTIL, Bytes.toBytes(cloneBeforeMergeName));

     // Merge two regions
     List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
@@ -287,11 +274,11 @@ public class TestFlushSnapshotFromClient {
     // Clone the table
     String cloneAfterMergeName = "cloneAfterMerge";
     admin.cloneSnapshot(snapshotBeforeMergeName, cloneAfterMergeName);
-    waitForTableToBeOnline(Bytes.toBytes(cloneAfterMergeName));
+    SnapshotTestingUtils.waitForTableToBeOnline(UTIL, Bytes.toBytes(cloneAfterMergeName));

-    verifyRowCount(TABLE_NAME, numRows);
-    verifyRowCount(Bytes.toBytes(cloneBeforeMergeName), numRows);
-    verifyRowCount(Bytes.toBytes(cloneAfterMergeName), numRows);
+    SnapshotTestingUtils.verifyRowCount(UTIL, TABLE_NAME, numRows);
+    SnapshotTestingUtils.verifyRowCount(UTIL, Bytes.toBytes(cloneBeforeMergeName), numRows);
+    SnapshotTestingUtils.verifyRowCount(UTIL, Bytes.toBytes(cloneAfterMergeName), numRows);

     // test that we can delete the snapshot
     UTIL.deleteTable(cloneAfterMergeName);
@@ -305,9 +292,7 @@ public class TestFlushSnapshotFromClient {
     // make sure we don't fail on listing snapshots
     SnapshotTestingUtils.assertNoSnapshots(admin);
     // load the table so we have some data
-    loadData(new HTable(UTIL.getConfiguration(), TABLE_NAME), numRows, TEST_FAM);
-    // and wait until everything stabilizes
-    waitForTableToBeOnline(TABLE_NAME);
+    SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, numRows, TEST_FAM);

     // Merge two regions
     List<HRegionInfo> regions = admin.getTableRegions(TABLE_NAME);
@@ -334,10 +319,10 @@ public class TestFlushSnapshotFromClient {
     // Clone the table
     String cloneName = "cloneMerge";
     admin.cloneSnapshot(snapshotName, cloneName);
-    waitForTableToBeOnline(Bytes.toBytes(cloneName));
+    SnapshotTestingUtils.waitForTableToBeOnline(UTIL, Bytes.toBytes(cloneName));

-    verifyRowCount(TABLE_NAME, numRows);
-    verifyRowCount(Bytes.toBytes(cloneName), numRows);
+    SnapshotTestingUtils.verifyRowCount(UTIL, TABLE_NAME, numRows);
+    SnapshotTestingUtils.verifyRowCount(UTIL, Bytes.toBytes(cloneName), numRows);

     // test that we can delete the snapshot
     UTIL.deleteTable(cloneName);
@@ -353,9 +338,7 @@ public class TestFlushSnapshotFromClient {
     // make sure we don't fail on listing snapshots
     SnapshotTestingUtils.assertNoSnapshots(admin);
     // load the table so we have some data
-    loadData(new HTable(UTIL.getConfiguration(), TABLE_NAME), DEFAULT_NUM_ROWS, TEST_FAM);
-    // and wait until everything stabilizes
-    waitForTableToBeOnline(TABLE_NAME);
+    SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);

     String snapshotName = "flushSnapshotCreateListDestroy";
     // test creating the snapshot
@@ -414,13 +397,10 @@ public class TestFlushSnapshotFromClient {
     // make sure we don't fail on listing snapshots
     SnapshotTestingUtils.assertNoSnapshots(admin);
     // create second testing table
-    createTable(TABLE2_NAME, TEST_FAM);
+    SnapshotTestingUtils.createTable(UTIL, TABLE2_NAME, TEST_FAM);
     // load the table so we have some data
-    loadData(new HTable(UTIL.getConfiguration(), TABLE_NAME), DEFAULT_NUM_ROWS, TEST_FAM);
-    loadData(new HTable(UTIL.getConfiguration(), TABLE2_NAME), DEFAULT_NUM_ROWS, TEST_FAM);
-    // and wait until everything stabilizes
-    waitForTableToBeOnline(TABLE_NAME);
-    waitForTableToBeOnline(TABLE2_NAME);
+    SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
+    SnapshotTestingUtils.loadData(UTIL, TABLE2_NAME, DEFAULT_NUM_ROWS, TEST_FAM);

     final CountDownLatch toBeSubmitted = new CountDownLatch(ssNum);
     // We'll have one of these per thread
@@ -512,16 +492,6 @@ public class TestFlushSnapshotFromClient {
     FSUtils.logFileSystemState(UTIL.getDFSCluster().getFileSystem(), root, LOG);
   }

-  private void waitForTableToBeOnline(final byte[] tableName)
-      throws IOException, InterruptedException {
-    HRegionServer rs = UTIL.getRSForFirstRegionInTable(tableName);
-    List<HRegion> onlineRegions = rs.getOnlineRegions(tableName);
-    for (HRegion region : onlineRegions) {
-      region.waitForFlushesAndCompactions();
-    }
-    UTIL.getHBaseAdmin().isTableAvailable(tableName);
-  }
-
   private void waitRegionsAfterMerge(final long numRegionsAfterMerge)
       throws IOException, InterruptedException {
     HBaseAdmin admin = UTIL.getHBaseAdmin();
@@ -534,64 +504,6 @@ public class TestFlushSnapshotFromClient {
         break;
       Thread.sleep(100);
     }
-    waitForTableToBeOnline(TABLE_NAME);
-  }
-
-  private void createTable(final byte[] tableName, final byte[]... families)
-      throws IOException, InterruptedException {
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    for (byte[] family: families) {
-      HColumnDescriptor hcd = new HColumnDescriptor(family);
-      htd.addFamily(hcd);
-    }
-    byte[][] splitKeys = new byte[14][];
-    byte[] hex = Bytes.toBytes("123456789abcde");
-    for (int i = 0; i < splitKeys.length; ++i) {
-      splitKeys[i] = new byte[] { hex[i] };
-    }
-    UTIL.getHBaseAdmin().createTable(htd, splitKeys);
-    waitForTableToBeOnline(tableName);
-    assertEquals(15, UTIL.getHBaseAdmin().getTableRegions(TABLE_NAME).size());
-  }
-
-  public void loadData(final HTable table, int rows, byte[]... families)
-      throws IOException, InterruptedException {
-    table.setAutoFlush(false);
-
-    // Ensure one row per region
-    assertTrue(rows >= 16);
-    for (byte k0: Bytes.toBytes("0123456789abcdef")) {
-      byte[] k = new byte[] { k0 };
-      byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), k);
-      byte[] key = Bytes.add(k, Bytes.toBytes(MD5Hash.getMD5AsHex(value)));
-      putData(table, families, key, value);
-      rows--;
-    }
-
-    // Add other extra rows. more rows, more files
-    while (rows-- > 0) {
-      byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), Bytes.toBytes(rows));
-      byte[] key = Bytes.toBytes(MD5Hash.getMD5AsHex(value));
-      putData(table, families, key, value);
-    }
-    table.flushCommits();
-
-    waitForTableToBeOnline(table.getTableName());
-  }
-
-  private void putData(final HTable table, final byte[][] families,
-      final byte[] key, final byte[] value) throws IOException {
-    Put put = new Put(key);
-    put.setDurability(Durability.SKIP_WAL);
-    for (byte[] family: families) {
-      put.add(family, TEST_QUAL, value);
-    }
-    table.put(put);
-  }
-
-  private void verifyRowCount(final byte[] tableName, long expectedRows) throws IOException {
-    HTable table = new HTable(UTIL.getConfiguration(), tableName);
-    assertEquals(expectedRows, UTIL.countRows(table));
-    table.close();
+    SnapshotTestingUtils.waitForTableToBeOnline(UTIL, TABLE_NAME);
   }
 }
==== TestRestoreFlushSnapshotFromClient.java ====
@@ -18,8 +18,10 @@
 package org.apache.hadoop.hbase.snapshot;

 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;

 import java.io.IOException;
+import java.util.List;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -36,6 +38,8 @@ import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -57,9 +61,10 @@ import org.junit.experimental.categories.Category;
 public class TestRestoreFlushSnapshotFromClient {
   final Log LOG = LogFactory.getLog(getClass());

-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();

   private final byte[] FAMILY = Bytes.toBytes("cf");
+  private static final byte[] TEST_QUAL = Bytes.toBytes("q");

   private byte[] snapshotName0;
   private byte[] snapshotName1;
@@ -71,24 +76,24 @@ public class TestRestoreFlushSnapshotFromClient {

   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
-    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
-    TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
-    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
-    TEST_UTIL.getConfiguration().setBoolean(
+    UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
+    UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
+    UTIL.getConfiguration().setInt("hbase.client.pause", 250);
+    UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
+    UTIL.getConfiguration().setBoolean(
       "hbase.master.enabletable.roundrobin", true);

     // Enable snapshot
-    TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
-    TEST_UTIL.getConfiguration().setLong(RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_KEY,
+    UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+    UTIL.getConfiguration().setLong(RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_KEY,
       RegionServerSnapshotManager.SNAPSHOT_TIMEOUT_MILLIS_DEFAULT * 2);

-    TEST_UTIL.startMiniCluster(3);
+    UTIL.startMiniCluster(3);
   }

   @AfterClass
   public static void tearDownAfterClass() throws Exception {
-    TEST_UTIL.shutdownMiniCluster();
+    UTIL.shutdownMiniCluster();
   }

   /**
@@ -98,7 +103,7 @@ public class TestRestoreFlushSnapshotFromClient {
    */
   @Before
   public void setup() throws Exception {
-    this.admin = TEST_UTIL.getHBaseAdmin();
+    this.admin = UTIL.getHBaseAdmin();

     long tid = System.currentTimeMillis();
     tableName = Bytes.toBytes("testtb-" + tid);
@@ -107,11 +112,11 @@ public class TestRestoreFlushSnapshotFromClient {
     snapshotName2 = Bytes.toBytes("snaptb2-" + tid);

     // create Table and disable it
-    createTable(tableName, FAMILY);
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
+    SnapshotTestingUtils.createTable(UTIL, tableName, FAMILY);
+    HTable table = new HTable(UTIL.getConfiguration(), tableName);
     try {
-      loadData(table, 500, FAMILY);
-      snapshot0Rows = TEST_UTIL.countRows(table);
+      SnapshotTestingUtils.loadData(UTIL, table, 500, FAMILY);
+      snapshot0Rows = UTIL.countRows(table);
       LOG.info("=== before snapshot with 500 rows");
       logFSTree();
@@ -123,8 +128,8 @@ public class TestRestoreFlushSnapshotFromClient {
       logFSTree();

       // insert more data
-      loadData(table, 500, FAMILY);
-      snapshot1Rows = TEST_UTIL.countRows(table);
+      SnapshotTestingUtils.loadData(UTIL, table, 500, FAMILY);
+      snapshot1Rows = UTIL.countRows(table);
       LOG.info("=== before snapshot with 1000 rows");
       logFSTree();
@@ -140,14 +145,8 @@ public class TestRestoreFlushSnapshotFromClient {

   @After
   public void tearDown() throws Exception {
-    TEST_UTIL.deleteTable(tableName);
-    admin.deleteSnapshot(snapshotName0);
-    admin.deleteSnapshot(snapshotName1);
-
-    // Ensure the archiver to be empty
-    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
-    mfs.getFileSystem().delete(
-      new Path(mfs.getRootDir(), HConstants.HFILE_ARCHIVE_DIRECTORY), true);
+    SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
+    SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
   }

   @Test
@@ -157,7 +156,7 @@ public class TestRestoreFlushSnapshotFromClient {

   @Test
   public void testRestoreSnapshot() throws IOException {
-    verifyRowCount(tableName, snapshot1Rows);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);

     // Restore from snapshot-0
     admin.disableTable(tableName);
@@ -166,13 +165,13 @@ public class TestRestoreFlushSnapshotFromClient {
     admin.enableTable(tableName);
     LOG.info("=== after restore with 500 row snapshot");
     logFSTree();
-    verifyRowCount(tableName, snapshot0Rows);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot0Rows);

     // Restore from snapshot-1
     admin.disableTable(tableName);
     admin.restoreSnapshot(snapshotName1);
     admin.enableTable(tableName);
-    verifyRowCount(tableName, snapshot1Rows);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
   }

   @Test(expected=SnapshotDoesNotExistException.class)
@@ -193,65 +192,29 @@ public class TestRestoreFlushSnapshotFromClient {
       int snapshotRows) throws IOException, InterruptedException {
     // create a new table from snapshot
     admin.cloneSnapshot(snapshotName, tableName);
-    verifyRowCount(tableName, snapshotRows);
+    SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshotRows);

-    TEST_UTIL.deleteTable(tableName);
+    UTIL.deleteTable(tableName);
   }

   @Test
   public void testRestoreSnapshotOfCloned() throws IOException, InterruptedException {
     byte[] clonedTableName = Bytes.toBytes("clonedtb-" + System.currentTimeMillis());
     admin.cloneSnapshot(snapshotName0, clonedTableName);
-    verifyRowCount(clonedTableName, snapshot0Rows);
+    SnapshotTestingUtils.verifyRowCount(UTIL, clonedTableName, snapshot0Rows);
     admin.snapshot(Bytes.toString(snapshotName2), Bytes.toString(clonedTableName), SnapshotDescription.Type.FLUSH);
-    TEST_UTIL.deleteTable(clonedTableName);
+    UTIL.deleteTable(clonedTableName);

     admin.cloneSnapshot(snapshotName2, clonedTableName);
-    verifyRowCount(clonedTableName, snapshot0Rows);
-    TEST_UTIL.deleteTable(clonedTableName);
+    SnapshotTestingUtils.verifyRowCount(UTIL, clonedTableName, snapshot0Rows);
+    UTIL.deleteTable(clonedTableName);
   }

   // ==========================================================================
   // Helpers
   // ==========================================================================
-  private void createTable(final byte[] tableName, final byte[]... families) throws IOException {
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    for (byte[] family: families) {
-      HColumnDescriptor hcd = new HColumnDescriptor(family);
-      htd.addFamily(hcd);
-    }
-    byte[][] splitKeys = new byte[16][];
-    byte[] hex = Bytes.toBytes("0123456789abcdef");
-    for (int i = 0; i < 16; ++i) {
-      splitKeys[i] = new byte[] { hex[i] };
-    }
-    admin.createTable(htd, splitKeys);
-  }
-
-  public void loadData(final HTable table, int rows, byte[]... families) throws IOException {
-    byte[] qualifier = Bytes.toBytes("q");
-    table.setAutoFlush(false);
-    while (rows-- > 0) {
-      byte[] value = Bytes.add(Bytes.toBytes(System.currentTimeMillis()), Bytes.toBytes(rows));
-      byte[] key = Bytes.toBytes(MD5Hash.getMD5AsHex(value));
-      Put put = new Put(key);
-      put.setDurability(Durability.SKIP_WAL);
-      for (byte[] family: families) {
-        put.add(family, qualifier, value);
-      }
-      table.put(put);
-    }
-    table.flushCommits();
-  }
-
   private void logFSTree() throws IOException {
-    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
+    MasterFileSystem mfs = UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
     FSUtils.logFileSystemState(mfs.getFileSystem(), mfs.getRootDir(), LOG);
   }
-
-  private void verifyRowCount(final byte[] tableName, long expectedRows) throws IOException {
-    HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
-    assertEquals(expectedRows, TEST_UTIL.countRows(table));
-    table.close();
-  }
 }