HBASE-16937 Replace SnapshotType protobuf conversion when we can directly use the pojo object

Matteo Bertozzi 2016-11-04 13:18:34 -07:00
parent 00ea7aeafe
commit 7e05d0f161
4 changed files with 31 additions and 45 deletions
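
The change in a nutshell: callers stop round-tripping through HBaseProtos.SnapshotDescription.Type and ProtobufUtil and pass the org.apache.hadoop.hbase.client.SnapshotType pojo enum straight to the client API. A minimal caller-side sketch of the new pattern (standalone example, not part of the patch; assumes a reachable cluster and an existing table named "myTable"):

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class PojoSnapshotExample {
  public static void main(String[] args) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      TableName tableName = TableName.valueOf("myTable");
      // No more ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH);
      // the pojo enum is accepted directly.
      admin.snapshot(new SnapshotDescription("mySnapshot", tableName, SnapshotType.FLUSH));
    }
  }
}

The tests in this commit also use the shorthand admin.snapshot(name, tableName, SnapshotType.FLUSH) overload shown in the diffs below.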

CreateSnapshot.java

@@ -18,25 +18,27 @@
*/
package org.apache.hadoop.hbase.snapshot;
import java.util.Arrays;
import java.util.Locale;
import org.apache.commons.cli.CommandLine;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import java.util.Arrays;
/**
* This is a command line class that will snapshot a given table.
*/
public class CreateSnapshot extends AbstractHBaseTool {
private SnapshotType snapshotType = SnapshotType.FLUSH;
private TableName tableName = null;
private String snapshotName = null;
private String snapshotType = null;
public static void main(String[] args) {
new CreateSnapshot().doStaticMain(args);
@@ -48,15 +50,18 @@ public class CreateSnapshot extends AbstractHBaseTool {
this.addRequiredOptWithArg("n", "name", "The name of the created snapshot");
this.addOptWithArg("s", "snapshot_type",
"Snapshot Type. FLUSH is default. Posible values are "
+ Arrays.toString(HBaseProtos.SnapshotDescription.Type.values()));
+ Arrays.toString(SnapshotType.values()));
}
@Override
protected void processOptions(CommandLine cmd) {
this.tableName = TableName.valueOf(cmd.getOptionValue('t'));
this.snapshotName = cmd.getOptionValue('n');
this.snapshotType = cmd.getOptionValue('s');
String snapshotTypeName = cmd.getOptionValue('s');
if (snapshotTypeName != null) {
snapshotTypeName = snapshotTypeName.toUpperCase(Locale.ROOT);
this.snapshotType = SnapshotType.valueOf(snapshotTypeName);
}
}
@Override
@@ -66,13 +71,9 @@ public class CreateSnapshot extends AbstractHBaseTool {
try {
connection = ConnectionFactory.createConnection(getConf());
admin = connection.getAdmin();
HBaseProtos.SnapshotDescription.Type type = HBaseProtos.SnapshotDescription.Type.FLUSH;
if (snapshotType != null) {
type = ProtobufUtil.createProtosSnapShotDescType(snapshotName);
}
admin.snapshot(new SnapshotDescription(snapshotName, tableName,
ProtobufUtil.createSnapshotType(type)));
admin.snapshot(new SnapshotDescription(snapshotName, tableName, snapshotType));
} catch (Exception e) {
System.err.println("failed to take the snapshot: " + e.getMessage());
return -1;
} finally {
if (admin != null) {

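The processOptions change above keeps FLUSH as the default and parses the -s value with SnapshotType.valueOf after uppercasing it with Locale.ROOT. A standalone sketch of that parsing logic (hypothetical helper, not in the patch):

import java.util.Locale;

import org.apache.hadoop.hbase.client.SnapshotType;

public final class SnapshotTypeParser {
  // Mirrors CreateSnapshot.processOptions: a missing option keeps the FLUSH
  // default; otherwise the value must match a SnapshotType constant
  // (case-insensitive), or valueOf throws IllegalArgumentException.
  static SnapshotType parse(String optionValue) {
    if (optionValue == null) {
      return SnapshotType.FLUSH;
    }
    return SnapshotType.valueOf(optionValue.toUpperCase(Locale.ROOT));
  }

  public static void main(String[] args) {
    System.out.println(parse(null));        // FLUSH
    System.out.println(parse("skipflush")); // SKIPFLUSH
  }
}
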
SnapshotTestingUtils.java

@@ -292,15 +292,13 @@ public final class SnapshotTestingUtils {
* Take snapshot with maximum of numTries attempts, ignoring CorruptedSnapshotException
* except for the last CorruptedSnapshotException
*/
public static void snapshot(Admin admin,
final String snapshotName, final String tableName,
HBaseProtos.SnapshotDescription.Type type, int numTries) throws IOException {
public static void snapshot(Admin admin, final String snapshotName, final TableName tableName,
final SnapshotType type, final int numTries) throws IOException {
int tries = 0;
CorruptedSnapshotException lastEx = null;
while (tries++ < numTries) {
try {
admin.snapshot(new SnapshotDescription(snapshotName, TableName.valueOf(tableName),
SnapshotType.valueOf(type.toString())));
admin.snapshot(snapshotName, tableName, type);
return;
} catch (CorruptedSnapshotException cse) {
LOG.warn("Got CorruptedSnapshotException", cse);

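With the new signature, callers of this test helper pass a TableName and the pojo SnapshotType directly instead of a raw table-name string and the protobuf type. A usage sketch (assumes a live Admin and an existing table, as in the tests that use this helper):

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;

public class SnapshotRetryExample {
  // Retry the FLUSH snapshot up to 3 times, ignoring CorruptedSnapshotException
  // on all but the last attempt (per the helper's javadoc).
  static void takeSnapshotWithRetries(Admin admin) throws IOException {
    SnapshotTestingUtils.snapshot(admin, "mySnapshot", TableName.valueOf("myTable"),
        SnapshotType.FLUSH, 3);
  }
}
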
TestFlushSnapshotFromClient.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
@@ -150,8 +151,7 @@ public class TestFlushSnapshotFromClient {
// take a snapshot of the enabled table
String snapshotString = "offlineTableSnapshot";
byte[] snapshot = Bytes.toBytes(snapshotString);
admin.snapshot(snapshotString, TABLE_NAME,
ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH));
admin.snapshot(snapshotString, TABLE_NAME, SnapshotType.FLUSH);
LOG.debug("Snapshot completed.");
// make sure we have the snapshot
@@ -186,8 +186,7 @@ public class TestFlushSnapshotFromClient {
// take a snapshot of the enabled table
String snapshotString = "skipFlushTableSnapshot";
byte[] snapshot = Bytes.toBytes(snapshotString);
admin.snapshot(snapshotString, TABLE_NAME,
ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.SKIPFLUSH));
admin.snapshot(snapshotString, TABLE_NAME, SnapshotType.SKIPFLUSH);
LOG.debug("Snapshot completed.");
// make sure we have the snapshot
@@ -266,8 +265,7 @@ public class TestFlushSnapshotFromClient {
// snapshot the non-existant table
try {
admin.snapshot("fail", tableName,
ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH));
admin.snapshot("fail", tableName, SnapshotType.FLUSH);
fail("Snapshot succeeded even though there is not table.");
} catch (SnapshotCreationException e) {
LOG.info("Correctly failed to snapshot a non-existant table:" + e.getMessage());
@@ -282,8 +280,7 @@ public class TestFlushSnapshotFromClient {
// take the snapshot async
admin.takeSnapshotAsync(
new SnapshotDescription("asyncSnapshot", TABLE_NAME,
ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH)));
new SnapshotDescription("asyncSnapshot", TABLE_NAME, SnapshotType.FLUSH));
// constantly loop, looking for the snapshot to complete
HMaster master = UTIL.getMiniHBaseCluster().getMaster();
@@ -305,8 +302,7 @@ public class TestFlushSnapshotFromClient {
// Take a snapshot
String snapshotBeforeMergeName = "snapshotBeforeMerge";
admin.snapshot(snapshotBeforeMergeName, TABLE_NAME,
ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH));
admin.snapshot(snapshotBeforeMergeName, TABLE_NAME, SnapshotType.FLUSH);
// Clone the table
TableName cloneBeforeMergeName = TableName.valueOf("cloneBeforeMerge");
@@ -374,8 +370,7 @@ public class TestFlushSnapshotFromClient {
// Take a snapshot
String snapshotName = "snapshotAfterMerge";
SnapshotTestingUtils.snapshot(admin, snapshotName, TABLE_NAME.getNameAsString(),
HBaseProtos.SnapshotDescription.Type.FLUSH, 3);
SnapshotTestingUtils.snapshot(admin, snapshotName, TABLE_NAME, SnapshotType.FLUSH, 3);
// Clone the table
TableName cloneName = TableName.valueOf("cloneMerge");
@@ -453,14 +448,10 @@ public class TestFlushSnapshotFromClient {
// build descriptions
SnapshotDescription[] descs = new SnapshotDescription[ssNum];
for (int i = 0; i < ssNum; i++) {
HBaseProtos.SnapshotDescription.Builder builder =
HBaseProtos.SnapshotDescription.newBuilder();
if(i %2 ==0) {
descs[i] = new SnapshotDescription("ss" + i, TABLE_NAME,
ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH));
if(i % 2 ==0) {
descs[i] = new SnapshotDescription("ss" + i, TABLE_NAME, SnapshotType.FLUSH);
} else {
descs[i] = new SnapshotDescription("ss" + i, TABLE2_NAME,
ProtobufUtil.createSnapshotType(HBaseProtos.SnapshotDescription.Type.FLUSH));
descs[i] = new SnapshotDescription("ss" + i, TABLE2_NAME, SnapshotType.FLUSH);
}
}
@@ -539,4 +530,4 @@ public class TestFlushSnapshotFromClient {
protected int countRows(final Table table, final byte[]... families) throws IOException {
return UTIL.countRows(table, families);
}
}
}

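The async test path above now builds the pojo SnapshotDescription as well. A sketch of that pattern (the completion check via Admin.isSnapshotFinished is an assumption here; the test in this commit polls the master's SnapshotManager instead):

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class AsyncSnapshotExample {
  static void snapshotAsync(Admin admin, TableName tableName)
      throws IOException, InterruptedException {
    SnapshotDescription desc =
        new SnapshotDescription("asyncSnapshot", tableName, SnapshotType.FLUSH);
    admin.takeSnapshotAsync(desc);
    // Constantly loop, looking for the snapshot to complete (isSnapshotFinished
    // is an assumed client API; not part of this diff).
    while (!admin.isSnapshotFinished(desc)) {
      Thread.sleep(200);
    }
  }
}
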
TestRestoreFlushSnapshotFromClient.java

@@ -26,10 +26,9 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -115,8 +114,7 @@ public class TestRestoreFlushSnapshotFromClient {
logFSTree();
// take a snapshot
admin.snapshot(Bytes.toString(snapshotName0), tableName,
ProtobufUtil.createSnapshotType(SnapshotDescription.Type.FLUSH));
admin.snapshot(Bytes.toString(snapshotName0), tableName, SnapshotType.FLUSH);
LOG.info("=== after snapshot with 500 rows");
logFSTree();
@@ -128,8 +126,7 @@ public class TestRestoreFlushSnapshotFromClient {
logFSTree();
// take a snapshot of the updated table
admin.snapshot(Bytes.toString(snapshotName1), tableName,
ProtobufUtil.createSnapshotType(SnapshotDescription.Type.FLUSH));
admin.snapshot(Bytes.toString(snapshotName1), tableName, SnapshotType.FLUSH);
LOG.info("=== after snapshot with 1000 rows");
logFSTree();
table.close();
@@ -194,8 +191,7 @@ public class TestRestoreFlushSnapshotFromClient {
TableName clonedTableName = TableName.valueOf("clonedtb-" + System.currentTimeMillis());
admin.cloneSnapshot(snapshotName0, clonedTableName);
verifyRowCount(UTIL, clonedTableName, snapshot0Rows);
admin.snapshot(Bytes.toString(snapshotName2), clonedTableName,
ProtobufUtil.createSnapshotType(SnapshotDescription.Type.FLUSH));
admin.snapshot(Bytes.toString(snapshotName2), clonedTableName, SnapshotType.FLUSH);
UTIL.deleteTable(clonedTableName);
admin.cloneSnapshot(snapshotName2, clonedTableName);