HBASE-11013 Clone Snapshots on Secure Cluster Should provide option to apply Retained User Permissions

Signed-off-by: Guanghao Zhang <zghao@apache.org>
huzheng 2017-05-15 16:00:01 +08:00 committed by Guanghao Zhang
parent 68d292d83b
commit f9dc4cad63
69 changed files with 2694 additions and 1994 deletions

----------------------------------------------------------------------

@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.quotas.QuotaFilter;
 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
@@ -1146,7 +1147,7 @@ public interface Admin extends Abortable, Closeable {
   @Deprecated
   void snapshot(final String snapshotName,
       final TableName tableName,
-      HBaseProtos.SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
+      SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
       IllegalArgumentException;
   /**
@@ -1168,7 +1169,7 @@ public interface Admin extends Abortable, Closeable {
    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
    */
   @Deprecated
-  void snapshot(HBaseProtos.SnapshotDescription snapshot)
+  void snapshot(SnapshotDescription snapshot)
       throws IOException, SnapshotCreationException, IllegalArgumentException;
   /**
@@ -1182,7 +1183,7 @@ public interface Admin extends Abortable, Closeable {
    * @throws IllegalArgumentException if the snapshot request is formatted incorrectly
    */
   @Deprecated
-  MasterProtos.SnapshotResponse takeSnapshotAsync(HBaseProtos.SnapshotDescription snapshot)
+  MasterProtos.SnapshotResponse takeSnapshotAsync(SnapshotDescription snapshot)
       throws IOException, SnapshotCreationException;
   /**
@@ -1202,7 +1203,7 @@ public interface Admin extends Abortable, Closeable {
    * unknown
    */
   @Deprecated
-  boolean isSnapshotFinished(final HBaseProtos.SnapshotDescription snapshot)
+  boolean isSnapshotFinished(final SnapshotDescription snapshot)
       throws IOException, HBaseSnapshotException, UnknownSnapshotException;
   /**
@@ -1268,6 +1269,23 @@ public interface Admin extends Abortable, Closeable {
   void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot)
       throws IOException, RestoreSnapshotException;
+  /**
+   * Restore the specified snapshot on the original table. (The table must be disabled) If
+   * 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken before
+   * executing the restore operation. In case of restore failure, the failsafe snapshot will be
+   * restored. If the restore completes without problem the failsafe snapshot is deleted. The
+   * failsafe snapshot name is configurable by using the property
+   * "hbase.snapshot.restore.failsafe.name".
+   * @param snapshotName name of the snapshot to restore
+   * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
+   * @param restoreAcl true to restore acl of snapshot into table.
+   * @throws IOException if a remote or network exception occurs
+   * @throws RestoreSnapshotException if snapshot failed to be restored
+   * @throws IllegalArgumentException if the restore request is formatted incorrectly
+   */
+  void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot, boolean restoreAcl)
+      throws IOException, RestoreSnapshotException;
   /**
    * Create a new table by cloning the snapshot content.
    *
@@ -1294,6 +1312,19 @@ public interface Admin extends Abortable, Closeable {
   void cloneSnapshot(final String snapshotName, final TableName tableName)
       throws IOException, TableExistsException, RestoreSnapshotException;
+  /**
+   * Create a new table by cloning the snapshot content.
+   * @param snapshotName name of the snapshot to be cloned
+   * @param tableName name of the table where the snapshot will be restored
+   * @param restoreAcl true to restore acl of snapshot into newly created table
+   * @throws IOException if a remote or network exception occurs
+   * @throws TableExistsException if table to be created already exists
+   * @throws RestoreSnapshotException if snapshot failed to be cloned
+   * @throws IllegalArgumentException if the specified table does not have a valid name
+   */
+  void cloneSnapshot(final String snapshotName, final TableName tableName, final boolean restoreAcl)
+      throws IOException, TableExistsException, RestoreSnapshotException;
   /**
    * Execute a distributed procedure on a cluster.
    *
@@ -1342,7 +1373,7 @@ public interface Admin extends Abortable, Closeable {
    * @throws IOException if a network error occurs
    */
   @Deprecated
-  List<HBaseProtos.SnapshotDescription> listSnapshots() throws IOException;
+  List<SnapshotDescription> listSnapshots() throws IOException;
   /**
    * List all the completed snapshots matching the given regular expression.
@@ -1352,7 +1383,7 @@ public interface Admin extends Abortable, Closeable {
    * @throws IOException if a remote or network exception occurs
    */
   @Deprecated
-  List<HBaseProtos.SnapshotDescription> listSnapshots(String regex) throws IOException;
+  List<SnapshotDescription> listSnapshots(String regex) throws IOException;
   /**
    * List all the completed snapshots matching the given pattern.
@@ -1362,7 +1393,7 @@ public interface Admin extends Abortable, Closeable {
    * @throws IOException if a remote or network exception occurs
    */
   @Deprecated
-  List<HBaseProtos.SnapshotDescription> listSnapshots(Pattern pattern) throws IOException;
+  List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException;
   /**
    * List all the completed snapshots matching the given table name regular expression and snapshot
@@ -1373,7 +1404,7 @@ public interface Admin extends Abortable, Closeable {
    * @throws IOException if a remote or network exception occurs
    */
   @Deprecated
-  List<HBaseProtos.SnapshotDescription> listTableSnapshots(String tableNameRegex,
+  List<SnapshotDescription> listTableSnapshots(String tableNameRegex,
       String snapshotNameRegex) throws IOException;
   /**
@@ -1385,7 +1416,7 @@ public interface Admin extends Abortable, Closeable {
    * @throws IOException if a remote or network exception occurs
    */
   @Deprecated
-  List<HBaseProtos.SnapshotDescription> listTableSnapshots(Pattern tableNamePattern,
+  List<SnapshotDescription> listTableSnapshots(Pattern tableNamePattern,
       Pattern snapshotNamePattern) throws IOException;
   /**
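For orientation, here is a minimal client-side sketch of the two new overloads introduced above. The connection setup, table name "t1", and snapshot name "snap1" are hypothetical, not part of this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RestoreAclSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("t1");   // hypothetical table
      admin.snapshot("snap1", table);              // hypothetical snapshot name

      // Clone into a new table and re-apply the permissions retained in the snapshot.
      admin.cloneSnapshot("snap1", TableName.valueOf("t1_clone"), true);

      // Restore onto the original table (which must be disabled first),
      // taking a failsafe snapshot and restoring ACLs as well.
      admin.disableTable(table);
      admin.restoreSnapshot("snap1", true, true);
      admin.enableTable(table);
    }
  }
}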

----------------------------------------------------------------------

@@ -95,7 +95,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest;
@@ -157,6 +156,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.quotas.QuotaFilter;
 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
@@ -917,7 +917,6 @@ public class HBaseAdmin implements Admin {
    * or TimeoutException in case the wait timeout was not long enough to allow the
    * operation to complete.
    *
-   * @param desc table descriptor for table
    * @param tableName name of table to delete
    * @throws IOException if a remote or network exception occurs
    * @return the result of the async delete. You can use Future.get(long, TimeUnit)
@@ -3820,23 +3819,21 @@ public class HBaseAdmin implements Admin {
   }
   /**
-   * Restore the specified snapshot on the original table. (The table must be disabled)
-   * If 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken
-   * before executing the restore operation.
-   * In case of restore failure, the failsafe snapshot will be restored.
-   * If the restore completes without problem the failsafe snapshot is deleted.
-   *
-   * The failsafe snapshot name is configurable by using the property
+   * Restore the specified snapshot on the original table. (The table must be disabled) If
+   * 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken before
+   * executing the restore operation. In case of restore failure, the failsafe snapshot will be
+   * restored. If the restore completes without problem the failsafe snapshot is deleted. The
+   * failsafe snapshot name is configurable by using the property
    * "hbase.snapshot.restore.failsafe.name".
-   *
    * @param snapshotName name of the snapshot to restore
    * @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
+   * @param restoreAcl true to restore acl of snapshot into table.
    * @throws IOException if a remote or network exception occurs
    * @throws RestoreSnapshotException if snapshot failed to be restored
    * @throws IllegalArgumentException if the restore request is formatted incorrectly
    */
   @Override
-  public void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot)
+  public void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot, boolean restoreAcl)
       throws IOException, RestoreSnapshotException {
     TableName tableName = null;
     for (SnapshotDescription snapshotInfo: listSnapshots()) {
@@ -3853,7 +3850,7 @@ public class HBaseAdmin implements Admin {
     // The table does not exist, switch to clone.
     if (!tableExists(tableName)) {
-      cloneSnapshot(snapshotName, tableName);
+      cloneSnapshot(snapshotName, tableName, restoreAcl);
       return;
     }
@@ -3877,13 +3874,13 @@ public class HBaseAdmin implements Admin {
     try {
       // Restore snapshot
-      internalRestoreSnapshot(snapshotName, tableName);
+      internalRestoreSnapshot(snapshotName, tableName, restoreAcl);
     } catch (IOException e) {
       // Something went wrong during the restore...
       // if the pre-restore snapshot is available try to rollback
       if (takeFailSafeSnapshot) {
         try {
-          internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName);
+          internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName, restoreAcl);
           String msg = "Restore snapshot=" + snapshotName +
             " failed. Rollback to snapshot=" + failSafeSnapshotSnapshotName + " succeeded.";
           LOG.error(msg, e);
@@ -3909,6 +3906,12 @@ public class HBaseAdmin implements Admin {
     }
   }
+  @Override
+  public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot)
+      throws IOException, RestoreSnapshotException {
+    restoreSnapshot(snapshotName, takeFailSafeSnapshot, false);
+  }
   /**
    * Create a new table by cloning the snapshot content.
    *
@@ -3968,15 +3971,21 @@ public class HBaseAdmin implements Admin {
    * @throws IllegalArgumentException if the specified table does not have a valid name
    */
   @Override
-  public void cloneSnapshot(final String snapshotName, final TableName tableName)
-      throws IOException, TableExistsException, RestoreSnapshotException {
+  public void cloneSnapshot(final String snapshotName, final TableName tableName,
+      final boolean restoreAcl) throws IOException, TableExistsException, RestoreSnapshotException {
     if (tableExists(tableName)) {
       throw new TableExistsException(tableName);
     }
-    internalRestoreSnapshot(snapshotName, tableName);
+    internalRestoreSnapshot(snapshotName, tableName, restoreAcl);
     waitUntilTableIsEnabled(tableName);
   }
+  @Override
+  public void cloneSnapshot(String snapshotName, TableName tableName)
+      throws IOException, TableExistsException, RestoreSnapshotException {
+    cloneSnapshot(snapshotName, tableName, false);
+  }
   /**
    * Execute a distributed procedure on a cluster synchronously with return data
    *
@@ -4117,23 +4126,23 @@ public class HBaseAdmin implements Admin {
   }
   /**
-   * Execute Restore/Clone snapshot and wait for the server to complete (blocking).
-   * To check if the cloned table exists, use {@link #isTableAvailable} -- it is not safe to
-   * create an HTable instance to this table before it is available.
+   * Execute Restore/Clone snapshot and wait for the server to complete (blocking). To check if the
+   * cloned table exists, use {@link #isTableAvailable} -- it is not safe to create an HTable
+   * instance to this table before it is available.
    * @param snapshotName snapshot to restore
    * @param tableName table name to restore the snapshot on
    * @throws IOException if a remote or network exception occurs
    * @throws RestoreSnapshotException if snapshot failed to be restored
    * @throws IllegalArgumentException if the restore request is formatted incorrectly
    */
-  private void internalRestoreSnapshot(final String snapshotName, final TableName
-      tableName)
+  private void internalRestoreSnapshot(final String snapshotName, final TableName tableName,
+      final boolean restoreAcl)
       throws IOException, RestoreSnapshotException {
     SnapshotDescription snapshot = SnapshotDescription.newBuilder()
         .setName(snapshotName).setTable(tableName.getNameAsString()).build();
     // actually restore the snapshot
-    internalRestoreSnapshotAsync(snapshot);
+    internalRestoreSnapshotAsync(snapshot, restoreAcl);
     final IsRestoreSnapshotDoneRequest request = IsRestoreSnapshotDoneRequest.newBuilder()
         .setSnapshot(snapshot).build();
@@ -4177,12 +4186,12 @@ public class HBaseAdmin implements Admin {
    * @throws RestoreSnapshotException if snapshot failed to be restored
    * @throws IllegalArgumentException if the restore request is formatted incorrectly
    */
-  private RestoreSnapshotResponse internalRestoreSnapshotAsync(final SnapshotDescription snapshot)
-      throws IOException, RestoreSnapshotException {
+  private RestoreSnapshotResponse internalRestoreSnapshotAsync(final SnapshotDescription snapshot,
+      final boolean restoreAcl) throws IOException, RestoreSnapshotException {
     ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
-    final RestoreSnapshotRequest request = RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot)
-        .build();
+    final RestoreSnapshotRequest request =
+        RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot).setRestoreACL(restoreAcl).build();
     // run the snapshot restore on the master
     return executeCallable(new MasterCallable<RestoreSnapshotResponse>(getConnection()) {

----------------------------------------------------------------------

@@ -157,6 +157,10 @@ public class TablePermission extends Permission {
     return table;
   }
+  public void setTableName(TableName table) {
+    this.table = table;
+  }
   public boolean hasFamily() {
     return family != null;
   }

----------------------------------------------------------------------

@@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.snapshot;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.util.Bytes;
 /**
@@ -36,7 +36,7 @@ public class ClientSnapshotDescriptionUtils {
    * @throws IllegalArgumentException if the name of the snapshot or the name of the table to
    *           snapshot are not valid names.
    */
-  public static void assertSnapshotRequestIsValid(HBaseProtos.SnapshotDescription snapshot)
+  public static void assertSnapshotRequestIsValid(SnapshotDescription snapshot)
       throws IllegalArgumentException {
     // make sure the snapshot name is valid
     TableName.isLegalTableQualifierName(Bytes.toBytes(snapshot.getName()), true);
@@ -52,12 +52,12 @@
   /**
    * Returns a single line (no \n) representation of snapshot metadata. Use this instead of
-   * {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription#toString()}. We don't replace SnapshotDescription's toString
+   * {@link SnapshotDescription#toString()}. We don't replace SnapshotDescription's toString
    * because it is auto-generated by protoc.
    * @param ssd
    * @return Single line string with a summary of the snapshot parameters
    */
-  public static String toString(HBaseProtos.SnapshotDescription ssd) {
+  public static String toString(SnapshotDescription ssd) {
     if (ssd == null) {
       return null;
     }

----------------------------------------------------------------------

@@ -19,7 +19,7 @@ package org.apache.hadoop.hbase.snapshot;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 /**

----------------------------------------------------------------------

@@ -19,8 +19,8 @@ package org.apache.hadoop.hbase.snapshot;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 /**
  * General exception base class for when a snapshot fails

----------------------------------------------------------------------

@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.snapshot;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 /**
  * Thrown when a snapshot could not be restored due to a server-side error when restoring it.

----------------------------------------------------------------------

@@ -19,7 +19,7 @@ package org.apache.hadoop.hbase.snapshot;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 /**
  * Thrown when a snapshot could not be created due to a server-side error when

----------------------------------------------------------------------

@@ -19,7 +19,7 @@ package org.apache.hadoop.hbase.snapshot;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 /**

----------------------------------------------------------------------

@@ -19,7 +19,7 @@ package org.apache.hadoop.hbase.snapshot;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 /**
  * Thrown when a snapshot exists but should not

----------------------------------------------------------------------

@@ -32,11 +32,11 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

----------------------------------------------------------------------

@@ -157,23 +157,6 @@ message NameInt64Pair {
   optional int64 value = 2;
 }
-/**
- * Description of the snapshot to take
- */
-message SnapshotDescription {
-  required string name = 1;
-  optional string table = 2; // not needed for delete, but checked for in taking snapshot
-  optional int64 creation_time = 3 [default = 0];
-  enum Type {
-    DISABLED = 0;
-    FLUSH = 1;
-    SKIPFLUSH = 2;
-  }
-  optional Type type = 4 [default = FLUSH];
-  optional int32 version = 5;
-  optional string owner = 6;
-}
 /**
  * Description of the distributed procedure to take
  */

----------------------------------------------------------------------

@@ -32,6 +32,7 @@ import "ClusterStatus.proto";
 import "ErrorHandling.proto";
 import "Procedure.proto";
 import "Quota.proto";
+import "Snapshot.proto";
 /* Column-level protobufs */
@@ -391,6 +392,7 @@ message DeleteSnapshotResponse {
 message RestoreSnapshotRequest {
   required SnapshotDescription snapshot = 1;
+  optional bool restoreACL = 2 [default=false];
 }
 message RestoreSnapshotResponse {
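Because restoreACL is optional with default=false, a request serialized by an older client simply omits the field and the master falls back to the previous behavior. A short sketch of both sides of that contract, using the Java generated from Master.proto (the snapshot variable is assumed to be an already-built SnapshotDescription):

// Client side: opt in to ACL restoration on the request.
RestoreSnapshotRequest request = RestoreSnapshotRequest.newBuilder()
    .setSnapshot(snapshot)   // required SnapshotDescription
    .setRestoreACL(true)     // optional; absent on the wire means false
    .build();

// Master side: old clients never set the field, so this evaluates to false.
boolean restoreAcl = request.hasRestoreACL() && request.getRestoreACL();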

----------------------------------------------------------------------

@@ -23,9 +23,28 @@ option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 option optimize_for = SPEED;
+import "AccessControl.proto";
 import "FS.proto";
 import "HBase.proto";
+/**
+ * Description of the snapshot to take
+ */
+message SnapshotDescription {
+  required string name = 1;
+  optional string table = 2; // not needed for delete, but checked for in taking snapshot
+  optional int64 creation_time = 3 [default = 0];
+  enum Type {
+    DISABLED = 0;
+    FLUSH = 1;
+    SKIPFLUSH = 2;
+  }
+  optional Type type = 4 [default = FLUSH];
+  optional int32 version = 5;
+  optional string owner = 6;
+  optional UsersAndPermissions users_and_permissions = 7;
+}
 message SnapshotFileInfo {
   enum Type {
     HFILE = 1;
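The message body is identical to the one removed from HBase.proto apart from the new field 7, so previously written snapshot descriptions still deserialize; only snapshots taken after this change can carry a permissions payload. A hedged sketch of reading the new field from the generated Java (UsersAndPermissions comes from the AccessControl.proto bindings):

import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UsersAndPermissions;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;

// Only consult the ACL payload when the snapshot actually carries one;
// for an unset optional message field, hasUsersAndPermissions() is false.
static void printRetainedPermissions(SnapshotDescription snapshot) {
  if (snapshot.hasUsersAndPermissions()) {
    UsersAndPermissions perms = snapshot.getUsersAndPermissions();
    System.out.println("snapshot " + snapshot.getName() + " retains "
        + perms.getUserPermissionsCount() + " user permission entries");
  }
}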

----------------------------------------------------------------------

@@ -49,7 +49,7 @@ org.apache.hadoop.hbase.HTableDescriptor;
 org.apache.hadoop.hbase.HBaseConfiguration;
 org.apache.hadoop.hbase.TableName;
 org.apache.hadoop.hbase.tool.Canary;
-org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 org.apache.hadoop.hbase.master.DeadServer;
 org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 org.apache.hadoop.hbase.security.visibility.VisibilityConstants;

----------------------------------------------------------------------

@@ -34,8 +34,8 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import java.io.IOException;
 import java.util.List;

----------------------------------------------------------------------

@@ -34,8 +34,8 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import java.io.IOException;
 import java.util.List;

----------------------------------------------------------------------

@@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 /**
  * Defines coprocessor hooks for interacting with operations on the

----------------------------------------------------------------------

@@ -36,8 +36,8 @@ import org.apache.hadoop.hbase.client.IsolationLevel;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;

----------------------------------------------------------------------

@@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.coprocessor.*;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 /**
  * Provides the coprocessor framework and environment for master oriented

----------------------------------------------------------------------

@@ -60,7 +60,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStor
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
@@ -186,6 +185,7 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Repor
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.User;
@@ -1311,7 +1311,8 @@ public class MasterRpcServices extends RSRpcServices
       master.ensureNamespaceExists(dstTable.getNamespaceAsString());
       SnapshotDescription reqSnapshot = request.getSnapshot();
-      master.snapshotManager.restoreSnapshot(reqSnapshot);
+      master.snapshotManager.restoreSnapshot(reqSnapshot,
+        request.hasRestoreACL() && request.getRestoreACL());
       return RestoreSnapshotResponse.newBuilder().build();
     } catch (ForeignException e) {
       throw new ServiceException(e.getCause());

----------------------------------------------------------------------

@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.master;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 /**
  * Watch the current snapshot under process

----------------------------------------------------------------------

@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.master.SnapshotSentinel;
 import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
@@ -63,6 +63,7 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot
   private final static String NAME = "Master CloneSnapshotHandler";
   private final SnapshotDescription snapshot;
+  private final boolean restoreAcl;
   private final ForeignExceptionDispatcher monitor;
   private final MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
@@ -73,12 +74,14 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot
   private volatile boolean stopped = false;
   public CloneSnapshotHandler(final MasterServices masterServices,
-      final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor) {
+      final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor,
+      final boolean restoreAcl) {
     super(masterServices, masterServices.getMasterFileSystem(), hTableDescriptor,
       masterServices.getConfiguration(), null, masterServices);
     // Snapshot information
     this.snapshot = snapshot;
+    this.restoreAcl = restoreAcl;
     // Monitor
     this.monitor = new ForeignExceptionDispatcher();
@@ -118,6 +121,13 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot
     Preconditions.checkArgument(!metaChanges.hasRegionsToRemove(),
       "A clone should not have regions to remove");
+    // Clone acl of snapshot into newly created table.
+    if (restoreAcl && snapshot.hasUsersAndPermissions()
+        && snapshot.getUsersAndPermissions() != null
+        && SnapshotDescriptionUtils.isSecurityAvailable(conf)) {
+      RestoreSnapshotHelper.restoreSnapshotACL(snapshot, tableName, conf);
+    }
     // At this point the clone is complete. Next step is enabling the table.
     String msg = "Clone snapshot="+ snapshot.getName() +" on table=" + tableName + " completed!";
     LOG.info(msg);
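The guard in front of restoreSnapshotACL reappears in RestoreSnapshotHandler below. Its shape, extracted into a helper purely for illustration (the commit itself keeps the check inline in both handlers):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;

// Illustrative helper only; not part of the commit.
static boolean shouldRestoreAcl(boolean restoreAcl, SnapshotDescription snapshot,
    Configuration conf) {
  // For a protobuf message field, getUsersAndPermissions() never returns null
  // (it returns the default instance), so the patch's explicit null check after
  // hasUsersAndPermissions() is effectively redundant.
  return restoreAcl
      && snapshot.hasUsersAndPermissions()
      && SnapshotDescriptionUtils.isSecurityAvailable(conf);
}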

----------------------------------------------------------------------

@@ -25,8 +25,6 @@ import java.util.concurrent.ThreadPoolExecutor;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -34,7 +32,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.util.FSUtils;

----------------------------------------------------------------------

@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.procedure.Procedure;
 import org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
 import org.apache.hadoop.hbase.util.Pair;
@@ -49,7 +49,7 @@ public class EnabledTableSnapshotHandler extends TakeSnapshotHandler {
   private final ProcedureCoordinator coordinator;
   public EnabledTableSnapshotHandler(SnapshotDescription snapshot, MasterServices master,
       final SnapshotManager manager) {
     super(snapshot, master);
     this.coordinator = manager.getCoordinator();
   }

----------------------------------------------------------------------

@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.master.MasterServices;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;

----------------------------------------------------------------------

@@ -46,7 +46,7 @@ import org.apache.hadoop.hbase.master.SnapshotSentinel;
 import org.apache.hadoop.hbase.master.handler.TableEventHandler;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
@@ -65,6 +65,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
   private final HTableDescriptor hTableDescriptor;
   private final SnapshotDescription snapshot;
+  private final boolean restoreAcl;
   private final ForeignExceptionDispatcher monitor;
   private final MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
@@ -73,11 +74,13 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
   private volatile boolean stopped = false;
   public RestoreSnapshotHandler(final MasterServices masterServices,
-      final SnapshotDescription snapshot, final HTableDescriptor htd) throws IOException {
+      final SnapshotDescription snapshot, final HTableDescriptor htd, final boolean restoreAcl)
+      throws IOException {
     super(EventType.C_M_RESTORE_SNAPSHOT, htd.getTableName(), masterServices, masterServices);
     // Snapshot information
     this.snapshot = snapshot;
+    this.restoreAcl = restoreAcl;
     // Monitor
     this.monitor = new ForeignExceptionDispatcher();
@@ -166,6 +169,14 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
     }
     metaChanges.updateMetaParentRegions(this.server.getConnection(), hris);
+    // 5. restore acl of snapshot into the table.
+    if (restoreAcl && snapshot.hasUsersAndPermissions()
+        && snapshot.getUsersAndPermissions() != null
+        && SnapshotDescriptionUtils.isSecurityAvailable(server.getConfiguration())) {
+      RestoreSnapshotHelper.restoreSnapshotACL(snapshot, tableName, server.getConfiguration());
+    }
     // At this point the restore is complete. Next step is enabling the table.
     LOG.info("Restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) +
       " on table=" + tableName + " completed!");

----------------------------------------------------------------------

@@ -62,8 +62,8 @@ import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
 import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinatorRpcs;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription.Type;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.quotas.QuotaExceededException;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
@@ -654,7 +654,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
    * @param hTableDescriptor Table Descriptor of the table to create
    */
   synchronized void cloneSnapshot(final SnapshotDescription snapshot,
-      final HTableDescriptor hTableDescriptor) throws HBaseSnapshotException {
+      final HTableDescriptor hTableDescriptor, final boolean restoreAcl)
+      throws HBaseSnapshotException {
     TableName tableName = hTableDescriptor.getTableName();
     // make sure we aren't running a snapshot on the same table
@@ -669,7 +670,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     try {
       CloneSnapshotHandler handler =
-          new CloneSnapshotHandler(master, snapshot, hTableDescriptor).prepare();
+          new CloneSnapshotHandler(master, snapshot, hTableDescriptor, restoreAcl).prepare();
       this.executorService.submit(handler);
       this.restoreHandlers.put(tableName, handler);
     } catch (Exception e) {
@@ -685,7 +686,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
    * @param reqSnapshot
    * @throws IOException
    */
-  public void restoreSnapshot(SnapshotDescription reqSnapshot) throws IOException {
+  public void restoreSnapshot(SnapshotDescription reqSnapshot, boolean restoreAcl)
+      throws IOException {
     FileSystem fs = master.getMasterFileSystem().getFileSystem();
     Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(reqSnapshot, rootDir);
     MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
@@ -742,7 +744,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     if (tableRegionCount > 0 && tableRegionCount < snapshotRegionCount) {
       checkAndUpdateNamespaceRegionQuota(snapshotRegionCount, tableName);
     }
-    restoreSnapshot(snapshot, snapshotTableDesc);
+    restoreSnapshot(snapshot, snapshotTableDesc, restoreAcl);
     // Update the region quota if snapshotRegionCount is smaller. This step should not fail
     // because we have reserved enough region quota beforehand
     if (tableRegionCount > 0 && tableRegionCount > snapshotRegionCount) {
@@ -776,7 +778,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     }
     try {
       checkAndUpdateNamespaceQuota(manifest, tableName);
-      cloneSnapshot(snapshot, htd);
+      cloneSnapshot(snapshot, htd, restoreAcl);
     } catch (IOException e) {
       this.master.getMasterQuotaManager().removeTableFromNamespaceQuota(tableName);
       LOG.error("Exception occurred while cloning the snapshot " + snapshot.getName()
@@ -825,7 +827,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
    * @param hTableDescriptor Table Descriptor
    */
   private synchronized void restoreSnapshot(final SnapshotDescription snapshot,
-      final HTableDescriptor hTableDescriptor) throws HBaseSnapshotException {
+      final HTableDescriptor hTableDescriptor, final boolean restoreAcl)
+      throws HBaseSnapshotException {
     TableName tableName = hTableDescriptor.getTableName();
     // make sure we aren't running a snapshot on the same table
@@ -840,7 +843,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     try {
       RestoreSnapshotHandler handler =
-          new RestoreSnapshotHandler(master, snapshot, hTableDescriptor).prepare();
+          new RestoreSnapshotHandler(master, snapshot, hTableDescriptor, restoreAcl).prepare();
       this.executorService.submit(handler);
       restoreHandlers.put(tableName, handler);
     } catch (Exception e) {

----------------------------------------------------------------------

@@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;


@@ -155,7 +155,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall;
 import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
 import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;


@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.regionserver;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.lang.Thread.UncaughtExceptionHandler;
-import java.lang.management.ManagementFactory;
 import java.lang.management.MemoryUsage;
 import java.lang.reflect.Constructor;
 import java.net.BindException;
@@ -96,7 +95,6 @@ import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.executor.ExecutorType;
 import org.apache.hadoop.hbase.fs.HFileSystem;
-import org.apache.hadoop.hbase.http.HttpServer;
 import org.apache.hadoop.hbase.http.InfoServer;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.util.HeapMemorySizeUtil;


@@ -28,10 +28,9 @@ import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.procedure.ProcedureMember;
 import org.apache.hadoop.hbase.procedure.Subprocedure;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.Region.Operation;
 import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager.SnapshotSubprocedurePool;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;


@@ -50,7 +50,7 @@ import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
 import org.apache.hadoop.hbase.procedure.Subprocedure;
 import org.apache.hadoop.hbase.procedure.SubprocedureFactory;
 import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;


@@ -470,7 +470,7 @@ public class AccessControlLists {
     return allPerms;
   }
-  static ListMultimap<String, TablePermission> getTablePermissions(Configuration conf,
+  public static ListMultimap<String, TablePermission> getTablePermissions(Configuration conf,
       TableName tableName) throws IOException {
     return getPermissions(conf, tableName != null ? tableName.getName() : null, null);
   }


@@ -91,10 +91,10 @@ import org.apache.hadoop.hbase.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadRequest;
 import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
 import org.apache.hadoop.hbase.regionserver.Region;


@@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
 import java.util.Arrays;
 import java.util.Locale;
@@ -47,7 +47,7 @@ public class CreateSnapshot extends AbstractHBaseTool {
     this.addRequiredOptWithArg("n", "name", "The name of the created snapshot");
     this.addOptWithArg("s", "snapshot_type",
         "Snapshot Type. FLUSH is default. Possible values are "
-            + Arrays.toString(HBaseProtos.SnapshotDescription.Type.values()));
+            + Arrays.toString(SnapshotDescription.Type.values()));
   }
   @Override
@@ -65,9 +65,9 @@ public class CreateSnapshot extends AbstractHBaseTool {
     try {
       connection = ConnectionFactory.createConnection(getConf());
       admin = connection.getAdmin();
-      HBaseProtos.SnapshotDescription.Type type = HBaseProtos.SnapshotDescription.Type.FLUSH;
+      SnapshotDescription.Type type = SnapshotDescription.Type.FLUSH;
       if (snapshotType != null) {
-        type = HBaseProtos.SnapshotDescription.Type.valueOf(snapshotName.toUpperCase(Locale.ROOT));
+        type = SnapshotDescription.Type.valueOf(snapshotType.toUpperCase(Locale.ROOT));
       }
       admin.snapshot(snapshotName, TableName.valueOf(tableName), type);


@@ -53,7 +53,7 @@ import org.apache.hadoop.hbase.io.FileLink;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.WALLink;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.util.FSUtils;


@@ -29,10 +29,12 @@ import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.ThreadPoolExecutor;
+import com.google.common.collect.ListMultimap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -51,11 +53,14 @@ import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.security.access.AccessControlClient;
+import org.apache.hadoop.hbase.security.access.TablePermission;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.ModifyRegionUtils;
@@ -602,7 +607,7 @@ public class RestoreSnapshotHelper {
    * </pre></blockquote>
    * @param familyDir destination directory for the store file
    * @param regionInfo destination region info for the table
-   * @param hfileName reference file name
+   * @param storeFile reference file name
    */
   private void restoreReferenceFile(final Path familyDir, final HRegionInfo regionInfo,
       final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
@@ -741,4 +746,25 @@ public class RestoreSnapshotHelper {
     }
     return metaChanges;
   }
+
+  public static void restoreSnapshotACL(SnapshotDescription snapshot, TableName newTableName,
+      Configuration conf) throws IOException {
+    if (snapshot.hasUsersAndPermissions() && snapshot.getUsersAndPermissions() != null) {
+      LOG.info("Restore snapshot acl to table. snapshot: " + snapshot + ", table: " + newTableName);
+      ListMultimap<String, TablePermission> perms =
+          ProtobufUtil.toUserTablePermissions(snapshot.getUsersAndPermissions());
+      try {
+        for (Entry<String, TablePermission> e : perms.entries()) {
+          String user = e.getKey();
+          TablePermission perm = e.getValue();
+          perm.setTableName(newTableName);
+          AccessControlClient.grant(conf, perm.getTableName(), user, perm.getFamily(),
+              perm.getQualifier(), perm.getActions());
+        }
+      } catch (Throwable e) {
+        throw new IOException("Grant acl to newly created table failed. snapshot: " + snapshot
+            + ", table: " + newTableName, e);
+      }
+    }
+  }
 }
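
For orientation between the server-side pieces above and the client API below, here is a minimal usage sketch. It is not part of this commit; the configuration object and the table/snapshot names are illustrative, and it assumes the three-argument cloneSnapshot overload this change adds to Admin (the one exercised by TestSnapshotWithAcl further down):

    // Take a snapshot of a secured table, then clone it with its retained ACLs.
    Connection conn = ConnectionFactory.createConnection(conf);
    try {
      Admin admin = conn.getAdmin();
      admin.snapshot("mySnapshot", TableName.valueOf("myTable"));
      // restoreAcl = true re-grants the permissions captured in the snapshot
      admin.cloneSnapshot("mySnapshot", TableName.valueOf("myClone"), true);
    } finally {
      conn.close();
    }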


@@ -17,10 +17,11 @@
  */
 package org.apache.hadoop.hbase.snapshot;

-import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.Collections;

+import com.google.common.collect.ListMultimap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -30,10 +31,16 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifestV2;
+import org.apache.hadoop.hbase.security.access.AccessControlLists;
+import org.apache.hadoop.hbase.security.access.TablePermission;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -246,7 +253,7 @@ public final class SnapshotDescriptionUtils {
    * {@link SnapshotDescription}.
    */
   public static SnapshotDescription validate(SnapshotDescription snapshot, Configuration conf)
-      throws IllegalArgumentException {
+      throws IllegalArgumentException, IOException {
     if (!snapshot.hasTable()) {
       throw new IllegalArgumentException(
           "Descriptor doesn't apply to a table, so we can't build it.");
@@ -262,6 +269,12 @@ public final class SnapshotDescriptionUtils {
       builder.setCreationTime(time);
       snapshot = builder.build();
     }
+
+    // set the acl to snapshot if security feature is enabled.
+    if (isSecurityAvailable(conf)) {
+      snapshot = writeAclToSnapshotDescription(snapshot, conf);
+    }
+
     return snapshot;
   }
@@ -306,7 +319,7 @@ public final class SnapshotDescriptionUtils {
   }
   /**
-   * Read in the {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} stored for the snapshot in the passed directory
+   * Read in the {@link SnapshotDescription} stored for the snapshot in the passed directory
    * @param fs filesystem where the snapshot was taken
    * @param snapshotDir directory where the snapshot was stored
    * @return the stored snapshot description
@@ -364,4 +377,32 @@ public final class SnapshotDescriptionUtils {
     if (!snapshot.hasOwner()) return false;
     return snapshot.getOwner().equals(user.getShortName());
   }
+
+  public static boolean isSecurityAvailable(Configuration conf) throws IOException {
+    Connection conn = ConnectionFactory.createConnection(conf);
+    try {
+      Admin admin = conn.getAdmin();
+      try {
+        return admin.tableExists(AccessControlLists.ACL_TABLE_NAME);
+      } finally {
+        admin.close();
+      }
+    } finally {
+      conn.close();
+    }
+  }
+
+  private static SnapshotDescription writeAclToSnapshotDescription(
+      final SnapshotDescription snapshot, final Configuration conf) throws IOException {
+    ListMultimap<String, TablePermission> perms =
+        User.runAsLoginUser(new PrivilegedExceptionAction<ListMultimap<String, TablePermission>>() {
+          @Override
+          public ListMultimap<String, TablePermission> run() throws Exception {
+            return AccessControlLists.getTablePermissions(conf,
+                TableName.valueOf(snapshot.getTable()));
+          }
+        });
+    return snapshot.toBuilder().setUsersAndPermissions(ProtobufUtil.toUserTablePermissions(perms))
+        .build();
+  }
 }
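
As a hedged sketch of what writeAclToSnapshotDescription embeds (and what RestoreSnapshotHelper.restoreSnapshotACL above consumes), the retained permissions can be decoded back out of a SnapshotDescription along these lines; the snapshot variable and LOG handle are assumed:

    // Decode the users-and-permissions blob carried in the snapshot description.
    if (snapshot.hasUsersAndPermissions()) {
      ListMultimap<String, TablePermission> perms =
          ProtobufUtil.toUserTablePermissions(snapshot.getUsersAndPermissions());
      for (Map.Entry<String, TablePermission> entry : perms.entries()) {
        LOG.info("retained acl: " + entry.getKey() + " => " + entry.getValue());
      }
    }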


@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -51,7 +52,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.WALLink;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.util.FSUtils;


@@ -42,8 +42,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.HRegion;


@@ -36,7 +36,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;


@@ -40,7 +40,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.ByteStringer;
@@ -126,7 +126,7 @@ public final class SnapshotManifestV2 {
   }
   static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf,
-      final Executor executor,final FileSystem fs, final Path snapshotDir,
+      final Executor executor, final FileSystem fs, final Path snapshotDir,
       final SnapshotDescription desc, final int manifestSizeLimit) throws IOException {
     FileStatus[] manifestFiles = FSUtils.listStatus(fs, snapshotDir, new PathFilter() {
       @Override


@@ -39,7 +39,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.io.HFileLink;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;


@@ -24,7 +24,7 @@
   import="org.apache.hadoop.hbase.client.HConnectionManager"
   import="org.apache.hadoop.hbase.master.HMaster"
   import="org.apache.hadoop.hbase.snapshot.SnapshotInfo"
-  import="org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"
+  import="org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription"
   import="org.apache.hadoop.util.StringUtils"
   import="org.apache.hadoop.hbase.TableName"
   import="org.apache.hadoop.hbase.HBaseConfiguration" %>


@@ -27,7 +27,7 @@
   import="org.apache.hadoop.hbase.HBaseConfiguration"
   import="org.apache.hadoop.hbase.client.Admin"
   import="org.apache.hadoop.hbase.master.HMaster"
-  import="org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"
+  import="org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription"
   import="org.apache.hadoop.hbase.snapshot.SnapshotInfo"
   import="org.apache.hadoop.hbase.TableName"
   import="org.apache.hadoop.util.StringUtils" %>


@@ -34,10 +34,10 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;


@@ -0,0 +1,243 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessControlConstants;
import org.apache.hadoop.hbase.security.access.AccessController;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.SecureTestUtil;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import java.io.IOException;
@Category(MediumTests.class)
public class TestSnapshotWithAcl extends SecureTestUtil {
public TableName TEST_TABLE = TableName.valueOf("TestSnapshotWithAcl");
private static final int ROW_COUNT = 30000;
private static byte[] TEST_FAMILY = Bytes.toBytes("f1");
private static byte[] TEST_QUALIFIER = Bytes.toBytes("cq");
private static byte[] TEST_ROW = Bytes.toBytes(0);
private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static Configuration conf;
private static HBaseAdmin admin = null;
  // user is the table owner; will have all permissions on the table
  private static User USER_OWNER;
  // user with read/write permissions on the column family
  private static User USER_RW;
  // user with read-only permissions
  private static User USER_RO;
  // user with no permissions
  private static User USER_NONE;
static class AccessReadAction implements AccessTestAction {
private TableName tableName;
public AccessReadAction(TableName tableName) {
this.tableName = tableName;
}
@Override
public Object run() throws Exception {
Get g = new Get(TEST_ROW);
g.addFamily(TEST_FAMILY);
HTable t = new HTable(conf, tableName);
try {
t.get(g);
} finally {
t.close();
}
return null;
}
};
static class AccessWriteAction implements AccessTestAction {
private TableName tableName;
public AccessWriteAction(TableName tableName) {
this.tableName = tableName;
}
@Override
public Object run() throws Exception {
Put p = new Put(TEST_ROW);
p.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(1));
HTable t = new HTable(conf, tableName);
try {
t.put(p);
} finally {
t.close();
}
return null;
}
}
@BeforeClass
public static void setupBeforeClass() throws Exception {
conf = TEST_UTIL.getConfiguration();
// Enable security
enableSecurity(conf);
conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName());
// Verify enableSecurity sets up what we require
verifyConfiguration(conf);
// Enable EXEC permission checking
conf.setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true);
TEST_UTIL.startMiniCluster();
MasterCoprocessorHost cpHost =
TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);
USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]);
USER_RW = User.createUserForTesting(conf, "rwuser", new String[0]);
USER_RO = User.createUserForTesting(conf, "rouser", new String[0]);
USER_NONE = User.createUserForTesting(conf, "usernone", new String[0]);
}
@Before
public void setUp() throws Exception {
admin = TEST_UTIL.getHBaseAdmin();
HTableDescriptor htd = new HTableDescriptor(TEST_TABLE);
HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
hcd.setMaxVersions(100);
htd.addFamily(hcd);
htd.setOwner(USER_OWNER);
admin.createTable(htd, new byte[][] { Bytes.toBytes("s") });
TEST_UTIL.waitTableEnabled(TEST_TABLE);
grantOnTable(TEST_UTIL, USER_RW.getShortName(), TEST_TABLE, TEST_FAMILY, null,
Permission.Action.READ, Permission.Action.WRITE);
grantOnTable(TEST_UTIL, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null,
Permission.Action.READ);
}
private void loadData() throws IOException {
HTable hTable = new HTable(conf, TEST_TABLE);
try {
for (int i = 0; i < ROW_COUNT; i++) {
Put put = new Put(Bytes.toBytes(i));
put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
hTable.put(put);
}
hTable.flushCommits();
} finally {
hTable.close();
}
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
private void verifyRows(TableName tableName) throws IOException {
HTable hTable = new HTable(conf, tableName);
try {
Scan scan = new Scan();
ResultScanner scanner = hTable.getScanner(scan);
Result result;
int rowCount = 0;
while ((result = scanner.next()) != null) {
byte[] value = result.getValue(TEST_FAMILY, TEST_QUALIFIER);
Assert.assertArrayEquals(value, Bytes.toBytes(rowCount++));
}
Assert.assertEquals(rowCount, ROW_COUNT);
} finally {
hTable.close();
}
}
@Test
public void testRestoreSnapshot() throws Exception {
verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RO, USER_RW);
verifyDenied(new AccessReadAction(TEST_TABLE), USER_NONE);
verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);
loadData();
verifyRows(TEST_TABLE);
String snapshotName1 = "testSnapshot1";
admin.snapshot(snapshotName1, TEST_TABLE);
// clone snapshot with restoreAcl true.
TableName tableName1 = TableName.valueOf("tableName1");
admin.cloneSnapshot(snapshotName1, tableName1, true);
verifyRows(tableName1);
verifyAllowed(new AccessReadAction(tableName1), USER_OWNER, USER_RO, USER_RW);
verifyDenied(new AccessReadAction(tableName1), USER_NONE);
verifyAllowed(new AccessWriteAction(tableName1), USER_OWNER, USER_RW);
verifyDenied(new AccessWriteAction(tableName1), USER_RO, USER_NONE);
// clone snapshot with restoreAcl false.
TableName tableName2 = TableName.valueOf("tableName2");
admin.cloneSnapshot(snapshotName1, tableName2, false);
verifyRows(tableName2);
verifyAllowed(new AccessReadAction(tableName2), USER_OWNER);
verifyDenied(new AccessReadAction(tableName2), USER_NONE, USER_RO, USER_RW);
verifyAllowed(new AccessWriteAction(tableName2), USER_OWNER);
verifyDenied(new AccessWriteAction(tableName2), USER_RO, USER_RW, USER_NONE);
// remove read permission for USER_RO.
revokeFromTable(TEST_UTIL, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null,
Permission.Action.READ);
verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessReadAction(TEST_TABLE), USER_RO, USER_NONE);
verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);
// restore snapshot with restoreAcl false.
admin.disableTable(TEST_TABLE);
admin.restoreSnapshot(snapshotName1, false, false);
admin.enableTable(TEST_TABLE);
verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessReadAction(TEST_TABLE), USER_RO, USER_NONE);
verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);
// restore snapshot with restoreAcl true.
admin.disableTable(TEST_TABLE);
admin.restoreSnapshot(snapshotName1, false, true);
admin.enableTable(TEST_TABLE);
verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RO, USER_RW);
verifyDenied(new AccessReadAction(TEST_TABLE), USER_NONE);
verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);
}
}


@@ -59,10 +59,10 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
 import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;


@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
@@ -41,7 +42,6 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.snapshot.DisabledTableSnapshotHandler;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;


@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;


@@ -168,7 +168,7 @@ public class SecureTestUtil {
    * To indicate the action was not allowed, either throw an AccessDeniedException
    * or return an empty list of KeyValues.
    */
-  static interface AccessTestAction extends PrivilegedExceptionAction<Object> { }
+  public static interface AccessTestAction extends PrivilegedExceptionAction<Object> { }
   /** This fails only in case of ADE or empty list for any of the actions. */
   public static void verifyAllowed(User user, AccessTestAction... actions) throws Exception {


@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -108,7 +109,6 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.CheckPermissionsRequest;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;


@@ -57,7 +57,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.CompareFilter;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;


@@ -59,7 +59,7 @@ import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
@@ -263,8 +263,8 @@ public final class SnapshotTestingUtils {
    * @param sleep amount to sleep between checks to see if the snapshot is done
    * @throws ServiceException if the snapshot fails
    */
-  public static void waitForSnapshotToComplete(HMaster master,
-      SnapshotDescription snapshot, long sleep) throws ServiceException {
+  public static void waitForSnapshotToComplete(HMaster master, SnapshotDescription snapshot,
+      long sleep) throws ServiceException {
     final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder()
         .setSnapshot(snapshot).build();
     IsSnapshotDoneResponse done = IsSnapshotDoneResponse.newBuilder()


@@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;


@@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;


@@ -23,12 +23,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.After;


@@ -31,11 +31,11 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;


@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.TestTableName;
 import org.junit.After;


@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase.snapshot;

-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;
@@ -30,8 +29,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
 import org.junit.After;
 import org.junit.BeforeClass;
@@ -77,6 +76,8 @@ public class TestSnapshotDescriptionUtils {
       fail("Snapshot was considered valid without a table name");
     } catch (IllegalArgumentException e) {
       LOG.debug("Correctly failed when snapshot doesn't have a tablename");
+    } catch (IOException e) {
+      LOG.debug("Correctly failed when saving acl into snapshot");
     }
   }


@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase.snapshot;

-import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -28,8 +27,8 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
+import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;


@@ -87,6 +87,7 @@ module HBaseConstants
   DATA = 'DATA'
   SERVER_NAME = 'SERVER_NAME'
   LOCALITY_THRESHOLD = 'LOCALITY_THRESHOLD'
+  RESTORE_ACL = 'RESTORE_ACL'

   # Load constants from hbase java API
   def self.promote_constants(constants)


@@ -24,7 +24,7 @@ java_import org.apache.hadoop.hbase.util.RegionSplitter
 java_import org.apache.hadoop.hbase.util.Bytes
 java_import org.apache.hadoop.hbase.ServerName
 java_import org.apache.hadoop.hbase.TableName
-java_import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos::SnapshotDescription
+java_import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos::SnapshotDescription

 # Wrapper for org.apache.hadoop.hbase.client.HBaseAdmin
@@ -907,14 +907,16 @@ module Hbase
     #----------------------------------------------------------------------------------------------
     # Restore specified snapshot
-    def restore_snapshot(snapshot_name)
-      @admin.restoreSnapshot(snapshot_name.to_java_bytes)
+    def restore_snapshot(snapshot_name, restore_acl = false)
+      conf = @connection.getConfiguration
+      take_fail_safe_snapshot = conf.getBoolean("hbase.snapshot.restore.take.failsafe.snapshot", false)
+      @admin.restoreSnapshot(snapshot_name, take_fail_safe_snapshot, restore_acl)
     end

     #----------------------------------------------------------------------------------------------
     # Create a new table by cloning the snapshot content
-    def clone_snapshot(snapshot_name, table)
-      @admin.cloneSnapshot(snapshot_name.to_java_bytes, table.to_java_bytes)
+    def clone_snapshot(snapshot_name, table, restore_acl = false)
+      @admin.cloneSnapshot(snapshot_name, org.apache.hadoop.hbase::TableName.valueOf(table), restore_acl)
     end

     #----------------------------------------------------------------------------------------------


@@ -28,12 +28,19 @@ And writing on the newly created table will not influence the snapshot data.
 Examples:
   hbase> clone_snapshot 'snapshotName', 'tableName'
   hbase> clone_snapshot 'snapshotName', 'namespace:tableName'
+
+The following command will restore all ACLs from the original snapshot table into the
+newly created table:
+  hbase> clone_snapshot 'snapshotName', 'namespace:tableName', {RESTORE_ACL => true}
 EOF
       end

-      def command(snapshot_name, table)
+      def command(snapshot_name, table, args = {})
         format_simple_command do
-          admin.clone_snapshot(snapshot_name, table)
+          raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
+          restore_acl = args.delete(RESTORE_ACL) || false
+          admin.clone_snapshot(snapshot_name, table, restore_acl)
         end
       end


@@ -28,12 +28,18 @@ The table must be disabled.
 Examples:
   hbase> restore_snapshot 'snapshotName'
+
+The following command will restore all ACLs from the snapshot into the table:
+  hbase> restore_snapshot 'snapshotName', {RESTORE_ACL => true}
 EOF
       end

-      def command(snapshot_name)
+      def command(snapshot_name, args = {})
         format_simple_command do
-          admin.restore_snapshot(snapshot_name)
+          raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
+          restore_acl = args.delete(RESTORE_ACL) || false
+          admin.restore_snapshot(snapshot_name, restore_acl)
         end
       end
     end
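
Putting the two shell commands together, a typical session might look like the following sketch (table and snapshot names are illustrative; as the help text above notes, the table must be disabled before restore_snapshot):

  hbase> snapshot 'myTable', 'mySnapshot'
  hbase> disable 'myTable'
  hbase> restore_snapshot 'mySnapshot', {RESTORE_ACL => true}
  hbase> enable 'myTable'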