HBASE-11013 Clone Snapshots on Secure Cluster Should provide option to apply Retained User Permissions
Signed-off-by: Guanghao Zhang <zghao@apache.org>
Parent: 68d292d83b
Commit: f9dc4cad63
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
@@ -1146,7 +1147,7 @@ public interface Admin extends Abortable, Closeable {
@Deprecated
void snapshot(final String snapshotName,
final TableName tableName,
HBaseProtos.SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
SnapshotDescription.Type type) throws IOException, SnapshotCreationException,
IllegalArgumentException;

/**
@@ -1168,7 +1169,7 @@ public interface Admin extends Abortable, Closeable {
* @throws IllegalArgumentException if the snapshot request is formatted incorrectly
*/
@Deprecated
void snapshot(HBaseProtos.SnapshotDescription snapshot)
void snapshot(SnapshotDescription snapshot)
throws IOException, SnapshotCreationException, IllegalArgumentException;

/**
@@ -1182,7 +1183,7 @@ public interface Admin extends Abortable, Closeable {
* @throws IllegalArgumentException if the snapshot request is formatted incorrectly
*/
@Deprecated
MasterProtos.SnapshotResponse takeSnapshotAsync(HBaseProtos.SnapshotDescription snapshot)
MasterProtos.SnapshotResponse takeSnapshotAsync(SnapshotDescription snapshot)
throws IOException, SnapshotCreationException;

/**
@@ -1202,7 +1203,7 @@ public interface Admin extends Abortable, Closeable {
* unknown
*/
@Deprecated
boolean isSnapshotFinished(final HBaseProtos.SnapshotDescription snapshot)
boolean isSnapshotFinished(final SnapshotDescription snapshot)
throws IOException, HBaseSnapshotException, UnknownSnapshotException;

/**
@@ -1268,6 +1269,23 @@ public interface Admin extends Abortable, Closeable {
void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot)
throws IOException, RestoreSnapshotException;

/**
* Restore the specified snapshot on the original table. (The table must be disabled) If
* 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken before
* executing the restore operation. In case of restore failure, the failsafe snapshot will be
* restored. If the restore completes without problem the failsafe snapshot is deleted. The
* failsafe snapshot name is configurable by using the property
* "hbase.snapshot.restore.failsafe.name".
* @param snapshotName name of the snapshot to restore
* @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
* @param restoreAcl true to restore acl of snapshot into table.
* @throws IOException if a remote or network exception occurs
* @throws RestoreSnapshotException if snapshot failed to be restored
* @throws IllegalArgumentException if the restore request is formatted incorrectly
*/
void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot, boolean restoreAcl)
throws IOException, RestoreSnapshotException;

/**
* Create a new table by cloning the snapshot content.
*
@@ -1294,6 +1312,19 @@ public interface Admin extends Abortable, Closeable {
void cloneSnapshot(final String snapshotName, final TableName tableName)
throws IOException, TableExistsException, RestoreSnapshotException;

/**
* Create a new table by cloning the snapshot content.
* @param snapshotName name of the snapshot to be cloned
* @param tableName name of the table where the snapshot will be restored
* @param restoreAcl true to restore acl of snapshot into newly created table
* @throws IOException if a remote or network exception occurs
* @throws TableExistsException if table to be created already exists
* @throws RestoreSnapshotException if snapshot failed to be cloned
* @throws IllegalArgumentException if the specified table has not a valid name
*/
void cloneSnapshot(final String snapshotName, final TableName tableName, final boolean restoreAcl)
throws IOException, TableExistsException, RestoreSnapshotException;

/**
* Execute a distributed procedure on a cluster.
*
@@ -1342,7 +1373,7 @@ public interface Admin extends Abortable, Closeable {
* @throws IOException if a network error occurs
*/
@Deprecated
List<HBaseProtos.SnapshotDescription> listSnapshots() throws IOException;
List<SnapshotDescription> listSnapshots() throws IOException;

/**
* List all the completed snapshots matching the given regular expression.
@@ -1352,7 +1383,7 @@ public interface Admin extends Abortable, Closeable {
* @throws IOException if a remote or network exception occurs
*/
@Deprecated
List<HBaseProtos.SnapshotDescription> listSnapshots(String regex) throws IOException;
List<SnapshotDescription> listSnapshots(String regex) throws IOException;

/**
* List all the completed snapshots matching the given pattern.
@@ -1362,7 +1393,7 @@ public interface Admin extends Abortable, Closeable {
* @throws IOException if a remote or network exception occurs
*/
@Deprecated
List<HBaseProtos.SnapshotDescription> listSnapshots(Pattern pattern) throws IOException;
List<SnapshotDescription> listSnapshots(Pattern pattern) throws IOException;

/**
* List all the completed snapshots matching the given table name regular expression and snapshot
@@ -1373,7 +1404,7 @@ public interface Admin extends Abortable, Closeable {
* @throws IOException if a remote or network exception occurs
*/
@Deprecated
List<HBaseProtos.SnapshotDescription> listTableSnapshots(String tableNameRegex,
List<SnapshotDescription> listTableSnapshots(String tableNameRegex,
String snapshotNameRegex) throws IOException;

/**
@@ -1385,7 +1416,7 @@ public interface Admin extends Abortable, Closeable {
* @throws IOException if a remote or network exception occurs
*/
@Deprecated
List<HBaseProtos.SnapshotDescription> listTableSnapshots(Pattern tableNamePattern,
List<SnapshotDescription> listTableSnapshots(Pattern tableNamePattern,
Pattern snapshotNamePattern) throws IOException;

/**
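For context, a minimal client-side sketch of how the new ACL-aware overloads declared above might be used. It is illustrative only: the configuration, snapshot name and table names are assumptions, not part of this change.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CloneSnapshotWithAclExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Clone the snapshot into a new table and re-apply the ACLs retained in the snapshot.
      admin.cloneSnapshot("mySnapshot", TableName.valueOf("clonedTable"), true);
      // Restore the original (disabled) table, taking a failsafe snapshot first and
      // restoring the retained ACLs as well.
      admin.restoreSnapshot("mySnapshot", true, true);
    }
  }
}

Passing false, or calling the existing two-argument overloads, keeps the previous behaviour in which no permissions are applied.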
@@ -95,7 +95,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest;
@@ -157,6 +156,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.quotas.QuotaFilter;
import org.apache.hadoop.hbase.quotas.QuotaRetriever;
import org.apache.hadoop.hbase.quotas.QuotaSettings;
@@ -917,7 +917,6 @@ public class HBaseAdmin implements Admin {
* or TimeoutException in case the wait timeout was not long enough to allow the
* operation to complete.
*
* @param desc table descriptor for table
* @param tableName name of table to delete
* @throws IOException if a remote or network exception occurs
* @return the result of the async delete. You can use Future.get(long, TimeUnit)
@@ -3820,23 +3819,21 @@ public class HBaseAdmin implements Admin {
}

/**
* Restore the specified snapshot on the original table. (The table must be disabled)
* If 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken
* before executing the restore operation.
* In case of restore failure, the failsafe snapshot will be restored.
* If the restore completes without problem the failsafe snapshot is deleted.
*
* The failsafe snapshot name is configurable by using the property
* Restore the specified snapshot on the original table. (The table must be disabled) If
* 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken before
* executing the restore operation. In case of restore failure, the failsafe snapshot will be
* restored. If the restore completes without problem the failsafe snapshot is deleted. The
* failsafe snapshot name is configurable by using the property
* "hbase.snapshot.restore.failsafe.name".
*
* @param snapshotName name of the snapshot to restore
* @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
* @param restoreAcl true to restore acl of snapshot into table.
* @throws IOException if a remote or network exception occurs
* @throws RestoreSnapshotException if snapshot failed to be restored
* @throws IllegalArgumentException if the restore request is formatted incorrectly
*/
@Override
public void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot)
public void restoreSnapshot(final String snapshotName, boolean takeFailSafeSnapshot, boolean restoreAcl)
throws IOException, RestoreSnapshotException {
TableName tableName = null;
for (SnapshotDescription snapshotInfo: listSnapshots()) {
@@ -3853,7 +3850,7 @@ public class HBaseAdmin implements Admin {

// The table does not exists, switch to clone.
if (!tableExists(tableName)) {
cloneSnapshot(snapshotName, tableName);
cloneSnapshot(snapshotName, tableName, restoreAcl);
return;
}

@@ -3877,13 +3874,13 @@ public class HBaseAdmin implements Admin {

try {
// Restore snapshot
internalRestoreSnapshot(snapshotName, tableName);
internalRestoreSnapshot(snapshotName, tableName, restoreAcl);
} catch (IOException e) {
// Somthing went wrong during the restore...
// if the pre-restore snapshot is available try to rollback
if (takeFailSafeSnapshot) {
try {
internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName);
internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName, restoreAcl);
String msg = "Restore snapshot=" + snapshotName +
" failed. Rollback to snapshot=" + failSafeSnapshotSnapshotName + " succeeded.";
LOG.error(msg, e);
@@ -3909,6 +3906,12 @@ public class HBaseAdmin implements Admin {
}
}

@Override
public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot)
throws IOException, RestoreSnapshotException {
restoreSnapshot(snapshotName, takeFailSafeSnapshot, false);
}

/**
* Create a new table by cloning the snapshot content.
*
@@ -3968,15 +3971,21 @@ public class HBaseAdmin implements Admin {
* @throws IllegalArgumentException if the specified table has not a valid name
*/
@Override
public void cloneSnapshot(final String snapshotName, final TableName tableName)
throws IOException, TableExistsException, RestoreSnapshotException {
public void cloneSnapshot(final String snapshotName, final TableName tableName,
final boolean restoreAcl) throws IOException, TableExistsException, RestoreSnapshotException {
if (tableExists(tableName)) {
throw new TableExistsException(tableName);
}
internalRestoreSnapshot(snapshotName, tableName);
internalRestoreSnapshot(snapshotName, tableName, restoreAcl);
waitUntilTableIsEnabled(tableName);
}

@Override
public void cloneSnapshot(String snapshotName, TableName tableName)
throws IOException, TableExistsException, RestoreSnapshotException {
cloneSnapshot(snapshotName, tableName, false);
}

/**
* Execute a distributed procedure on a cluster synchronously with return data
*
@@ -4117,23 +4126,23 @@ public class HBaseAdmin implements Admin {
}

/**
* Execute Restore/Clone snapshot and wait for the server to complete (blocking).
* To check if the cloned table exists, use {@link #isTableAvailable} -- it is not safe to
* create an HTable instance to this table before it is available.
* Execute Restore/Clone snapshot and wait for the server to complete (blocking). To check if the
* cloned table exists, use {@link #isTableAvailable} -- it is not safe to create an HTable
* instance to this table before it is available.
* @param snapshotName snapshot to restore
* @param tableName table name to restore the snapshot on
* @throws IOException if a remote or network exception occurs
* @throws RestoreSnapshotException if snapshot failed to be restored
* @throws IllegalArgumentException if the restore request is formatted incorrectly
*/
private void internalRestoreSnapshot(final String snapshotName, final TableName
tableName)
private void internalRestoreSnapshot(final String snapshotName, final TableName tableName,
final boolean restoreAcl)
throws IOException, RestoreSnapshotException {
SnapshotDescription snapshot = SnapshotDescription.newBuilder()
.setName(snapshotName).setTable(tableName.getNameAsString()).build();

// actually restore the snapshot
internalRestoreSnapshotAsync(snapshot);
internalRestoreSnapshotAsync(snapshot, restoreAcl);

final IsRestoreSnapshotDoneRequest request = IsRestoreSnapshotDoneRequest.newBuilder()
.setSnapshot(snapshot).build();
@@ -4177,12 +4186,12 @@ public class HBaseAdmin implements Admin {
* @throws RestoreSnapshotException if snapshot failed to be restored
* @throws IllegalArgumentException if the restore request is formatted incorrectly
*/
private RestoreSnapshotResponse internalRestoreSnapshotAsync(final SnapshotDescription snapshot)
throws IOException, RestoreSnapshotException {
private RestoreSnapshotResponse internalRestoreSnapshotAsync(final SnapshotDescription snapshot,
final boolean restoreAcl) throws IOException, RestoreSnapshotException {
ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);

final RestoreSnapshotRequest request = RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot)
.build();
final RestoreSnapshotRequest request =
RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot).setRestoreACL(restoreAcl).build();

// run the snapshot restore on the master
return executeCallable(new MasterCallable<RestoreSnapshotResponse>(getConnection()) {
@@ -157,6 +157,10 @@ public class TablePermission extends Permission {
return table;
}

public void setTableName(TableName table) {
this.table = table;
}

public boolean hasFamily() {
return family != null;
}
@@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.snapshot;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.util.Bytes;

/**
@@ -36,7 +36,7 @@ public class ClientSnapshotDescriptionUtils {
* @throws IllegalArgumentException if the name of the snapshot or the name of the table to
* snapshot are not valid names.
*/
public static void assertSnapshotRequestIsValid(HBaseProtos.SnapshotDescription snapshot)
public static void assertSnapshotRequestIsValid(SnapshotDescription snapshot)
throws IllegalArgumentException {
// make sure the snapshot name is valid
TableName.isLegalTableQualifierName(Bytes.toBytes(snapshot.getName()), true);
@@ -52,12 +52,12 @@ public class ClientSnapshotDescriptionUtils {

/**
* Returns a single line (no \n) representation of snapshot metadata. Use this instead of
* {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription#toString()}. We don't replace SnapshotDescrpition's toString
* {@link SnapshotDescription#toString()}. We don't replace SnapshotDescrpition's toString
* because it is auto-generated by protoc.
* @param ssd
* @return Single line string with a summary of the snapshot parameters
*/
public static String toString(HBaseProtos.SnapshotDescription ssd) {
public static String toString(SnapshotDescription ssd) {
if (ssd == null) {
return null;
}
@@ -19,7 +19,7 @@ package org.apache.hadoop.hbase.snapshot;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;


/**
@@ -19,8 +19,8 @@ package org.apache.hadoop.hbase.snapshot;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;

/**
* General exception base class for when a snapshot fails
@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.snapshot;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;

/**
* Thrown when a snapshot could not be restored due to a server-side error when restoring it.
@@ -19,7 +19,7 @@ package org.apache.hadoop.hbase.snapshot;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;

/**
* Thrown when a snapshot could not be created due to a server-side error when
@@ -19,7 +19,7 @@ package org.apache.hadoop.hbase.snapshot;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;


/**
@@ -19,7 +19,7 @@ package org.apache.hadoop.hbase.snapshot;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;

/**
* Thrown when a snapshot exists but should not
@@ -32,11 +32,11 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Test;
import org.junit.experimental.categories.Category;
File diff suppressed because it is too large (3 files)
@@ -157,23 +157,6 @@ message NameInt64Pair {
optional int64 value = 2;
}

/**
* Description of the snapshot to take
*/
message SnapshotDescription {
required string name = 1;
optional string table = 2; // not needed for delete, but checked for in taking snapshot
optional int64 creation_time = 3 [default = 0];
enum Type {
DISABLED = 0;
FLUSH = 1;
SKIPFLUSH = 2;
}
optional Type type = 4 [default = FLUSH];
optional int32 version = 5;
optional string owner = 6;
}

/**
* Description of the distributed procedure to take
*/
@@ -32,6 +32,7 @@ import "ClusterStatus.proto";
import "ErrorHandling.proto";
import "Procedure.proto";
import "Quota.proto";
import "Snapshot.proto";

/* Column-level protobufs */

@@ -391,6 +392,7 @@ message DeleteSnapshotResponse {

message RestoreSnapshotRequest {
required SnapshotDescription snapshot = 1;
optional bool restoreACL = 2 [default=false];
}

message RestoreSnapshotResponse {
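A sketch of how a client populates the new field when issuing the RPC; the snapshot and table names are illustrative. Because restoreACL defaults to false, requests from older clients that never set the field keep the previous behaviour.

import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;

public class RestoreSnapshotRequestExample {
  public static RestoreSnapshotRequest buildRequest() {
    SnapshotDescription snapshot = SnapshotDescription.newBuilder()
        .setName("mySnapshot")      // illustrative snapshot name
        .setTable("clonedTable")    // illustrative target table
        .build();
    // Ask the master to also re-apply the permissions stored in the snapshot.
    return RestoreSnapshotRequest.newBuilder()
        .setSnapshot(snapshot)
        .setRestoreACL(true)
        .build();
  }
}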
@@ -23,9 +23,28 @@ option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

import "AccessControl.proto";
import "FS.proto";
import "HBase.proto";

/**
* Description of the snapshot to take
*/
message SnapshotDescription {
required string name = 1;
optional string table = 2; // not needed for delete, but checked for in taking snapshot
optional int64 creation_time = 3 [default = 0];
enum Type {
DISABLED = 0;
FLUSH = 1;
SKIPFLUSH = 2;
}
optional Type type = 4 [default = FLUSH];
optional int32 version = 5;
optional string owner = 6;
optional UsersAndPermissions users_and_permissions = 7;
}

message SnapshotFileInfo {
enum Type {
HFILE = 1;
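A short sketch of how a consumer can read the new users_and_permissions field back into user-to-permission entries, mirroring what the restore path does; the snapshot is assumed to be an already-loaded SnapshotDescription.

import com.google.common.collect.ListMultimap;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.security.access.TablePermission;

public class SnapshotAclReader {
  /** Decodes the grants retained in a snapshot, or returns null if none were stored. */
  public static ListMultimap<String, TablePermission> readAcl(SnapshotDescription snapshot) {
    if (!snapshot.hasUsersAndPermissions()) {
      return null;
    }
    return ProtobufUtil.toUserTablePermissions(snapshot.getUsersAndPermissions());
  }
}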
@@ -49,7 +49,7 @@ org.apache.hadoop.hbase.HTableDescriptor;
org.apache.hadoop.hbase.HBaseConfiguration;
org.apache.hadoop.hbase.TableName;
org.apache.hadoop.hbase.tool.Canary;
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
org.apache.hadoop.hbase.master.DeadServer;
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
org.apache.hadoop.hbase.security.visibility.VisibilityConstants;
@@ -34,8 +34,8 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;

import java.io.IOException;
import java.util.List;

@@ -34,8 +34,8 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;

import java.io.IOException;
import java.util.List;

@@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;

/**
* Defines coprocessor hooks for interacting with operations on the
@@ -36,8 +36,8 @@ import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;

@@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.coprocessor.*;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;

/**
* Provides the coprocessor framework and environment for master oriented
@@ -60,7 +60,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStor
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AbortProcedureResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
@@ -186,6 +185,7 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Repor
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.User;
@@ -1311,7 +1311,8 @@ public class MasterRpcServices extends RSRpcServices
master.ensureNamespaceExists(dstTable.getNamespaceAsString());

SnapshotDescription reqSnapshot = request.getSnapshot();
master.snapshotManager.restoreSnapshot(reqSnapshot);
master.snapshotManager.restoreSnapshot(reqSnapshot,
request.hasRestoreACL() && request.getRestoreACL());
return RestoreSnapshotResponse.newBuilder().build();
} catch (ForeignException e) {
throw new ServiceException(e.getCause());
@@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;

/**
* Watch the current snapshot under process
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.master.SnapshotSentinel;
import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
@@ -63,6 +63,7 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot
private final static String NAME = "Master CloneSnapshotHandler";

private final SnapshotDescription snapshot;
private final boolean restoreAcl;

private final ForeignExceptionDispatcher monitor;
private final MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
@@ -73,12 +74,14 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot
private volatile boolean stopped = false;

public CloneSnapshotHandler(final MasterServices masterServices,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor) {
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor,
final boolean restoreAcl) {
super(masterServices, masterServices.getMasterFileSystem(), hTableDescriptor,
masterServices.getConfiguration(), null, masterServices);

// Snapshot information
this.snapshot = snapshot;
this.restoreAcl = restoreAcl;

// Monitor
this.monitor = new ForeignExceptionDispatcher();
@@ -118,6 +121,13 @@ public class CloneSnapshotHandler extends CreateTableHandler implements Snapshot
Preconditions.checkArgument(!metaChanges.hasRegionsToRemove(),
"A clone should not have regions to remove");

// Clone acl of snapshot into newly created table.
if (restoreAcl && snapshot.hasUsersAndPermissions()
&& snapshot.getUsersAndPermissions() != null
&& SnapshotDescriptionUtils.isSecurityAvailable(conf)) {
RestoreSnapshotHelper.restoreSnapshotACL(snapshot, tableName, conf);
}

// At this point the clone is complete. Next step is enabling the table.
String msg = "Clone snapshot="+ snapshot.getName() +" on table=" + tableName + " completed!";
LOG.info(msg);
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hbase.HRegionInfo;
|
||||
@ -34,7 +32,7 @@ import org.apache.hadoop.hbase.ServerName;
|
||||
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
|
||||
import org.apache.hadoop.hbase.errorhandling.ForeignException;
|
||||
import org.apache.hadoop.hbase.master.MasterServices;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
|
||||
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
|
||||
import org.apache.hadoop.hbase.util.FSUtils;
|
||||
|
@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.errorhandling.ForeignException;
|
||||
import org.apache.hadoop.hbase.master.MasterServices;
|
||||
import org.apache.hadoop.hbase.procedure.Procedure;
|
||||
import org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
|
||||
import org.apache.hadoop.hbase.util.Pair;
|
||||
|
||||
@ -49,7 +49,7 @@ public class EnabledTableSnapshotHandler extends TakeSnapshotHandler {
|
||||
private final ProcedureCoordinator coordinator;
|
||||
|
||||
public EnabledTableSnapshotHandler(SnapshotDescription snapshot, MasterServices master,
|
||||
final SnapshotManager manager) {
|
||||
final SnapshotManager manager) {
|
||||
super(snapshot, master);
|
||||
this.coordinator = manager.getCoordinator();
|
||||
}
|
||||
|
@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
|
||||
import org.apache.hadoop.hbase.MetaTableAccessor;
|
||||
import org.apache.hadoop.hbase.master.MasterServices;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
|
||||
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
|
||||
import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
|
||||
|
@ -46,7 +46,7 @@ import org.apache.hadoop.hbase.master.SnapshotSentinel;
|
||||
import org.apache.hadoop.hbase.master.handler.TableEventHandler;
|
||||
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
|
||||
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
|
||||
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
|
||||
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
|
||||
@ -65,6 +65,7 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
|
||||
|
||||
private final HTableDescriptor hTableDescriptor;
|
||||
private final SnapshotDescription snapshot;
|
||||
private final boolean restoreAcl;
|
||||
|
||||
private final ForeignExceptionDispatcher monitor;
|
||||
private final MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
|
||||
@ -73,11 +74,13 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
|
||||
private volatile boolean stopped = false;
|
||||
|
||||
public RestoreSnapshotHandler(final MasterServices masterServices,
|
||||
final SnapshotDescription snapshot, final HTableDescriptor htd) throws IOException {
|
||||
final SnapshotDescription snapshot, final HTableDescriptor htd, final boolean restoreAcl)
|
||||
throws IOException {
|
||||
super(EventType.C_M_RESTORE_SNAPSHOT, htd.getTableName(), masterServices, masterServices);
|
||||
|
||||
// Snapshot information
|
||||
this.snapshot = snapshot;
|
||||
this.restoreAcl = restoreAcl;
|
||||
|
||||
// Monitor
|
||||
this.monitor = new ForeignExceptionDispatcher();
|
||||
@ -166,6 +169,14 @@ public class RestoreSnapshotHandler extends TableEventHandler implements Snapsho
|
||||
}
|
||||
metaChanges.updateMetaParentRegions(this.server.getConnection(), hris);
|
||||
|
||||
// 5. restore acl of snapshot into the table.
|
||||
if (restoreAcl && snapshot.hasUsersAndPermissions()
|
||||
&& snapshot.getUsersAndPermissions() != null
|
||||
&& SnapshotDescriptionUtils.isSecurityAvailable(server.getConfiguration())) {
|
||||
RestoreSnapshotHelper.restoreSnapshotACL(snapshot, tableName, server.getConfiguration());
|
||||
}
|
||||
|
||||
|
||||
// At this point the restore is complete. Next step is enabling the table.
|
||||
LOG.info("Restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot) +
|
||||
" on table=" + tableName + " completed!");
|
||||
|
@ -62,8 +62,8 @@ import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
|
||||
import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinatorRpcs;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription.Type;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
|
||||
import org.apache.hadoop.hbase.quotas.QuotaExceededException;
|
||||
import org.apache.hadoop.hbase.security.AccessDeniedException;
|
||||
@ -654,7 +654,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
|
||||
* @param hTableDescriptor Table Descriptor of the table to create
|
||||
*/
|
||||
synchronized void cloneSnapshot(final SnapshotDescription snapshot,
|
||||
final HTableDescriptor hTableDescriptor) throws HBaseSnapshotException {
|
||||
final HTableDescriptor hTableDescriptor, final boolean restoreAcl)
|
||||
throws HBaseSnapshotException {
|
||||
TableName tableName = hTableDescriptor.getTableName();
|
||||
|
||||
// make sure we aren't running a snapshot on the same table
|
||||
@ -669,7 +670,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
|
||||
|
||||
try {
|
||||
CloneSnapshotHandler handler =
|
||||
new CloneSnapshotHandler(master, snapshot, hTableDescriptor).prepare();
|
||||
new CloneSnapshotHandler(master, snapshot, hTableDescriptor, restoreAcl).prepare();
|
||||
this.executorService.submit(handler);
|
||||
this.restoreHandlers.put(tableName, handler);
|
||||
} catch (Exception e) {
|
||||
@ -685,7 +686,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
|
||||
* @param reqSnapshot
|
||||
* @throws IOException
|
||||
*/
|
||||
public void restoreSnapshot(SnapshotDescription reqSnapshot) throws IOException {
|
||||
public void restoreSnapshot(SnapshotDescription reqSnapshot, boolean restoreAcl)
|
||||
throws IOException {
|
||||
FileSystem fs = master.getMasterFileSystem().getFileSystem();
|
||||
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(reqSnapshot, rootDir);
|
||||
MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
|
||||
@ -742,7 +744,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
|
||||
if (tableRegionCount > 0 && tableRegionCount < snapshotRegionCount) {
|
||||
checkAndUpdateNamespaceRegionQuota(snapshotRegionCount, tableName);
|
||||
}
|
||||
restoreSnapshot(snapshot, snapshotTableDesc);
|
||||
restoreSnapshot(snapshot, snapshotTableDesc, restoreAcl);
|
||||
// Update the region quota if snapshotRegionCount is smaller. This step should not fail
|
||||
// because we have reserved enough region quota before hand
|
||||
if (tableRegionCount > 0 && tableRegionCount > snapshotRegionCount) {
|
||||
@ -776,7 +778,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
|
||||
}
|
||||
try {
|
||||
checkAndUpdateNamespaceQuota(manifest, tableName);
|
||||
cloneSnapshot(snapshot, htd);
|
||||
cloneSnapshot(snapshot, htd, restoreAcl);
|
||||
} catch (IOException e) {
|
||||
this.master.getMasterQuotaManager().removeTableFromNamespaceQuota(tableName);
|
||||
LOG.error("Exception occurred while cloning the snapshot " + snapshot.getName()
|
||||
@ -825,7 +827,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
|
||||
* @param hTableDescriptor Table Descriptor
|
||||
*/
|
||||
private synchronized void restoreSnapshot(final SnapshotDescription snapshot,
|
||||
final HTableDescriptor hTableDescriptor) throws HBaseSnapshotException {
|
||||
final HTableDescriptor hTableDescriptor, final boolean restoreAcl)
|
||||
throws HBaseSnapshotException {
|
||||
TableName tableName = hTableDescriptor.getTableName();
|
||||
|
||||
// make sure we aren't running a snapshot on the same table
|
||||
@ -840,7 +843,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
|
||||
|
||||
try {
|
||||
RestoreSnapshotHandler handler =
|
||||
new RestoreSnapshotHandler(master, snapshot, hTableDescriptor).prepare();
|
||||
new RestoreSnapshotHandler(master, snapshot, hTableDescriptor, restoreAcl).prepare();
|
||||
this.executorService.submit(handler);
|
||||
restoreHandlers.put(tableName, handler);
|
||||
} catch (Exception e) {
|
||||
|
@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.master.TableLockManager;
|
||||
import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
|
||||
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
|
||||
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
|
||||
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
|
||||
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
|
||||
|
@@ -155,7 +155,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceCall;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.Thread.UncaughtExceptionHandler;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;
import java.lang.reflect.Constructor;
import java.net.BindException;
@@ -96,7 +95,6 @@ import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.http.HttpServer;
import org.apache.hadoop.hbase.http.InfoServer;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.util.HeapMemorySizeUtil;
@@ -28,10 +28,9 @@ import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.procedure.ProcedureMember;
import org.apache.hadoop.hbase.procedure.Subprocedure;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Region.Operation;
import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager.SnapshotSubprocedurePool;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
@@ -50,7 +50,7 @@ import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
import org.apache.hadoop.hbase.procedure.Subprocedure;
import org.apache.hadoop.hbase.procedure.SubprocedureFactory;
import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
@@ -470,7 +470,7 @@ public class AccessControlLists {
return allPerms;
}

static ListMultimap<String, TablePermission> getTablePermissions(Configuration conf,
public static ListMultimap<String, TablePermission> getTablePermissions(Configuration conf,
TableName tableName) throws IOException {
return getPermissions(conf, tableName != null ? tableName.getName() : null, null);
}
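Making getTablePermissions public lets the snapshot code (and, in principle, other server-side callers) read the current grants for a table out of hbase:acl. A hedged sketch of such a caller, with the table name as an assumption:

import java.io.IOException;
import com.google.common.collect.ListMultimap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.access.AccessControlLists;
import org.apache.hadoop.hbase.security.access.TablePermission;

public class TableAclReader {
  /** Reads the grants currently stored in hbase:acl for the given table. */
  public static ListMultimap<String, TablePermission> readAcl(Configuration conf)
      throws IOException {
    return AccessControlLists.getTablePermissions(conf, TableName.valueOf("sourceTable"));
  }
}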
@@ -91,10 +91,10 @@ import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadRequest;
import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.InternalScanner;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.regionserver.Region;
@@ -23,7 +23,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import java.util.Arrays;
import java.util.Locale;
@@ -47,7 +47,7 @@ public class CreateSnapshot extends AbstractHBaseTool {
this.addRequiredOptWithArg("n", "name", "The name of the created snapshot");
this.addOptWithArg("s", "snapshot_type",
"Snapshot Type. FLUSH is default. Posible values are "
+ Arrays.toString(HBaseProtos.SnapshotDescription.Type.values()));
+ Arrays.toString(SnapshotDescription.Type.values()));
}

@Override
@@ -65,9 +65,9 @@ public class CreateSnapshot extends AbstractHBaseTool {
try {
connection = ConnectionFactory.createConnection(getConf());
admin = connection.getAdmin();
HBaseProtos.SnapshotDescription.Type type = HBaseProtos.SnapshotDescription.Type.FLUSH;
SnapshotDescription.Type type = SnapshotDescription.Type.FLUSH;
if (snapshotType != null) {
type = HBaseProtos.SnapshotDescription.Type.valueOf(snapshotName.toUpperCase(Locale.ROOT));
type = SnapshotDescription.Type.valueOf(snapshotName.toUpperCase(Locale.ROOT));
}

admin.snapshot(snapshotName, TableName.valueOf(tableName), type);
@@ -53,7 +53,7 @@ import org.apache.hadoop.hbase.io.FileLink;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.WALLink;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -29,10 +29,12 @@ import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ThreadPoolExecutor;

import com.google.common.collect.ListMultimap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -51,11 +53,14 @@ import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.TablePermission;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.ModifyRegionUtils;
@@ -602,7 +607,7 @@ public class RestoreSnapshotHelper {
* </pre></blockquote>
* @param familyDir destination directory for the store file
* @param regionInfo destination region info for the table
* @param hfileName reference file name
* @param storeFile reference file name
*/
private void restoreReferenceFile(final Path familyDir, final HRegionInfo regionInfo,
final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
@@ -741,4 +746,25 @@ public class RestoreSnapshotHelper {
}
return metaChanges;
}

public static void restoreSnapshotACL(SnapshotDescription snapshot, TableName newTableName,
Configuration conf) throws IOException {
if (snapshot.hasUsersAndPermissions() && snapshot.getUsersAndPermissions() != null) {
LOG.info("Restore snapshot acl to table. snapshot: " + snapshot + ", table: " + newTableName);
ListMultimap<String, TablePermission> perms =
ProtobufUtil.toUserTablePermissions(snapshot.getUsersAndPermissions());
try {
for (Entry<String, TablePermission> e : perms.entries()) {
String user = e.getKey();
TablePermission perm = e.getValue();
perm.setTableName(newTableName);
AccessControlClient.grant(conf, perm.getTableName(), user, perm.getFamily(),
perm.getQualifier(), perm.getActions());
}
} catch (Throwable e) {
throw new IOException("Grant acl into newly creatd table failed. snapshot: " + snapshot
+ ", table: " + newTableName, e);
}
}
}
}
@ -17,10 +17,11 @@
|
||||
*/
|
||||
package org.apache.hadoop.hbase.snapshot;
|
||||
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
import java.util.Collections;
|
||||
|
||||
import com.google.common.collect.ListMultimap;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
@ -30,10 +31,16 @@ import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.client.Admin;
|
||||
import org.apache.hadoop.hbase.client.Connection;
|
||||
import org.apache.hadoop.hbase.client.ConnectionFactory;
|
||||
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.security.User;
|
||||
import org.apache.hadoop.hbase.snapshot.SnapshotManifestV2;
|
||||
import org.apache.hadoop.hbase.security.access.AccessControlLists;
|
||||
import org.apache.hadoop.hbase.security.access.TablePermission;
|
||||
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
|
||||
import org.apache.hadoop.hbase.util.FSUtils;
|
||||
|
||||
@ -246,7 +253,7 @@ public final class SnapshotDescriptionUtils {
|
||||
* {@link SnapshotDescription}.
|
||||
*/
|
||||
public static SnapshotDescription validate(SnapshotDescription snapshot, Configuration conf)
|
||||
throws IllegalArgumentException {
|
||||
throws IllegalArgumentException, IOException {
|
||||
if (!snapshot.hasTable()) {
|
||||
throw new IllegalArgumentException(
|
||||
"Descriptor doesn't apply to a table, so we can't build it.");
|
||||
@ -262,6 +269,12 @@ public final class SnapshotDescriptionUtils {
|
||||
builder.setCreationTime(time);
|
||||
snapshot = builder.build();
|
||||
}
|
||||
|
||||
// set the acl to snapshot if security feature is enabled.
|
||||
if(isSecurityAvailable(conf)){
|
||||
snapshot = writeAclToSnapshotDescription(snapshot, conf);
|
||||
}
|
||||
|
||||
return snapshot;
|
||||
}
|
||||
|
||||
@ -306,7 +319,7 @@ public final class SnapshotDescriptionUtils {
|
||||
}
|
||||
|
||||
/**
|
||||
* Read in the {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} stored for the snapshot in the passed directory
|
||||
* Read in the {@link SnapshotDescription} stored for the snapshot in the passed directory
|
||||
* @param fs filesystem where the snapshot was taken
|
||||
* @param snapshotDir directory where the snapshot was stored
|
||||
* @return the stored snapshot description
|
||||
@ -364,4 +377,32 @@ public final class SnapshotDescriptionUtils {
|
||||
if (!snapshot.hasOwner()) return false;
|
||||
return snapshot.getOwner().equals(user.getShortName());
|
||||
}
|
||||
|
||||
public static boolean isSecurityAvailable(Configuration conf) throws IOException {
|
||||
Connection conn = ConnectionFactory.createConnection(conf);
|
||||
try {
|
||||
Admin admin = conn.getAdmin();
|
||||
try {
|
||||
return admin.tableExists(AccessControlLists.ACL_TABLE_NAME);
|
||||
} finally {
|
||||
admin.close();
|
||||
}
|
||||
} finally {
|
||||
conn.close();
|
||||
}
|
||||
}
|
||||
|
||||
private static SnapshotDescription writeAclToSnapshotDescription(
|
||||
final SnapshotDescription snapshot, final Configuration conf) throws IOException {
|
||||
ListMultimap<String, TablePermission> perms =
|
||||
User.runAsLoginUser(new PrivilegedExceptionAction<ListMultimap<String, TablePermission>>() {
|
||||
@Override
|
||||
public ListMultimap<String, TablePermission> run() throws Exception {
|
||||
return AccessControlLists.getTablePermissions(conf,
|
||||
TableName.valueOf(snapshot.getTable()));
|
||||
}
|
||||
});
|
||||
return snapshot.toBuilder().setUsersAndPermissions(ProtobufUtil.toUserTablePermissions(perms))
|
||||
.build();
|
||||
}
|
||||
}
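A rough sketch (again not part of the patch) of how the updated validate() is expected to behave from a caller's point of view: on a cluster where the ACL table exists, the returned description also carries the table's permissions, read from hbase:acl as the login user. The snapshot and table names below are placeholders.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;

public class ValidateSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    SnapshotDescription desc = SnapshotDescription.newBuilder()
        .setName("exampleSnapshot")   // placeholder snapshot name
        .setTable("exampleTable")     // placeholder table name
        .build();
    // validate() fills in defaults (e.g. creation time) and, when the ACL
    // table is available, attaches the table's users-and-permissions to the
    // description so they can be restored later.
    desc = SnapshotDescriptionUtils.validate(desc, conf);
    if (SnapshotDescriptionUtils.isSecurityAvailable(conf)) {
      System.out.println("ACLs captured: " + desc.hasUsersAndPermissions());
    }
  }
}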

@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
@ -51,7 +52,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.WALLink;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.util.FSUtils;

@ -42,8 +42,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.regionserver.HRegion;

@ -36,7 +36,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;

@ -40,7 +40,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.util.ByteStringer;
@ -126,7 +126,7 @@ public final class SnapshotManifestV2 {
}

static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf,
final Executor executor,final FileSystem fs, final Path snapshotDir,
final Executor executor, final FileSystem fs, final Path snapshotDir,
final SnapshotDescription desc, final int manifestSizeLimit) throws IOException {
FileStatus[] manifestFiles = FSUtils.listStatus(fs, snapshotDir, new PathFilter() {
@Override

@ -39,7 +39,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;

@ -24,7 +24,7 @@
import="org.apache.hadoop.hbase.client.HConnectionManager"
import="org.apache.hadoop.hbase.master.HMaster"
import="org.apache.hadoop.hbase.snapshot.SnapshotInfo"
import="org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"
import="org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription"
import="org.apache.hadoop.util.StringUtils"
import="org.apache.hadoop.hbase.TableName"
import="org.apache.hadoop.hbase.HBaseConfiguration" %>

@ -27,7 +27,7 @@
import="org.apache.hadoop.hbase.HBaseConfiguration"
import="org.apache.hadoop.hbase.client.Admin"
import="org.apache.hadoop.hbase.master.HMaster"
import="org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription"
import="org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription"
import="org.apache.hadoop.hbase.snapshot.SnapshotInfo"
import="org.apache.hadoop.hbase.TableName"
import="org.apache.hadoop.util.StringUtils" %>

@ -34,10 +34,10 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;

@ -0,0 +1,243 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hbase.client;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessControlConstants;
import org.apache.hadoop.hbase.security.access.AccessController;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.SecureTestUtil;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;

import java.io.IOException;

@Category(MediumTests.class)
public class TestSnapshotWithAcl extends SecureTestUtil {

public TableName TEST_TABLE = TableName.valueOf("TestSnapshotWithAcl");

private static final int ROW_COUNT = 30000;

private static byte[] TEST_FAMILY = Bytes.toBytes("f1");
private static byte[] TEST_QUALIFIER = Bytes.toBytes("cq");
private static byte[] TEST_ROW = Bytes.toBytes(0);
private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static Configuration conf;
private static HBaseAdmin admin = null;

// user is table owner. will have all permissions on table
private static User USER_OWNER;
// user with rw permissions on column family.
private static User USER_RW;
// user with read-only permissions
private static User USER_RO;
// user with no permissions
private static User USER_NONE;

static class AccessReadAction implements AccessTestAction {

private TableName tableName;

public AccessReadAction(TableName tableName) {
this.tableName = tableName;
}

@Override
public Object run() throws Exception {
Get g = new Get(TEST_ROW);
g.addFamily(TEST_FAMILY);
HTable t = new HTable(conf, tableName);
try {
t.get(g);
} finally {
t.close();
}
return null;
}
};

static class AccessWriteAction implements AccessTestAction {
private TableName tableName;

public AccessWriteAction(TableName tableName) {
this.tableName = tableName;
}

@Override
public Object run() throws Exception {
Put p = new Put(TEST_ROW);
p.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(1));
HTable t = new HTable(conf, tableName);
try {
t.put(p);
} finally {
t.close();
}
return null;
}
}

@BeforeClass
public static void setupBeforeClass() throws Exception {
conf = TEST_UTIL.getConfiguration();
// Enable security
enableSecurity(conf);
conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName());
// Verify enableSecurity sets up what we require
verifyConfiguration(conf);
// Enable EXEC permission checking
conf.setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true);
TEST_UTIL.startMiniCluster();
MasterCoprocessorHost cpHost =
TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);

USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]);
USER_RW = User.createUserForTesting(conf, "rwuser", new String[0]);
USER_RO = User.createUserForTesting(conf, "rouser", new String[0]);
USER_NONE = User.createUserForTesting(conf, "usernone", new String[0]);
}

@Before
public void setUp() throws Exception {
admin = TEST_UTIL.getHBaseAdmin();
HTableDescriptor htd = new HTableDescriptor(TEST_TABLE);
HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
hcd.setMaxVersions(100);
htd.addFamily(hcd);
htd.setOwner(USER_OWNER);
admin.createTable(htd, new byte[][] { Bytes.toBytes("s") });
TEST_UTIL.waitTableEnabled(TEST_TABLE);

grantOnTable(TEST_UTIL, USER_RW.getShortName(), TEST_TABLE, TEST_FAMILY, null,
Permission.Action.READ, Permission.Action.WRITE);

grantOnTable(TEST_UTIL, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null,
Permission.Action.READ);
}

private void loadData() throws IOException {
HTable hTable = new HTable(conf, TEST_TABLE);
try {
for (int i = 0; i < ROW_COUNT; i++) {
Put put = new Put(Bytes.toBytes(i));
put.add(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
hTable.put(put);
}
hTable.flushCommits();
} finally {
hTable.close();
}
}

@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}

private void verifyRows(TableName tableName) throws IOException {
HTable hTable = new HTable(conf, tableName);
try {
Scan scan = new Scan();
ResultScanner scanner = hTable.getScanner(scan);
Result result;
int rowCount = 0;
while ((result = scanner.next()) != null) {
byte[] value = result.getValue(TEST_FAMILY, TEST_QUALIFIER);
Assert.assertArrayEquals(value, Bytes.toBytes(rowCount++));
}
Assert.assertEquals(rowCount, ROW_COUNT);
} finally {
hTable.close();
}
}

@Test
public void testRestoreSnapshot() throws Exception {
verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RO, USER_RW);
verifyDenied(new AccessReadAction(TEST_TABLE), USER_NONE);
verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);

loadData();
verifyRows(TEST_TABLE);

String snapshotName1 = "testSnapshot1";
admin.snapshot(snapshotName1, TEST_TABLE);

// clone snapshot with restoreAcl true.
TableName tableName1 = TableName.valueOf("tableName1");
admin.cloneSnapshot(snapshotName1, tableName1, true);
verifyRows(tableName1);
verifyAllowed(new AccessReadAction(tableName1), USER_OWNER, USER_RO, USER_RW);
verifyDenied(new AccessReadAction(tableName1), USER_NONE);
verifyAllowed(new AccessWriteAction(tableName1), USER_OWNER, USER_RW);
verifyDenied(new AccessWriteAction(tableName1), USER_RO, USER_NONE);

// clone snapshot with restoreAcl false.
TableName tableName2 = TableName.valueOf("tableName2");
admin.cloneSnapshot(snapshotName1, tableName2, false);
verifyRows(tableName2);
verifyAllowed(new AccessReadAction(tableName2), USER_OWNER);
verifyDenied(new AccessReadAction(tableName2), USER_NONE, USER_RO, USER_RW);
verifyAllowed(new AccessWriteAction(tableName2), USER_OWNER);
verifyDenied(new AccessWriteAction(tableName2), USER_RO, USER_RW, USER_NONE);

// remove read permission for USER_RO.
revokeFromTable(TEST_UTIL, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null,
Permission.Action.READ);
verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessReadAction(TEST_TABLE), USER_RO, USER_NONE);
verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);

// restore snapshot with restoreAcl false.
admin.disableTable(TEST_TABLE);
admin.restoreSnapshot(snapshotName1, false, false);
admin.enableTable(TEST_TABLE);
verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessReadAction(TEST_TABLE), USER_RO, USER_NONE);
verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);

// restore snapshot with restoreAcl true.
admin.disableTable(TEST_TABLE);
admin.restoreSnapshot(snapshotName1, false, true);
admin.enableTable(TEST_TABLE);
verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RO, USER_RW);
verifyDenied(new AccessReadAction(TEST_TABLE), USER_NONE);
verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);
}
}

@ -59,10 +59,10 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;

@ -34,6 +34,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
@ -41,7 +42,6 @@ import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.snapshot.DisabledTableSnapshotHandler;
import org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;

@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;

@ -168,7 +168,7 @@ public class SecureTestUtil {
* To indicate the action was not allowed, either throw an AccessDeniedException
* or return an empty list of KeyValues.
*/
static interface AccessTestAction extends PrivilegedExceptionAction<Object> { }
public static interface AccessTestAction extends PrivilegedExceptionAction<Object> { }

/** This fails only in case of ADE or empty list for any of the actions. */
public static void verifyAllowed(User user, AccessTestAction... actions) throws Exception {

@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
@ -108,7 +109,6 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.CheckPermissionsRequest;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;

@ -57,7 +57,7 @@ import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;

@ -59,7 +59,7 @@ import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
@ -263,8 +263,8 @@ public final class SnapshotTestingUtils {
* @param sleep: amount to sleep between checks to see if the snapshot is done
* @throws ServiceException if the snapshot fails
*/
public static void waitForSnapshotToComplete(HMaster master,
SnapshotDescription snapshot, long sleep) throws ServiceException {
public static void waitForSnapshotToComplete(HMaster master, SnapshotDescription snapshot,
long sleep) throws ServiceException {
final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder()
.setSnapshot(snapshot).build();
IsSnapshotDoneResponse done = IsSnapshotDoneResponse.newBuilder()

@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;

@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;

@ -23,12 +23,12 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.After;

@ -31,11 +31,11 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.TestTableName;
import org.junit.After;

@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hbase.snapshot;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;

@ -30,8 +29,8 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
import org.junit.After;
import org.junit.BeforeClass;
@ -77,6 +76,8 @@ public class TestSnapshotDescriptionUtils {
fail("Snapshot was considered valid without a table name");
} catch (IllegalArgumentException e) {
LOG.debug("Correctly failed when snapshot doesn't have a tablename");
} catch (IOException e) {
LOG.debug("Correctly failed when saving acl into snapshot");
}
}

@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hbase.snapshot;

import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -28,8 +27,8 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;

@ -87,6 +87,7 @@ module HBaseConstants
DATA = 'DATA'
SERVER_NAME = 'SERVER_NAME'
LOCALITY_THRESHOLD = 'LOCALITY_THRESHOLD'
RESTORE_ACL = 'RESTORE_ACL'

# Load constants from hbase java API
def self.promote_constants(constants)

@ -24,7 +24,7 @@ java_import org.apache.hadoop.hbase.util.RegionSplitter
java_import org.apache.hadoop.hbase.util.Bytes
java_import org.apache.hadoop.hbase.ServerName
java_import org.apache.hadoop.hbase.TableName
java_import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos::SnapshotDescription
java_import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos::SnapshotDescription

# Wrapper for org.apache.hadoop.hbase.client.HBaseAdmin

@ -907,14 +907,16 @@ module Hbase

#----------------------------------------------------------------------------------------------
# Restore specified snapshot
def restore_snapshot(snapshot_name)
@admin.restoreSnapshot(snapshot_name.to_java_bytes)
def restore_snapshot(snapshot_name, restore_acl = false)
conf = @connection.getConfiguration
take_fail_safe_snapshot = conf.getBoolean("hbase.snapshot.restore.take.failsafe.snapshot", false)
@admin.restoreSnapshot(snapshot_name, take_fail_safe_snapshot, restore_acl)
end

#----------------------------------------------------------------------------------------------
# Create a new table by cloning the snapshot content
def clone_snapshot(snapshot_name, table)
@admin.cloneSnapshot(snapshot_name.to_java_bytes, table.to_java_bytes)
def clone_snapshot(snapshot_name, table, restore_acl = false)
@admin.cloneSnapshot(snapshot_name, org.apache.hadoop.hbase::TableName.valueOf(table), restore_acl)
end

#----------------------------------------------------------------------------------------------

@ -28,12 +28,19 @@ And writing on the newly created table will not influence the snapshot data.
Examples:
hbase> clone_snapshot 'snapshotName', 'tableName'
hbase> clone_snapshot 'snapshotName', 'namespace:tableName'

The following command will restore all acl from the origin snapshot table into the
newly created table.

hbase> clone_snapshot 'snapshotName', 'namespace:tableName', {RESTORE_ACL=>true}
EOF
end

def command(snapshot_name, table)
def command(snapshot_name, table, args = {})
format_simple_command do
admin.clone_snapshot(snapshot_name, table)
raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
restore_acl = args.delete(RESTORE_ACL) || false
admin.clone_snapshot(snapshot_name, table, restore_acl)
end
end

@ -28,12 +28,18 @@ The table must be disabled.

Examples:
hbase> restore_snapshot 'snapshotName'

The following command will restore all acl from the snapshot into the table.

hbase> restore_snapshot 'snapshotName', {RESTORE_ACL=>true}
EOF
end

def command(snapshot_name)
def command(snapshot_name, args = {})
format_simple_command do
admin.restore_snapshot(snapshot_name)
raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
restore_acl = args.delete(RESTORE_ACL) || false
admin.restore_snapshot(snapshot_name, restore_acl)
end
end
end