HBASE-11013: Clone Snapshots on Secure Cluster Should provide option to apply Retained User Permissions

Signed-off-by: Guanghao Zhang <zghao@apache.org>
huzheng authored 2017-05-08 21:01:47 +08:00; committed by Guanghao Zhang
parent 32d2062b5c
commit 37dd8ff722
67 changed files with 14677 additions and 2352 deletions

Admin.java

@@ -1520,6 +1520,23 @@ public interface Admin extends Abortable, Closeable {
void restoreSnapshot(final String snapshotName, final boolean takeFailSafeSnapshot)
throws IOException, RestoreSnapshotException;
/**
* Restore the specified snapshot on the original table. (The table must be disabled) If
* 'takeFailSafeSnapshot' is set to true, a snapshot of the current table is taken before
* executing the restore operation. In case of restore failure, the failsafe snapshot will be
* restored. If the restore completes without problem the failsafe snapshot is deleted. The
* failsafe snapshot name is configurable by using the property
* "hbase.snapshot.restore.failsafe.name".
* @param snapshotName name of the snapshot to restore
* @param takeFailSafeSnapshot true if the failsafe snapshot should be taken
* @param restoreAcl true to restore the ACLs of the snapshot
* @throws IOException if a remote or network exception occurs
* @throws RestoreSnapshotException if snapshot failed to be restored
* @throws IllegalArgumentException if the restore request is formatted incorrectly
*/
void restoreSnapshot(final String snapshotName, final boolean takeFailSafeSnapshot,
final boolean restoreAcl) throws IOException, RestoreSnapshotException;
/**
* Create a new table by cloning the snapshot content.
*
@@ -1533,6 +1550,19 @@ public interface Admin extends Abortable, Closeable {
void cloneSnapshot(final byte[] snapshotName, final TableName tableName)
throws IOException, TableExistsException, RestoreSnapshotException;
/**
* Create a new table by cloning the snapshot content.
* @param snapshotName name of the snapshot to be cloned
* @param tableName name of the table where the snapshot will be restored
* @param restoreAcl true to clone the ACLs into the newly created table
* @throws IOException if a remote or network exception occurs
* @throws TableExistsException if table to be created already exists
* @throws RestoreSnapshotException if snapshot failed to be cloned
* @throws IllegalArgumentException if the specified table does not have a valid name
*/
void cloneSnapshot(final String snapshotName, final TableName tableName, final boolean restoreAcl)
throws IOException, TableExistsException, RestoreSnapshotException;
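
A minimal usage sketch of the two new ACL-aware overloads declared above; the connection boilerplate and the snapshot/table names ("snap1", "t1", "t1_clone") are hypothetical and not part of this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotAclUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("t1");
      // Clone into a new table, re-applying the ACLs retained in the snapshot.
      admin.cloneSnapshot("snap1", TableName.valueOf("t1_clone"), true);
      // Restore in place: the table must be disabled first; restoreAcl=true
      // re-applies the retained permissions, and takeFailSafeSnapshot=true
      // keeps a safety snapshot around in case the restore fails.
      admin.disableTable(table);
      admin.restoreSnapshot("snap1", true, true);
      admin.enableTable(table);
    }
  }
}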
/**
* Create a new table by cloning the snapshot content.
*

AsyncHBaseAdmin.java

@@ -178,6 +178,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Remov
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -1706,7 +1707,7 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
@Override
public CompletableFuture<Void> snapshot(SnapshotDescription snapshotDesc) {
HBaseProtos.SnapshotDescription snapshot =
SnapshotProtos.SnapshotDescription snapshot =
ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc);
try {
ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
@@ -1916,7 +1917,7 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
private CompletableFuture<Void> internalRestoreSnapshot(String snapshotName,
TableName tableName) {
HBaseProtos.SnapshotDescription snapshot = HBaseProtos.SnapshotDescription.newBuilder()
SnapshotProtos.SnapshotDescription snapshot = SnapshotProtos.SnapshotDescription.newBuilder()
.setName(snapshotName).setTable(tableName.getNameAsString()).build();
try {
ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);

HBaseAdmin.java

@@ -107,7 +107,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
@@ -186,6 +185,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRe
import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
@@ -2420,7 +2420,7 @@ public class HBaseAdmin implements Admin {
public void snapshot(SnapshotDescription snapshotDesc)
throws IOException, SnapshotCreationException, IllegalArgumentException {
// actually take the snapshot
HBaseProtos.SnapshotDescription snapshot =
SnapshotProtos.SnapshotDescription snapshot =
ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc);
SnapshotResponse response = asyncSnapshot(snapshot);
final IsSnapshotDoneRequest request =
@@ -2466,7 +2466,7 @@ public class HBaseAdmin implements Admin {
asyncSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc));
}
private SnapshotResponse asyncSnapshot(HBaseProtos.SnapshotDescription snapshot)
private SnapshotResponse asyncSnapshot(SnapshotProtos.SnapshotDescription snapshot)
throws IOException {
ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);
final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot)
@@ -2484,7 +2484,7 @@ public class HBaseAdmin implements Admin {
@Override
public boolean isSnapshotFinished(final SnapshotDescription snapshotDesc)
throws IOException, HBaseSnapshotException, UnknownSnapshotException {
final HBaseProtos.SnapshotDescription snapshot =
final SnapshotProtos.SnapshotDescription snapshot =
ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc);
return executeCallable(new MasterCallable<IsSnapshotDoneResponse>(getConnection(),
getRpcControllerFactory()) {
@@ -2542,13 +2542,19 @@ public class HBaseAdmin implements Admin {
}
@Override
public void restoreSnapshot(final String snapshotName, final boolean takeFailSafeSnapshot)
public void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot)
throws IOException, RestoreSnapshotException {
restoreSnapshot(snapshotName, takeFailSafeSnapshot, false);
}
@Override
public void restoreSnapshot(final String snapshotName, final boolean takeFailSafeSnapshot,
final boolean restoreAcl) throws IOException, RestoreSnapshotException {
TableName tableName = getTableNameBeforeRestoreSnapshot(snapshotName);
// The table does not exist, switch to clone.
if (!tableExists(tableName)) {
cloneSnapshot(snapshotName, tableName);
cloneSnapshot(snapshotName, tableName, restoreAcl);
return;
}
@@ -2573,7 +2579,7 @@ public class HBaseAdmin implements Admin {
try {
// Restore snapshot
get(
internalRestoreSnapshotAsync(snapshotName, tableName),
internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl),
syncWaitTimeout,
TimeUnit.MILLISECONDS);
} catch (IOException e) {
@@ -2582,7 +2588,7 @@ public class HBaseAdmin implements Admin {
if (takeFailSafeSnapshot) {
try {
get(
internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName),
internalRestoreSnapshotAsync(failSafeSnapshotSnapshotName, tableName, restoreAcl),
syncWaitTimeout,
TimeUnit.MILLISECONDS);
String msg = "Restore snapshot=" + snapshotName +
@@ -2625,7 +2631,7 @@ public class HBaseAdmin implements Admin {
throw new TableNotDisabledException(tableName);
}
return internalRestoreSnapshotAsync(snapshotName, tableName);
return internalRestoreSnapshotAsync(snapshotName, tableName, false);
}
@Override
@@ -2635,24 +2641,30 @@ public class HBaseAdmin implements Admin {
}
@Override
public void cloneSnapshot(final String snapshotName, final TableName tableName)
public void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl)
throws IOException, TableExistsException, RestoreSnapshotException {
if (tableExists(tableName)) {
throw new TableExistsException(tableName);
}
get(
internalRestoreSnapshotAsync(snapshotName, tableName),
internalRestoreSnapshotAsync(snapshotName, tableName, restoreAcl),
Integer.MAX_VALUE,
TimeUnit.MILLISECONDS);
}
@Override
public void cloneSnapshot(final String snapshotName, final TableName tableName)
throws IOException, TableExistsException, RestoreSnapshotException {
cloneSnapshot(snapshotName, tableName, false);
}
@Override
public Future<Void> cloneSnapshotAsync(final String snapshotName, final TableName tableName)
throws IOException, TableExistsException {
if (tableExists(tableName)) {
throw new TableExistsException(tableName);
}
return internalRestoreSnapshotAsync(snapshotName, tableName);
return internalRestoreSnapshotAsync(snapshotName, tableName, false);
}
@Override
@@ -2740,10 +2752,10 @@ public class HBaseAdmin implements Admin {
* @throws RestoreSnapshotException if snapshot failed to be restored
* @throws IllegalArgumentException if the restore request is formatted incorrectly
*/
private Future<Void> internalRestoreSnapshotAsync(
final String snapshotName,
final TableName tableName) throws IOException, RestoreSnapshotException {
final HBaseProtos.SnapshotDescription snapshot = HBaseProtos.SnapshotDescription.newBuilder()
private Future<Void> internalRestoreSnapshotAsync(final String snapshotName,
final TableName tableName, final boolean restoreAcl)
throws IOException, RestoreSnapshotException {
final SnapshotProtos.SnapshotDescription snapshot = SnapshotProtos.SnapshotDescription.newBuilder()
.setName(snapshotName).setTable(tableName.getNameAsString()).build();
// actually restore the snapshot
@@ -2757,6 +2769,7 @@ public class HBaseAdmin implements Admin {
.setSnapshot(snapshot)
.setNonceGroup(ng.getNonceGroup())
.setNonce(ng.newNonce())
.setRestoreACL(restoreAcl)
.build();
return master.restoreSnapshot(getRpcController(), request);
}
@@ -2768,7 +2781,7 @@ public class HBaseAdmin implements Admin {
private static class RestoreSnapshotFuture extends TableFuture<Void> {
public RestoreSnapshotFuture(
final HBaseAdmin admin,
final HBaseProtos.SnapshotDescription snapshot,
final SnapshotProtos.SnapshotDescription snapshot,
final TableName tableName,
final RestoreSnapshotResponse response) {
super(admin, tableName,
@@ -2798,12 +2811,12 @@ public class HBaseAdmin implements Admin {
getRpcControllerFactory()) {
@Override
protected List<SnapshotDescription> rpcCall() throws Exception {
List<HBaseProtos.SnapshotDescription> snapshotsList = master
List<SnapshotProtos.SnapshotDescription> snapshotsList = master
.getCompletedSnapshots(getRpcController(),
GetCompletedSnapshotsRequest.newBuilder().build())
.getSnapshotsList();
List<SnapshotDescription> result = new ArrayList<>(snapshotsList.size());
for (HBaseProtos.SnapshotDescription snapshot : snapshotsList) {
for (SnapshotProtos.SnapshotDescription snapshot : snapshotsList) {
result.add(ProtobufUtil.createSnapshotDesc(snapshot));
}
return result;
@@ -2866,7 +2879,7 @@ public class HBaseAdmin implements Admin {
protected Void rpcCall() throws Exception {
master.deleteSnapshot(getRpcController(),
DeleteSnapshotRequest.newBuilder().setSnapshot(
HBaseProtos.SnapshotDescription.newBuilder().setName(snapshotName).build())
SnapshotProtos.SnapshotDescription.newBuilder().setName(snapshotName).build())
.build()
);
return null;
@@ -4122,7 +4135,7 @@ public class HBaseAdmin implements Admin {
/**
* Decide whether the table needs to be replicated to the peer cluster according to the peer config
* @param table name of the table
* @param peerConfig config for the peer
* @param peer config for the peer
* @return true if the table needs to be replicated to the peer cluster
*/
private boolean needToReplicate(TableName table, ReplicationPeerDescription peer) {

ShadedAccessControlUtil.java (new file)

@@ -0,0 +1,277 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.security.access;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ListMultimap;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/**
* Convert protobuf objects in AccessControl.proto under hbase-protocol-shaded to user-oriented
* objects and vice versa. <br>
*
* In HBASE-15638, we created the hbase-protocol-shaded module to upgrade the protobuf version to
* 3.x, but some coprocessor endpoints (such as AccessControl, Authentication, MultiRowMutation)
* still depend on the hbase-protocol module for CPEP compatibility. In practice, we use the PB
* objects in AccessControl.proto under hbase-protocol for the access control logic, and use the
* shaded AccessControl.proto only for serializing/deserializing the permissions of .snapshotinfo.
*/
@InterfaceAudience.Private
public class ShadedAccessControlUtil {
/**
* Convert a client Permission.Action to the shaded protobuf Permission.Action.
*/
public static
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Action
toPermissionAction(Permission.Action action) {
switch (action) {
case READ:
return org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Action.READ;
case WRITE:
return org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Action.WRITE;
case EXEC:
return org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Action.EXEC;
case CREATE:
return org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Action.CREATE;
case ADMIN:
return org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Action.ADMIN;
}
throw new IllegalArgumentException("Unknown action value " + action.name());
}
/**
* Convert a Permission.Action shaded proto to a client Permission.Action object.
*/
public static Permission.Action toPermissionAction(
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Action action) {
switch (action) {
case READ:
return Permission.Action.READ;
case WRITE:
return Permission.Action.WRITE;
case EXEC:
return Permission.Action.EXEC;
case CREATE:
return Permission.Action.CREATE;
case ADMIN:
return Permission.Action.ADMIN;
}
throw new IllegalArgumentException("Unknown action value " + action.name());
}
/**
* Converts a list of Permission.Action shaded proto to a list of client Permission.Action
* objects.
* @param protoActions the list of shaded protobuf Actions
* @return the converted list of Actions
*/
public static List<Permission.Action> toPermissionActions(
List<org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Action> protoActions) {
List<Permission.Action> actions = new ArrayList<>(protoActions.size());
for (org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Action a : protoActions) {
actions.add(toPermissionAction(a));
}
return actions;
}
public static org.apache.hadoop.hbase.TableName toTableName(HBaseProtos.TableName tableNamePB) {
return org.apache.hadoop.hbase.TableName.valueOf(
tableNamePB.getNamespace().asReadOnlyByteBuffer(),
tableNamePB.getQualifier().asReadOnlyByteBuffer());
}
public static HBaseProtos.TableName toProtoTableName(TableName tableName) {
return HBaseProtos.TableName.newBuilder()
.setNamespace(ByteString.copyFrom(tableName.getNamespace()))
.setQualifier(ByteString.copyFrom(tableName.getQualifier())).build();
}
/**
* Converts a Permission shaded proto to a client TablePermission object.
* @param proto the protobuf Permission
* @return the converted TablePermission
*/
public static TablePermission toTablePermission(AccessControlProtos.Permission proto) {
if (proto.getType() == AccessControlProtos.Permission.Type.Global) {
AccessControlProtos.GlobalPermission perm = proto.getGlobalPermission();
List<Action> actions = toPermissionActions(perm.getActionList());
return new TablePermission(null, null, null,
actions.toArray(new Permission.Action[actions.size()]));
}
if (proto.getType() == AccessControlProtos.Permission.Type.Namespace) {
AccessControlProtos.NamespacePermission perm = proto.getNamespacePermission();
List<Permission.Action> actions = toPermissionActions(perm.getActionList());
if (!proto.hasNamespacePermission()) {
throw new IllegalStateException("Namespace must not be empty in NamespacePermission");
}
String namespace = perm.getNamespaceName().toStringUtf8();
return new TablePermission(namespace, actions.toArray(new Permission.Action[actions.size()]));
}
if (proto.getType() == AccessControlProtos.Permission.Type.Table) {
AccessControlProtos.TablePermission perm = proto.getTablePermission();
List<Permission.Action> actions = toPermissionActions(perm.getActionList());
byte[] qualifier = null;
byte[] family = null;
TableName table = null;
if (!perm.hasTableName()) {
throw new IllegalStateException("TableName cannot be empty");
}
table = toTableName(perm.getTableName());
if (perm.hasFamily()) family = perm.getFamily().toByteArray();
if (perm.hasQualifier()) qualifier = perm.getQualifier().toByteArray();
return new TablePermission(table, family, qualifier,
actions.toArray(new Permission.Action[actions.size()]));
}
throw new IllegalStateException("Unrecognized Perm Type: " + proto.getType());
}
/**
* Convert a client Permission to a Permission shaded proto
* @param perm the client Permission
* @return the protobuf Permission
*/
public static org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission
toPermission(Permission perm) {
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Builder ret =
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission
.newBuilder();
if (perm instanceof TablePermission) {
TablePermission tablePerm = (TablePermission) perm;
if (tablePerm.hasNamespace()) {
ret.setType(
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type.Namespace);
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.NamespacePermission.Builder builder =
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.NamespacePermission
.newBuilder();
builder.setNamespaceName(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
.copyFromUtf8(tablePerm.getNamespace()));
Permission.Action[] actions = perm.getActions();
if (actions != null) {
for (Permission.Action a : actions) {
builder.addAction(toPermissionAction(a));
}
}
ret.setNamespacePermission(builder);
return ret.build();
} else if (tablePerm.hasTable()) {
ret.setType(
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type.Table);
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.TablePermission.Builder builder =
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.TablePermission
.newBuilder();
builder.setTableName(toProtoTableName(tablePerm.getTableName()));
if (tablePerm.hasFamily()) {
builder.setFamily(ByteString.copyFrom(tablePerm.getFamily()));
}
if (tablePerm.hasQualifier()) {
builder.setQualifier(ByteString.copyFrom(tablePerm.getQualifier()));
}
Permission.Action actions[] = perm.getActions();
if (actions != null) {
for (Permission.Action a : actions) {
builder.addAction(toPermissionAction(a));
}
}
ret.setTablePermission(builder);
return ret.build();
}
}
ret.setType(
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type.Global);
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GlobalPermission.Builder builder =
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GlobalPermission
.newBuilder();
Permission.Action actions[] = perm.getActions();
if (actions != null) {
for (Permission.Action a : actions) {
builder.addAction(toPermissionAction(a));
}
}
ret.setGlobalPermission(builder);
return ret.build();
}
/**
* Convert a shaded protobuf UsersAndPermissions to a ListMultimap&lt;String, TablePermission&gt;
* where the key is the username.
* @param proto the protobuf UsersAndPermissions
* @return the converted permissions, keyed by username
*/
public static ListMultimap<String, TablePermission> toUserTablePermissions(
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions proto) {
ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions.UserPermissions userPerm;
for (int i = 0; i < proto.getUserPermissionsCount(); i++) {
userPerm = proto.getUserPermissions(i);
for (int j = 0; j < userPerm.getPermissionsCount(); j++) {
TablePermission tablePerm = toTablePermission(userPerm.getPermissions(j));
perms.put(userPerm.getUser().toStringUtf8(), tablePerm);
}
}
return perms;
}
/**
* Convert a ListMultimap&lt;String, TablePermission&gt; keyed by username to a shaded protobuf
* UsersAndPermissions
* @param perm the user and table permissions, keyed by username
* @return the protobuf UsersAndPermissions
*/
public static
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions
toUserTablePermissions(ListMultimap<String, TablePermission> perm) {
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder builder =
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions
.newBuilder();
for (Map.Entry<String, Collection<TablePermission>> entry : perm.asMap().entrySet()) {
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions.UserPermissions.Builder userPermBuilder =
org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions.UserPermissions
.newBuilder();
userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey()));
for (TablePermission tablePerm : entry.getValue()) {
userPermBuilder.addPermissions(toPermission(tablePerm));
}
builder.addUserPermissions(userPermBuilder.build());
}
return builder.build();
}
}
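
A hedged round-trip sketch of this new class's converters, following the class comment above: the ListMultimap form is what the access-control logic consumes, while the shaded UsersAndPermissions form is what gets serialized into .snapshotinfo. The class name and the "alice"/"t1" names are illustrative only:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.ListMultimap;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
import org.apache.hadoop.hbase.security.access.TablePermission;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos;

public class AclRoundTripSketch {
  public static void main(String[] args) {
    ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
    // A table-level permission for one user (null family/qualifier = whole table).
    perms.put("alice", new TablePermission(TableName.valueOf("t1"),
        (byte[]) null, (byte[]) null, Permission.Action.READ, Permission.Action.WRITE));
    // POJO -> shaded proto: the form persisted alongside the snapshot...
    AccessControlProtos.UsersAndPermissions proto =
        ShadedAccessControlUtil.toUserTablePermissions(perms);
    // ...and shaded proto -> POJO: the form clone/restore re-applies.
    ListMultimap<String, TablePermission> back =
        ShadedAccessControlUtil.toUserTablePermissions(proto);
    System.out.println(back);
  }
}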

TablePermission.java

@@ -155,6 +155,10 @@ public class TablePermission extends Permission {
return table;
}
public void setTableName(TableName table) {
this.table = table;
}
public boolean hasFamily() {
return family != null;
}

ProtobufUtil.java

@@ -35,7 +35,6 @@ import java.util.NavigableSet;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
@@ -164,6 +163,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedu
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
@@ -2954,9 +2954,9 @@ public final class ProtobufUtil {
* @param type the SnapshotDescription type
* @return the protobuf SnapshotDescription type
*/
public static HBaseProtos.SnapshotDescription.Type
public static SnapshotProtos.SnapshotDescription.Type
createProtosSnapShotDescType(SnapshotType type) {
return HBaseProtos.SnapshotDescription.Type.valueOf(type.name());
return SnapshotProtos.SnapshotDescription.Type.valueOf(type.name());
}
/**
@@ -2965,9 +2965,9 @@ public final class ProtobufUtil {
* @param snapshotDesc string representing the snapshot description type
* @return the protobuf SnapshotDescription type
*/
public static HBaseProtos.SnapshotDescription.Type
public static SnapshotProtos.SnapshotDescription.Type
createProtosSnapShotDescType(String snapshotDesc) {
return HBaseProtos.SnapshotDescription.Type.valueOf(snapshotDesc.toUpperCase(Locale.ROOT));
return SnapshotProtos.SnapshotDescription.Type.valueOf(snapshotDesc.toUpperCase(Locale.ROOT));
}
/**
@@ -2976,7 +2976,7 @@ public final class ProtobufUtil {
* @param type the snapshot description type
* @return the protobuf SnapshotDescription type
*/
public static SnapshotType createSnapshotType(HBaseProtos.SnapshotDescription.Type type) {
public static SnapshotType createSnapshotType(SnapshotProtos.SnapshotDescription.Type type) {
return SnapshotType.valueOf(type.toString());
}
@@ -2986,9 +2986,9 @@ public final class ProtobufUtil {
* @param snapshotDesc the POJO SnapshotDescription
* @return the protobuf SnapshotDescription
*/
public static HBaseProtos.SnapshotDescription
public static SnapshotProtos.SnapshotDescription
createHBaseProtosSnapshotDesc(SnapshotDescription snapshotDesc) {
HBaseProtos.SnapshotDescription.Builder builder = HBaseProtos.SnapshotDescription.newBuilder();
SnapshotProtos.SnapshotDescription.Builder builder = SnapshotProtos.SnapshotDescription.newBuilder();
if (snapshotDesc.getTableName() != null) {
builder.setTable(snapshotDesc.getTableNameAsString());
}
@@ -3005,7 +3005,7 @@ public final class ProtobufUtil {
builder.setVersion(snapshotDesc.getVersion());
}
builder.setType(ProtobufUtil.createProtosSnapShotDescType(snapshotDesc.getType()));
HBaseProtos.SnapshotDescription snapshot = builder.build();
SnapshotProtos.SnapshotDescription snapshot = builder.build();
return snapshot;
}
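
A small sketch of the relocated helpers from the hunks above, assuming only the client-side SnapshotType enum (for example FLUSH): the conversions now target SnapshotProtos.SnapshotDescription.Type rather than the old HBaseProtos nesting.

import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;

public class SnapshotTypeSketch {
  public static void main(String[] args) {
    // POJO enum -> shaded proto enum, now nested under SnapshotProtos.
    SnapshotProtos.SnapshotDescription.Type t =
        ProtobufUtil.createProtosSnapShotDescType(SnapshotType.FLUSH);
    // ...and back to the client-facing enum.
    SnapshotType round = ProtobufUtil.createSnapshotType(t);
    System.out.println(round); // FLUSH
  }
}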
@@ -3017,7 +3017,7 @@ public final class ProtobufUtil {
* @return the POJO SnapshotDescription
*/
public static SnapshotDescription
createSnapshotDesc(HBaseProtos.SnapshotDescription snapshotDesc) {
createSnapshotDesc(SnapshotProtos.SnapshotDescription snapshotDesc) {
return new SnapshotDescription(snapshotDesc.getName(),
snapshotDesc.hasTable() ? TableName.valueOf(snapshotDesc.getTable()) : null,
createSnapshotType(snapshotDesc.getType()), snapshotDesc.getOwner(),

ClientSnapshotDescriptionUtils.java

@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.snapshot;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
import org.apache.hadoop.hbase.util.Bytes;
/**
@@ -36,7 +37,7 @@ public class ClientSnapshotDescriptionUtils {
* @throws IllegalArgumentException if the name of the snapshot or the name of the table to
* snapshot are not valid names.
*/
public static void assertSnapshotRequestIsValid(HBaseProtos.SnapshotDescription snapshot)
public static void assertSnapshotRequestIsValid(SnapshotProtos.SnapshotDescription snapshot)
throws IllegalArgumentException {
// make sure the snapshot name is valid
TableName.isLegalTableQualifierName(Bytes.toBytes(snapshot.getName()), true);
@@ -57,7 +58,7 @@ public class ClientSnapshotDescriptionUtils {
* @param ssd the SnapshotDescription to summarize
* @return Single line string with a summary of the snapshot parameters
*/
public static String toString(HBaseProtos.SnapshotDescription ssd) {
public static String toString(SnapshotProtos.SnapshotDescription ssd) {
if (ssd == null) {
return null;
}
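
For context, a hedged sketch of the validation helper whose parameter type changed above. Per the hunk, the snapshot name is checked with TableName.isLegalTableQualifierName, so an illegal leading dot should surface as IllegalArgumentException; the class and snapshot names here are hypothetical:

import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;

public class SnapshotValidationSketch {
  public static void main(String[] args) {
    SnapshotProtos.SnapshotDescription ok = SnapshotProtos.SnapshotDescription.newBuilder()
        .setName("snap1").setTable("t1").build();
    ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(ok); // passes

    SnapshotProtos.SnapshotDescription bad = SnapshotProtos.SnapshotDescription.newBuilder()
        .setName(".snap1").build();
    try {
      ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(bad);
    } catch (IllegalArgumentException expected) {
      // a leading '.' is not a legal qualifier, so validation throws
    }
  }
}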

AdminProtos.java (generated)

@@ -19532,7 +19532,6 @@ public final class AdminProtos {
}
/**
* <pre>
*
* Roll request responses no longer include regions to flush
* this list will always be empty when talking to a 1.0 server
* </pre>
@@ -19798,7 +19797,6 @@ public final class AdminProtos {
}
/**
* <pre>
*
* Roll request responses no longer include regions to flush
* this list will always be empty when talking to a 1.0 server
* </pre>

MasterProcedureProtos.java (generated)

@@ -1510,6 +1510,10 @@ public final class MasterProcedureProtos {
* <code>CLONE_SNAPSHOT_POST_OPERATION = 6;</code>
*/
CLONE_SNAPSHOT_POST_OPERATION(6),
/**
* <code>CLONE_SNAPHOST_RESTORE_ACL = 7;</code>
*/
CLONE_SNAPHOST_RESTORE_ACL(7),
;
/**
@@ -1536,6 +1540,10 @@ public final class MasterProcedureProtos {
* <code>CLONE_SNAPSHOT_POST_OPERATION = 6;</code>
*/
public static final int CLONE_SNAPSHOT_POST_OPERATION_VALUE = 6;
/**
* <code>CLONE_SNAPHOST_RESTORE_ACL = 7;</code>
*/
public static final int CLONE_SNAPHOST_RESTORE_ACL_VALUE = 7;
public final int getNumber() {
@@ -1558,6 +1566,7 @@ public final class MasterProcedureProtos {
case 4: return CLONE_SNAPSHOT_ASSIGN_REGIONS;
case 5: return CLONE_SNAPSHOT_UPDATE_DESC_CACHE;
case 6: return CLONE_SNAPSHOT_POST_OPERATION;
case 7: return CLONE_SNAPHOST_RESTORE_ACL;
default: return null;
}
}
@@ -1628,6 +1637,10 @@ public final class MasterProcedureProtos {
* <code>RESTORE_SNAPSHOT_UPDATE_META = 4;</code>
*/
RESTORE_SNAPSHOT_UPDATE_META(4),
/**
* <code>RESTORE_SNAPSHOT_RESTORE_ACL = 5;</code>
*/
RESTORE_SNAPSHOT_RESTORE_ACL(5),
;
/**
@@ -1646,6 +1659,10 @@ public final class MasterProcedureProtos {
* <code>RESTORE_SNAPSHOT_UPDATE_META = 4;</code>
*/
public static final int RESTORE_SNAPSHOT_UPDATE_META_VALUE = 4;
/**
* <code>RESTORE_SNAPSHOT_RESTORE_ACL = 5;</code>
*/
public static final int RESTORE_SNAPSHOT_RESTORE_ACL_VALUE = 5;
public final int getNumber() {
@@ -1666,6 +1683,7 @@ public final class MasterProcedureProtos {
case 2: return RESTORE_SNAPSHOT_UPDATE_TABLE_DESCRIPTOR;
case 3: return RESTORE_SNAPSHOT_WRITE_FS_LAYOUT;
case 4: return RESTORE_SNAPSHOT_UPDATE_META;
case 5: return RESTORE_SNAPSHOT_RESTORE_ACL;
default: return null;
}
}
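
These two added states are what the master-side clone and restore procedures step through to re-apply the retained permissions. Note that CLONE_SNAPHOST_RESTORE_ACL ("SNAPHOST") is spelled that way consistently in the generated code above, so it appears to be the committed identifier rather than a transcription artifact. A tiny lookup sketch, assuming the enclosing enum names CloneSnapshotState and RestoreSnapshotState suggested by the constant prefixes:

import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;

public class ProcedureStateSketch {
  public static void main(String[] args) {
    // Numeric tags from the diff map to the new ACL-restore states.
    System.out.println(MasterProcedureProtos.CloneSnapshotState.forNumber(7));   // CLONE_SNAPHOST_RESTORE_ACL
    System.out.println(MasterProcedureProtos.RestoreSnapshotState.forNumber(5)); // RESTORE_SNAPSHOT_RESTORE_ACL
  }
}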
@@ -15612,11 +15630,11 @@ public final class MasterProcedureProtos {
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot();
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription getSnapshot();
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder();
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder();
/**
* <code>required .hbase.pb.TableSchema table_schema = 3;</code>
@@ -15737,11 +15755,11 @@ public final class MasterProcedureProtos {
break;
}
case 18: {
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.Builder subBuilder = null;
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = snapshot_.toBuilder();
}
snapshot_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.PARSER, extensionRegistry);
snapshot_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(snapshot_);
snapshot_ = subBuilder.buildPartial();
@@ -15833,7 +15851,7 @@ public final class MasterProcedureProtos {
}
public static final int SNAPSHOT_FIELD_NUMBER = 2;
private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription snapshot_;
private org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription snapshot_;
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
@@ -15843,14 +15861,14 @@ public final class MasterProcedureProtos {
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot() {
return snapshot_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance() : snapshot_;
public org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription getSnapshot() {
return snapshot_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.getDefaultInstance() : snapshot_;
}
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() {
return snapshot_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance() : snapshot_;
public org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() {
return snapshot_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.getDefaultInstance() : snapshot_;
}
public static final int TABLE_SCHEMA_FIELD_NUMBER = 3;
@@ -16598,9 +16616,9 @@ public final class MasterProcedureProtos {
return userInfoBuilder_;
}
private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription snapshot_ = null;
private org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription snapshot_ = null;
private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder> snapshotBuilder_;
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder> snapshotBuilder_;
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
@@ -16610,9 +16628,9 @@ public final class MasterProcedureProtos {
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot() {
public org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription getSnapshot() {
if (snapshotBuilder_ == null) {
return snapshot_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance() : snapshot_;
return snapshot_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.getDefaultInstance() : snapshot_;
} else {
return snapshotBuilder_.getMessage();
}
@@ -16620,7 +16638,7 @@ public final class MasterProcedureProtos {
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
public Builder setSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription value) {
public Builder setSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription value) {
if (snapshotBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
@@ -16637,7 +16655,7 @@ public final class MasterProcedureProtos {
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
public Builder setSnapshot(
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.Builder builderForValue) {
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder builderForValue) {
if (snapshotBuilder_ == null) {
snapshot_ = builderForValue.build();
onChanged();
@@ -16650,13 +16668,13 @@ public final class MasterProcedureProtos {
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
public Builder mergeSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription value) {
public Builder mergeSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription value) {
if (snapshotBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
snapshot_ != null &&
snapshot_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance()) {
snapshot_ != org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.getDefaultInstance()) {
snapshot_ =
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder(snapshot_).mergeFrom(value).buildPartial();
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.newBuilder(snapshot_).mergeFrom(value).buildPartial();
} else {
snapshot_ = value;
}
@@ -16683,7 +16701,7 @@ public final class MasterProcedureProtos {
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.Builder getSnapshotBuilder() {
public org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder getSnapshotBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getSnapshotFieldBuilder().getBuilder();
@@ -16691,23 +16709,23 @@ public final class MasterProcedureProtos {
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() {
public org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() {
if (snapshotBuilder_ != null) {
return snapshotBuilder_.getMessageOrBuilder();
} else {
return snapshot_ == null ?
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance() : snapshot_;
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.getDefaultInstance() : snapshot_;
}
}
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder>
getSnapshotFieldBuilder() {
if (snapshotBuilder_ == null) {
snapshotBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>(
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder>(
getSnapshot(),
getParentForChildren(),
isClean());
@@ -17386,11 +17404,11 @@ public final class MasterProcedureProtos {
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot();
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription getSnapshot();
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder();
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder();
/**
* <code>required .hbase.pb.TableSchema modified_table_schema = 3;</code>
@@ -17561,11 +17579,11 @@ public final class MasterProcedureProtos {
break;
}
case 18: {
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.Builder subBuilder = null;
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = snapshot_.toBuilder();
}
snapshot_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.PARSER, extensionRegistry);
snapshot_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(snapshot_);
snapshot_ = subBuilder.buildPartial();
@@ -17681,7 +17699,7 @@ public final class MasterProcedureProtos {
}
public static final int SNAPSHOT_FIELD_NUMBER = 2;
private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription snapshot_;
private org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription snapshot_;
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
@@ -17691,14 +17709,14 @@ public final class MasterProcedureProtos {
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot() {
return snapshot_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance() : snapshot_;
public org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription getSnapshot() {
return snapshot_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.getDefaultInstance() : snapshot_;
}
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() {
return snapshot_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance() : snapshot_;
public org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() {
return snapshot_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.getDefaultInstance() : snapshot_;
}
public static final int MODIFIED_TABLE_SCHEMA_FIELD_NUMBER = 3;
@@ -18648,9 +18666,9 @@ public final class MasterProcedureProtos {
return userInfoBuilder_;
}
private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription snapshot_ = null;
private org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription snapshot_ = null;
private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder> snapshotBuilder_;
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder> snapshotBuilder_;
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
@@ -18660,9 +18678,9 @@ public final class MasterProcedureProtos {
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription getSnapshot() {
public org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription getSnapshot() {
if (snapshotBuilder_ == null) {
return snapshot_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance() : snapshot_;
return snapshot_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.getDefaultInstance() : snapshot_;
} else {
return snapshotBuilder_.getMessage();
}
@@ -18670,7 +18688,7 @@ public final class MasterProcedureProtos {
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
public Builder setSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription value) {
public Builder setSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription value) {
if (snapshotBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
@@ -18687,7 +18705,7 @@ public final class MasterProcedureProtos {
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
public Builder setSnapshot(
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.Builder builderForValue) {
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder builderForValue) {
if (snapshotBuilder_ == null) {
snapshot_ = builderForValue.build();
onChanged();
@@ -18700,13 +18718,13 @@ public final class MasterProcedureProtos {
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
public Builder mergeSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription value) {
public Builder mergeSnapshot(org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription value) {
if (snapshotBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
snapshot_ != null &&
snapshot_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance()) {
snapshot_ != org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.getDefaultInstance()) {
snapshot_ =
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.newBuilder(snapshot_).mergeFrom(value).buildPartial();
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.newBuilder(snapshot_).mergeFrom(value).buildPartial();
} else {
snapshot_ = value;
}
@@ -18733,7 +18751,7 @@ public final class MasterProcedureProtos {
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.Builder getSnapshotBuilder() {
public org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder getSnapshotBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getSnapshotFieldBuilder().getBuilder();
@@ -18741,23 +18759,23 @@ public final class MasterProcedureProtos {
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() {
public org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder getSnapshotOrBuilder() {
if (snapshotBuilder_ != null) {
return snapshotBuilder_.getMessageOrBuilder();
} else {
return snapshot_ == null ?
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.getDefaultInstance() : snapshot_;
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.getDefaultInstance() : snapshot_;
}
}
/**
* <code>required .hbase.pb.SnapshotDescription snapshot = 2;</code>
*/
private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder>
getSnapshotFieldBuilder() {
if (snapshotBuilder_ == null) {
snapshotBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescriptionOrBuilder>(
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescriptionOrBuilder>(
getSnapshot(),
getParentForChildren(),
isClean());
@@ -24058,208 +24076,210 @@ public final class MasterProcedureProtos {
static {
java.lang.String[] descriptorData = {
"\n\025MasterProcedure.proto\022\010hbase.pb\032\013HBase" +
".proto\032\tRPC.proto\"\234\001\n\024CreateTableStateDa" +
"ta\022,\n\tuser_info\030\001 \002(\0132\031.hbase.pb.UserInf" +
"ormation\022+\n\014table_schema\030\002 \002(\0132\025.hbase.p" +
"b.TableSchema\022)\n\013region_info\030\003 \003(\0132\024.hba" +
"se.pb.RegionInfo\"\332\001\n\024ModifyTableStateDat" +
"a\022,\n\tuser_info\030\001 \002(\0132\031.hbase.pb.UserInfo" +
"rmation\0226\n\027unmodified_table_schema\030\002 \001(\013" +
"2\025.hbase.pb.TableSchema\0224\n\025modified_tabl" +
"e_schema\030\003 \002(\0132\025.hbase.pb.TableSchema\022&\n",
"\036delete_column_family_in_modify\030\004 \002(\010\"\340\001" +
"\n\026TruncateTableStateData\022,\n\tuser_info\030\001 " +
"\002(\0132\031.hbase.pb.UserInformation\022\027\n\017preser" +
"ve_splits\030\002 \002(\010\022\'\n\ntable_name\030\003 \001(\0132\023.hb" +
"ase.pb.TableName\022+\n\014table_schema\030\004 \001(\0132\025" +
".hbase.pb.TableSchema\022)\n\013region_info\030\005 \003" +
"(\0132\024.hbase.pb.RegionInfo\"\230\001\n\024DeleteTable" +
"StateData\022,\n\tuser_info\030\001 \002(\0132\031.hbase.pb." +
"UserInformation\022\'\n\ntable_name\030\002 \002(\0132\023.hb" +
"ase.pb.TableName\022)\n\013region_info\030\003 \003(\0132\024.",
"hbase.pb.RegionInfo\"W\n\030CreateNamespaceSt" +
"ateData\022;\n\024namespace_descriptor\030\001 \002(\0132\035." +
"hbase.pb.NamespaceDescriptor\"\237\001\n\030ModifyN" +
"amespaceStateData\022;\n\024namespace_descripto" +
"r\030\001 \002(\0132\035.hbase.pb.NamespaceDescriptor\022F" +
"\n\037unmodified_namespace_descriptor\030\002 \001(\0132" +
"\035.hbase.pb.NamespaceDescriptor\"o\n\030Delete" +
"NamespaceStateData\022\026\n\016namespace_name\030\001 \002" +
"(\t\022;\n\024namespace_descriptor\030\002 \001(\0132\035.hbase" +
".pb.NamespaceDescriptor\"\344\001\n\030AddColumnFam",
"ilyStateData\022,\n\tuser_info\030\001 \002(\0132\031.hbase." +
"pb.UserInformation\022\'\n\ntable_name\030\002 \002(\0132\023" +
".hbase.pb.TableName\0229\n\023columnfamily_sche" +
"ma\030\003 \002(\0132\034.hbase.pb.ColumnFamilySchema\0226" +
"\n\027unmodified_table_schema\030\004 \001(\0132\025.hbase." +
"pb.TableSchema\"\347\001\n\033ModifyColumnFamilySta" +
"teData\022,\n\tuser_info\030\001 \002(\0132\031.hbase.pb.Use" +
"rInformation\022\'\n\ntable_name\030\002 \002(\0132\023.hbase" +
".pb.TableName\0229\n\023columnfamily_schema\030\003 \002" +
"(\0132\034.hbase.pb.ColumnFamilySchema\0226\n\027unmo",
"dified_table_schema\030\004 \001(\0132\025.hbase.pb.Tab" +
"leSchema\"\307\001\n\033DeleteColumnFamilyStateData" +
".proto\032\tRPC.proto\032\016Snapshot.proto\"\234\001\n\024Cr" +
"eateTableStateData\022,\n\tuser_info\030\001 \002(\0132\031." +
"hbase.pb.UserInformation\022+\n\014table_schema" +
"\030\002 \002(\0132\025.hbase.pb.TableSchema\022)\n\013region_" +
"info\030\003 \003(\0132\024.hbase.pb.RegionInfo\"\332\001\n\024Mod" +
"ifyTableStateData\022,\n\tuser_info\030\001 \002(\0132\031.h" +
"base.pb.UserInformation\0226\n\027unmodified_ta" +
"ble_schema\030\002 \001(\0132\025.hbase.pb.TableSchema\022" +
"4\n\025modified_table_schema\030\003 \002(\0132\025.hbase.p",
"b.TableSchema\022&\n\036delete_column_family_in" +
"_modify\030\004 \002(\010\"\340\001\n\026TruncateTableStateData" +
"\022,\n\tuser_info\030\001 \002(\0132\031.hbase.pb.UserInfor" +
"mation\022\'\n\ntable_name\030\002 \002(\0132\023.hbase.pb.Ta" +
"bleName\022\031\n\021columnfamily_name\030\003 \002(\014\0226\n\027un" +
"modified_table_schema\030\004 \001(\0132\025.hbase.pb.T" +
"ableSchema\"\215\001\n\024EnableTableStateData\022,\n\tu" +
"ser_info\030\001 \002(\0132\031.hbase.pb.UserInformatio" +
"n\022\'\n\ntable_name\030\002 \002(\0132\023.hbase.pb.TableNa" +
"me\022\036\n\026skip_table_state_check\030\003 \002(\010\"\216\001\n\025D",
"isableTableStateData\022,\n\tuser_info\030\001 \002(\0132" +
"\031.hbase.pb.UserInformation\022\'\n\ntable_name" +
"\030\002 \002(\0132\023.hbase.pb.TableName\022\036\n\026skip_tabl" +
"e_state_check\030\003 \002(\010\"u\n\037RestoreParentToCh" +
"ildRegionsPair\022\032\n\022parent_region_name\030\001 \002" +
"(\t\022\032\n\022child1_region_name\030\002 \002(\t\022\032\n\022child2" +
"_region_name\030\003 \002(\t\"\245\002\n\026CloneSnapshotStat" +
"eData\022,\n\tuser_info\030\001 \002(\0132\031.hbase.pb.User" +
"Information\022/\n\010snapshot\030\002 \002(\0132\035.hbase.pb" +
".SnapshotDescription\022+\n\014table_schema\030\003 \002",
"(\0132\025.hbase.pb.TableSchema\022)\n\013region_info" +
"\030\004 \003(\0132\024.hbase.pb.RegionInfo\022T\n!parent_t" +
"o_child_regions_pair_list\030\005 \003(\0132).hbase." +
"pb.RestoreParentToChildRegionsPair\"\245\003\n\030R" +
"estoreSnapshotStateData\022,\n\tuser_info\030\001 \002" +
"(\0132\031.hbase.pb.UserInformation\022/\n\010snapsho" +
"t\030\002 \002(\0132\035.hbase.pb.SnapshotDescription\0224" +
"\n\025modified_table_schema\030\003 \002(\0132\025.hbase.pb" +
".TableSchema\0225\n\027region_info_for_restore\030" +
"\004 \003(\0132\024.hbase.pb.RegionInfo\0224\n\026region_in",
"fo_for_remove\030\005 \003(\0132\024.hbase.pb.RegionInf" +
"o\0221\n\023region_info_for_add\030\006 \003(\0132\024.hbase.p" +
"b.RegionInfo\022T\n!parent_to_child_regions_" +
"pair_list\030\007 \003(\0132).hbase.pb.RestoreParent" +
"ToChildRegionsPair\"\300\001\n\032MergeTableRegions" +
"StateData\022,\n\tuser_info\030\001 \002(\0132\031.hbase.pb." +
"UserInformation\022)\n\013region_info\030\002 \003(\0132\024.h" +
"base.pb.RegionInfo\0220\n\022merged_region_info" +
"\030\003 \002(\0132\024.hbase.pb.RegionInfo\022\027\n\010forcible" +
"\030\004 \001(\010:\005false\"\254\001\n\031SplitTableRegionStateD",
"ata\022,\n\tuser_info\030\001 \002(\0132\031.hbase.pb.UserIn" +
"formation\0220\n\022parent_region_info\030\002 \002(\0132\024." +
"hbase.pb.RegionInfo\022/\n\021child_region_info" +
"\030\003 \003(\0132\024.hbase.pb.RegionInfo\"\201\002\n\024ServerC" +
"rashStateData\022)\n\013server_name\030\001 \002(\0132\024.hba" +
"se.pb.ServerName\022\036\n\026distributed_log_repl" +
"ay\030\002 \001(\010\0227\n\031regions_on_crashed_server\030\003 " +
"\003(\0132\024.hbase.pb.RegionInfo\022.\n\020regions_ass" +
"igned\030\004 \003(\0132\024.hbase.pb.RegionInfo\022\025\n\rcar" +
"rying_meta\030\005 \001(\010\022\036\n\020should_split_wal\030\006 \001",
"(\010:\004true*\330\001\n\020CreateTableState\022\036\n\032CREATE_" +
"TABLE_PRE_OPERATION\020\001\022 \n\034CREATE_TABLE_WR" +
"ITE_FS_LAYOUT\020\002\022\034\n\030CREATE_TABLE_ADD_TO_M" +
"ETA\020\003\022\037\n\033CREATE_TABLE_ASSIGN_REGIONS\020\004\022\"" +
"\n\036CREATE_TABLE_UPDATE_DESC_CACHE\020\005\022\037\n\033CR" +
"EATE_TABLE_POST_OPERATION\020\006*\207\002\n\020ModifyTa" +
"bleState\022\030\n\024MODIFY_TABLE_PREPARE\020\001\022\036\n\032MO" +
"DIFY_TABLE_PRE_OPERATION\020\002\022(\n$MODIFY_TAB" +
"LE_UPDATE_TABLE_DESCRIPTOR\020\003\022&\n\"MODIFY_T" +
"ABLE_REMOVE_REPLICA_COLUMN\020\004\022!\n\035MODIFY_T",
"ABLE_DELETE_FS_LAYOUT\020\005\022\037\n\033MODIFY_TABLE_" +
"POST_OPERATION\020\006\022#\n\037MODIFY_TABLE_REOPEN_" +
"ALL_REGIONS\020\007*\212\002\n\022TruncateTableState\022 \n\034" +
"TRUNCATE_TABLE_PRE_OPERATION\020\001\022#\n\037TRUNCA" +
"TE_TABLE_REMOVE_FROM_META\020\002\022\"\n\036TRUNCATE_" +
"TABLE_CLEAR_FS_LAYOUT\020\003\022#\n\037TRUNCATE_TABL" +
"E_CREATE_FS_LAYOUT\020\004\022\036\n\032TRUNCATE_TABLE_A" +
"DD_TO_META\020\005\022!\n\035TRUNCATE_TABLE_ASSIGN_RE" +
"GIONS\020\006\022!\n\035TRUNCATE_TABLE_POST_OPERATION" +
"\020\007*\337\001\n\020DeleteTableState\022\036\n\032DELETE_TABLE_",
"PRE_OPERATION\020\001\022!\n\035DELETE_TABLE_REMOVE_F" +
"ROM_META\020\002\022 \n\034DELETE_TABLE_CLEAR_FS_LAYO" +
"UT\020\003\022\"\n\036DELETE_TABLE_UPDATE_DESC_CACHE\020\004" +
"\022!\n\035DELETE_TABLE_UNASSIGN_REGIONS\020\005\022\037\n\033D" +
"ELETE_TABLE_POST_OPERATION\020\006*\320\001\n\024CreateN" +
"amespaceState\022\034\n\030CREATE_NAMESPACE_PREPAR" +
"E\020\001\022%\n!CREATE_NAMESPACE_CREATE_DIRECTORY" +
"\020\002\022)\n%CREATE_NAMESPACE_INSERT_INTO_NS_TA" +
"BLE\020\003\022\036\n\032CREATE_NAMESPACE_UPDATE_ZK\020\004\022(\n" +
"$CREATE_NAMESPACE_SET_NAMESPACE_QUOTA\020\005*",
"z\n\024ModifyNamespaceState\022\034\n\030MODIFY_NAMESP" +
"ACE_PREPARE\020\001\022$\n MODIFY_NAMESPACE_UPDATE" +
"_NS_TABLE\020\002\022\036\n\032MODIFY_NAMESPACE_UPDATE_Z" +
"K\020\003*\332\001\n\024DeleteNamespaceState\022\034\n\030DELETE_N" +
"AMESPACE_PREPARE\020\001\022)\n%DELETE_NAMESPACE_D" +
"ELETE_FROM_NS_TABLE\020\002\022#\n\037DELETE_NAMESPAC" +
"E_REMOVE_FROM_ZK\020\003\022\'\n#DELETE_NAMESPACE_D" +
"ELETE_DIRECTORIES\020\004\022+\n\'DELETE_NAMESPACE_" +
"REMOVE_NAMESPACE_QUOTA\020\005*\331\001\n\024AddColumnFa" +
"milyState\022\035\n\031ADD_COLUMN_FAMILY_PREPARE\020\001",
"\022#\n\037ADD_COLUMN_FAMILY_PRE_OPERATION\020\002\022-\n" +
")ADD_COLUMN_FAMILY_UPDATE_TABLE_DESCRIPT" +
"OR\020\003\022$\n ADD_COLUMN_FAMILY_POST_OPERATION" +
"\020\004\022(\n$ADD_COLUMN_FAMILY_REOPEN_ALL_REGIO" +
"NS\020\005*\353\001\n\027ModifyColumnFamilyState\022 \n\034MODI" +
"FY_COLUMN_FAMILY_PREPARE\020\001\022&\n\"MODIFY_COL" +
"UMN_FAMILY_PRE_OPERATION\020\002\0220\n,MODIFY_COL" +
"UMN_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022\'\n#" +
"MODIFY_COLUMN_FAMILY_POST_OPERATION\020\004\022+\n" +
"\'MODIFY_COLUMN_FAMILY_REOPEN_ALL_REGIONS",
"\020\005*\226\002\n\027DeleteColumnFamilyState\022 \n\034DELETE" +
"_COLUMN_FAMILY_PREPARE\020\001\022&\n\"DELETE_COLUM" +
"N_FAMILY_PRE_OPERATION\020\002\0220\n,DELETE_COLUM" +
"N_FAMILY_UPDATE_TABLE_DESCRIPTOR\020\003\022)\n%DE" +
"LETE_COLUMN_FAMILY_DELETE_FS_LAYOUT\020\004\022\'\n" +
"#DELETE_COLUMN_FAMILY_POST_OPERATION\020\005\022+" +
"\n\'DELETE_COLUMN_FAMILY_REOPEN_ALL_REGION" +
"S\020\006*\350\001\n\020EnableTableState\022\030\n\024ENABLE_TABLE" +
"_PREPARE\020\001\022\036\n\032ENABLE_TABLE_PRE_OPERATION" +
"\020\002\022)\n%ENABLE_TABLE_SET_ENABLING_TABLE_ST",
"ATE\020\003\022$\n ENABLE_TABLE_MARK_REGIONS_ONLIN" +
"E\020\004\022(\n$ENABLE_TABLE_SET_ENABLED_TABLE_ST" +
"ATE\020\005\022\037\n\033ENABLE_TABLE_POST_OPERATION\020\006*\362" +
"\001\n\021DisableTableState\022\031\n\025DISABLE_TABLE_PR" +
"EPARE\020\001\022\037\n\033DISABLE_TABLE_PRE_OPERATION\020\002" +
"\022+\n\'DISABLE_TABLE_SET_DISABLING_TABLE_ST" +
"ATE\020\003\022&\n\"DISABLE_TABLE_MARK_REGIONS_OFFL" +
"INE\020\004\022*\n&DISABLE_TABLE_SET_DISABLED_TABL" +
"E_STATE\020\005\022 \n\034DISABLE_TABLE_POST_OPERATIO" +
"N\020\006*\346\001\n\022CloneSnapshotState\022 \n\034CLONE_SNAP",
"SHOT_PRE_OPERATION\020\001\022\"\n\036CLONE_SNAPSHOT_W" +
"RITE_FS_LAYOUT\020\002\022\036\n\032CLONE_SNAPSHOT_ADD_T" +
"O_META\020\003\022!\n\035CLONE_SNAPSHOT_ASSIGN_REGION" +
"S\020\004\022$\n CLONE_SNAPSHOT_UPDATE_DESC_CACHE\020" +
"\005\022!\n\035CLONE_SNAPSHOT_POST_OPERATION\020\006*\260\001\n" +
"\024RestoreSnapshotState\022\"\n\036RESTORE_SNAPSHO" +
"T_PRE_OPERATION\020\001\022,\n(RESTORE_SNAPSHOT_UP" +
"DATE_TABLE_DESCRIPTOR\020\002\022$\n RESTORE_SNAPS" +
"HOT_WRITE_FS_LAYOUT\020\003\022 \n\034RESTORE_SNAPSHO" +
"T_UPDATE_META\020\004*\376\003\n\026MergeTableRegionsSta",
"te\022\037\n\033MERGE_TABLE_REGIONS_PREPARE\020\001\022.\n*M" +
"ERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_R" +
"S\020\002\022+\n\'MERGE_TABLE_REGIONS_PRE_MERGE_OPE" +
"RATION\020\003\022/\n+MERGE_TABLE_REGIONS_SET_MERG" +
"ING_TABLE_STATE\020\004\022%\n!MERGE_TABLE_REGIONS" +
"_CLOSE_REGIONS\020\005\022,\n(MERGE_TABLE_REGIONS_" +
"CREATE_MERGED_REGION\020\006\0222\n.MERGE_TABLE_RE" +
"GIONS_PRE_MERGE_COMMIT_OPERATION\020\007\022#\n\037ME" +
"RGE_TABLE_REGIONS_UPDATE_META\020\010\0223\n/MERGE" +
"_TABLE_REGIONS_POST_MERGE_COMMIT_OPERATI",
"ON\020\t\022*\n&MERGE_TABLE_REGIONS_OPEN_MERGED_" +
"REGION\020\n\022&\n\"MERGE_TABLE_REGIONS_POST_OPE" +
"RATION\020\013*\304\003\n\025SplitTableRegionState\022\036\n\032SP" +
"LIT_TABLE_REGION_PREPARE\020\001\022$\n SPLIT_TABL" +
"E_REGION_PRE_OPERATION\020\002\0220\n,SPLIT_TABLE_" +
"REGION_SET_SPLITTING_TABLE_STATE\020\003\022*\n&SP" +
"LIT_TABLE_REGION_CLOSE_PARENT_REGION\020\004\022." +
"\n*SPLIT_TABLE_REGION_CREATE_DAUGHTER_REG" +
"IONS\020\005\0220\n,SPLIT_TABLE_REGION_PRE_OPERATI" +
"ON_BEFORE_PONR\020\006\022\"\n\036SPLIT_TABLE_REGION_U",
"PDATE_META\020\007\022/\n+SPLIT_TABLE_REGION_PRE_O" +
"PERATION_AFTER_PONR\020\010\022)\n%SPLIT_TABLE_REG" +
"ION_OPEN_CHILD_REGIONS\020\t\022%\n!SPLIT_TABLE_" +
"REGION_POST_OPERATION\020\n*\234\002\n\020ServerCrashS" +
"tate\022\026\n\022SERVER_CRASH_START\020\001\022\035\n\031SERVER_C" +
"RASH_PROCESS_META\020\002\022\034\n\030SERVER_CRASH_GET_" +
"REGIONS\020\003\022\036\n\032SERVER_CRASH_NO_SPLIT_LOGS\020" +
"\004\022\033\n\027SERVER_CRASH_SPLIT_LOGS\020\005\022#\n\037SERVER" +
"_CRASH_PREPARE_LOG_REPLAY\020\006\022\027\n\023SERVER_CR" +
"ASH_ASSIGN\020\010\022\037\n\033SERVER_CRASH_WAIT_ON_ASS",
"IGN\020\t\022\027\n\023SERVER_CRASH_FINISH\020dBR\n1org.ap" +
"ache.hadoop.hbase.shaded.protobuf.genera" +
"tedB\025MasterProcedureProtosH\001\210\001\001\240\001\001"
"mation\022\027\n\017preserve_splits\030\002 \002(\010\022\'\n\ntable" +
"_name\030\003 \001(\0132\023.hbase.pb.TableName\022+\n\014tabl" +
"e_schema\030\004 \001(\0132\025.hbase.pb.TableSchema\022)\n" +
"\013region_info\030\005 \003(\0132\024.hbase.pb.RegionInfo" +
"\"\230\001\n\024DeleteTableStateData\022,\n\tuser_info\030\001" +
" \002(\0132\031.hbase.pb.UserInformation\022\'\n\ntable" +
"_name\030\002 \002(\0132\023.hbase.pb.TableName\022)\n\013regi",
"on_info\030\003 \003(\0132\024.hbase.pb.RegionInfo\"W\n\030C" +
"reateNamespaceStateData\022;\n\024namespace_des" +
"criptor\030\001 \002(\0132\035.hbase.pb.NamespaceDescri" +
"ptor\"\237\001\n\030ModifyNamespaceStateData\022;\n\024nam" +
"espace_descriptor\030\001 \002(\0132\035.hbase.pb.Names" +
"paceDescriptor\022F\n\037unmodified_namespace_d" +
"escriptor\030\002 \001(\0132\035.hbase.pb.NamespaceDesc" +
"riptor\"o\n\030DeleteNamespaceStateData\022\026\n\016na" +
"mespace_name\030\001 \002(\t\022;\n\024namespace_descript" +
"or\030\002 \001(\0132\035.hbase.pb.NamespaceDescriptor\"",
"\344\001\n\030AddColumnFamilyStateData\022,\n\tuser_inf" +
"o\030\001 \002(\0132\031.hbase.pb.UserInformation\022\'\n\nta" +
"ble_name\030\002 \002(\0132\023.hbase.pb.TableName\0229\n\023c" +
"olumnfamily_schema\030\003 \002(\0132\034.hbase.pb.Colu" +
"mnFamilySchema\0226\n\027unmodified_table_schem" +
"a\030\004 \001(\0132\025.hbase.pb.TableSchema\"\347\001\n\033Modif" +
"yColumnFamilyStateData\022,\n\tuser_info\030\001 \002(" +
"\0132\031.hbase.pb.UserInformation\022\'\n\ntable_na" +
"me\030\002 \002(\0132\023.hbase.pb.TableName\0229\n\023columnf" +
"amily_schema\030\003 \002(\0132\034.hbase.pb.ColumnFami",
"lySchema\0226\n\027unmodified_table_schema\030\004 \001(" +
"\0132\025.hbase.pb.TableSchema\"\307\001\n\033DeleteColum" +
"nFamilyStateData\022,\n\tuser_info\030\001 \002(\0132\031.hb" +
"ase.pb.UserInformation\022\'\n\ntable_name\030\002 \002" +
"(\0132\023.hbase.pb.TableName\022\031\n\021columnfamily_" +
"name\030\003 \002(\014\0226\n\027unmodified_table_schema\030\004 " +
"\001(\0132\025.hbase.pb.TableSchema\"\215\001\n\024EnableTab" +
"leStateData\022,\n\tuser_info\030\001 \002(\0132\031.hbase.p" +
"b.UserInformation\022\'\n\ntable_name\030\002 \002(\0132\023." +
"hbase.pb.TableName\022\036\n\026skip_table_state_c",
"heck\030\003 \002(\010\"\216\001\n\025DisableTableStateData\022,\n\t" +
"user_info\030\001 \002(\0132\031.hbase.pb.UserInformati" +
"on\022\'\n\ntable_name\030\002 \002(\0132\023.hbase.pb.TableN" +
"ame\022\036\n\026skip_table_state_check\030\003 \002(\010\"u\n\037R" +
"estoreParentToChildRegionsPair\022\032\n\022parent" +
"_region_name\030\001 \002(\t\022\032\n\022child1_region_name" +
"\030\002 \002(\t\022\032\n\022child2_region_name\030\003 \002(\t\"\245\002\n\026C" +
"loneSnapshotStateData\022,\n\tuser_info\030\001 \002(\013" +
"2\031.hbase.pb.UserInformation\022/\n\010snapshot\030" +
"\002 \002(\0132\035.hbase.pb.SnapshotDescription\022+\n\014",
"table_schema\030\003 \002(\0132\025.hbase.pb.TableSchem" +
"a\022)\n\013region_info\030\004 \003(\0132\024.hbase.pb.Region" +
"Info\022T\n!parent_to_child_regions_pair_lis" +
"t\030\005 \003(\0132).hbase.pb.RestoreParentToChildR" +
"egionsPair\"\245\003\n\030RestoreSnapshotStateData\022" +
",\n\tuser_info\030\001 \002(\0132\031.hbase.pb.UserInform" +
"ation\022/\n\010snapshot\030\002 \002(\0132\035.hbase.pb.Snaps" +
"hotDescription\0224\n\025modified_table_schema\030" +
"\003 \002(\0132\025.hbase.pb.TableSchema\0225\n\027region_i" +
"nfo_for_restore\030\004 \003(\0132\024.hbase.pb.RegionI",
"nfo\0224\n\026region_info_for_remove\030\005 \003(\0132\024.hb" +
"ase.pb.RegionInfo\0221\n\023region_info_for_add" +
"\030\006 \003(\0132\024.hbase.pb.RegionInfo\022T\n!parent_t" +
"o_child_regions_pair_list\030\007 \003(\0132).hbase." +
"pb.RestoreParentToChildRegionsPair\"\300\001\n\032M" +
"ergeTableRegionsStateData\022,\n\tuser_info\030\001" +
" \002(\0132\031.hbase.pb.UserInformation\022)\n\013regio" +
"n_info\030\002 \003(\0132\024.hbase.pb.RegionInfo\0220\n\022me" +
"rged_region_info\030\003 \002(\0132\024.hbase.pb.Region" +
"Info\022\027\n\010forcible\030\004 \001(\010:\005false\"\254\001\n\031SplitT",
"ableRegionStateData\022,\n\tuser_info\030\001 \002(\0132\031" +
".hbase.pb.UserInformation\0220\n\022parent_regi" +
"on_info\030\002 \002(\0132\024.hbase.pb.RegionInfo\022/\n\021c" +
"hild_region_info\030\003 \003(\0132\024.hbase.pb.Region" +
"Info\"\201\002\n\024ServerCrashStateData\022)\n\013server_" +
"name\030\001 \002(\0132\024.hbase.pb.ServerName\022\036\n\026dist" +
"ributed_log_replay\030\002 \001(\010\0227\n\031regions_on_c" +
"rashed_server\030\003 \003(\0132\024.hbase.pb.RegionInf" +
"o\022.\n\020regions_assigned\030\004 \003(\0132\024.hbase.pb.R" +
"egionInfo\022\025\n\rcarrying_meta\030\005 \001(\010\022\036\n\020shou",
"ld_split_wal\030\006 \001(\010:\004true*\330\001\n\020CreateTable" +
"State\022\036\n\032CREATE_TABLE_PRE_OPERATION\020\001\022 \n" +
"\034CREATE_TABLE_WRITE_FS_LAYOUT\020\002\022\034\n\030CREAT" +
"E_TABLE_ADD_TO_META\020\003\022\037\n\033CREATE_TABLE_AS" +
"SIGN_REGIONS\020\004\022\"\n\036CREATE_TABLE_UPDATE_DE" +
"SC_CACHE\020\005\022\037\n\033CREATE_TABLE_POST_OPERATIO" +
"N\020\006*\207\002\n\020ModifyTableState\022\030\n\024MODIFY_TABLE" +
"_PREPARE\020\001\022\036\n\032MODIFY_TABLE_PRE_OPERATION" +
"\020\002\022(\n$MODIFY_TABLE_UPDATE_TABLE_DESCRIPT" +
"OR\020\003\022&\n\"MODIFY_TABLE_REMOVE_REPLICA_COLU",
"MN\020\004\022!\n\035MODIFY_TABLE_DELETE_FS_LAYOUT\020\005\022" +
"\037\n\033MODIFY_TABLE_POST_OPERATION\020\006\022#\n\037MODI" +
"FY_TABLE_REOPEN_ALL_REGIONS\020\007*\212\002\n\022Trunca" +
"teTableState\022 \n\034TRUNCATE_TABLE_PRE_OPERA" +
"TION\020\001\022#\n\037TRUNCATE_TABLE_REMOVE_FROM_MET" +
"A\020\002\022\"\n\036TRUNCATE_TABLE_CLEAR_FS_LAYOUT\020\003\022" +
"#\n\037TRUNCATE_TABLE_CREATE_FS_LAYOUT\020\004\022\036\n\032" +
"TRUNCATE_TABLE_ADD_TO_META\020\005\022!\n\035TRUNCATE" +
"_TABLE_ASSIGN_REGIONS\020\006\022!\n\035TRUNCATE_TABL" +
"E_POST_OPERATION\020\007*\337\001\n\020DeleteTableState\022",
"\036\n\032DELETE_TABLE_PRE_OPERATION\020\001\022!\n\035DELET" +
"E_TABLE_REMOVE_FROM_META\020\002\022 \n\034DELETE_TAB" +
"LE_CLEAR_FS_LAYOUT\020\003\022\"\n\036DELETE_TABLE_UPD" +
"ATE_DESC_CACHE\020\004\022!\n\035DELETE_TABLE_UNASSIG" +
"N_REGIONS\020\005\022\037\n\033DELETE_TABLE_POST_OPERATI" +
"ON\020\006*\320\001\n\024CreateNamespaceState\022\034\n\030CREATE_" +
"NAMESPACE_PREPARE\020\001\022%\n!CREATE_NAMESPACE_" +
"CREATE_DIRECTORY\020\002\022)\n%CREATE_NAMESPACE_I" +
"NSERT_INTO_NS_TABLE\020\003\022\036\n\032CREATE_NAMESPAC" +
"E_UPDATE_ZK\020\004\022(\n$CREATE_NAMESPACE_SET_NA",
"MESPACE_QUOTA\020\005*z\n\024ModifyNamespaceState\022" +
"\034\n\030MODIFY_NAMESPACE_PREPARE\020\001\022$\n MODIFY_" +
"NAMESPACE_UPDATE_NS_TABLE\020\002\022\036\n\032MODIFY_NA" +
"MESPACE_UPDATE_ZK\020\003*\332\001\n\024DeleteNamespaceS" +
"tate\022\034\n\030DELETE_NAMESPACE_PREPARE\020\001\022)\n%DE" +
"LETE_NAMESPACE_DELETE_FROM_NS_TABLE\020\002\022#\n" +
"\037DELETE_NAMESPACE_REMOVE_FROM_ZK\020\003\022\'\n#DE" +
"LETE_NAMESPACE_DELETE_DIRECTORIES\020\004\022+\n\'D" +
"ELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA\020\005" +
"*\331\001\n\024AddColumnFamilyState\022\035\n\031ADD_COLUMN_",
"FAMILY_PREPARE\020\001\022#\n\037ADD_COLUMN_FAMILY_PR" +
"E_OPERATION\020\002\022-\n)ADD_COLUMN_FAMILY_UPDAT" +
"E_TABLE_DESCRIPTOR\020\003\022$\n ADD_COLUMN_FAMIL" +
"Y_POST_OPERATION\020\004\022(\n$ADD_COLUMN_FAMILY_" +
"REOPEN_ALL_REGIONS\020\005*\353\001\n\027ModifyColumnFam" +
"ilyState\022 \n\034MODIFY_COLUMN_FAMILY_PREPARE" +
"\020\001\022&\n\"MODIFY_COLUMN_FAMILY_PRE_OPERATION" +
"\020\002\0220\n,MODIFY_COLUMN_FAMILY_UPDATE_TABLE_" +
"DESCRIPTOR\020\003\022\'\n#MODIFY_COLUMN_FAMILY_POS" +
"T_OPERATION\020\004\022+\n\'MODIFY_COLUMN_FAMILY_RE",
"OPEN_ALL_REGIONS\020\005*\226\002\n\027DeleteColumnFamil" +
"yState\022 \n\034DELETE_COLUMN_FAMILY_PREPARE\020\001" +
"\022&\n\"DELETE_COLUMN_FAMILY_PRE_OPERATION\020\002" +
"\0220\n,DELETE_COLUMN_FAMILY_UPDATE_TABLE_DE" +
"SCRIPTOR\020\003\022)\n%DELETE_COLUMN_FAMILY_DELET" +
"E_FS_LAYOUT\020\004\022\'\n#DELETE_COLUMN_FAMILY_PO" +
"ST_OPERATION\020\005\022+\n\'DELETE_COLUMN_FAMILY_R" +
"EOPEN_ALL_REGIONS\020\006*\350\001\n\020EnableTableState" +
"\022\030\n\024ENABLE_TABLE_PREPARE\020\001\022\036\n\032ENABLE_TAB" +
"LE_PRE_OPERATION\020\002\022)\n%ENABLE_TABLE_SET_E",
"NABLING_TABLE_STATE\020\003\022$\n ENABLE_TABLE_MA" +
"RK_REGIONS_ONLINE\020\004\022(\n$ENABLE_TABLE_SET_" +
"ENABLED_TABLE_STATE\020\005\022\037\n\033ENABLE_TABLE_PO" +
"ST_OPERATION\020\006*\362\001\n\021DisableTableState\022\031\n\025" +
"DISABLE_TABLE_PREPARE\020\001\022\037\n\033DISABLE_TABLE" +
"_PRE_OPERATION\020\002\022+\n\'DISABLE_TABLE_SET_DI" +
"SABLING_TABLE_STATE\020\003\022&\n\"DISABLE_TABLE_M" +
"ARK_REGIONS_OFFLINE\020\004\022*\n&DISABLE_TABLE_S" +
"ET_DISABLED_TABLE_STATE\020\005\022 \n\034DISABLE_TAB" +
"LE_POST_OPERATION\020\006*\206\002\n\022CloneSnapshotSta",
"te\022 \n\034CLONE_SNAPSHOT_PRE_OPERATION\020\001\022\"\n\036" +
"CLONE_SNAPSHOT_WRITE_FS_LAYOUT\020\002\022\036\n\032CLON" +
"E_SNAPSHOT_ADD_TO_META\020\003\022!\n\035CLONE_SNAPSH" +
"OT_ASSIGN_REGIONS\020\004\022$\n CLONE_SNAPSHOT_UP" +
"DATE_DESC_CACHE\020\005\022!\n\035CLONE_SNAPSHOT_POST" +
"_OPERATION\020\006\022\036\n\032CLONE_SNAPHOST_RESTORE_A" +
"CL\020\007*\322\001\n\024RestoreSnapshotState\022\"\n\036RESTORE" +
"_SNAPSHOT_PRE_OPERATION\020\001\022,\n(RESTORE_SNA" +
"PSHOT_UPDATE_TABLE_DESCRIPTOR\020\002\022$\n RESTO" +
"RE_SNAPSHOT_WRITE_FS_LAYOUT\020\003\022 \n\034RESTORE",
"_SNAPSHOT_UPDATE_META\020\004\022 \n\034RESTORE_SNAPS" +
"HOT_RESTORE_ACL\020\005*\376\003\n\026MergeTableRegionsS" +
"tate\022\037\n\033MERGE_TABLE_REGIONS_PREPARE\020\001\022.\n" +
"*MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME" +
"_RS\020\002\022+\n\'MERGE_TABLE_REGIONS_PRE_MERGE_O" +
"PERATION\020\003\022/\n+MERGE_TABLE_REGIONS_SET_ME" +
"RGING_TABLE_STATE\020\004\022%\n!MERGE_TABLE_REGIO" +
"NS_CLOSE_REGIONS\020\005\022,\n(MERGE_TABLE_REGION" +
"S_CREATE_MERGED_REGION\020\006\0222\n.MERGE_TABLE_" +
"REGIONS_PRE_MERGE_COMMIT_OPERATION\020\007\022#\n\037",
"MERGE_TABLE_REGIONS_UPDATE_META\020\010\0223\n/MER" +
"GE_TABLE_REGIONS_POST_MERGE_COMMIT_OPERA" +
"TION\020\t\022*\n&MERGE_TABLE_REGIONS_OPEN_MERGE" +
"D_REGION\020\n\022&\n\"MERGE_TABLE_REGIONS_POST_O" +
"PERATION\020\013*\304\003\n\025SplitTableRegionState\022\036\n\032" +
"SPLIT_TABLE_REGION_PREPARE\020\001\022$\n SPLIT_TA" +
"BLE_REGION_PRE_OPERATION\020\002\0220\n,SPLIT_TABL" +
"E_REGION_SET_SPLITTING_TABLE_STATE\020\003\022*\n&" +
"SPLIT_TABLE_REGION_CLOSE_PARENT_REGION\020\004" +
"\022.\n*SPLIT_TABLE_REGION_CREATE_DAUGHTER_R",
"EGIONS\020\005\0220\n,SPLIT_TABLE_REGION_PRE_OPERA" +
"TION_BEFORE_PONR\020\006\022\"\n\036SPLIT_TABLE_REGION" +
"_UPDATE_META\020\007\022/\n+SPLIT_TABLE_REGION_PRE" +
"_OPERATION_AFTER_PONR\020\010\022)\n%SPLIT_TABLE_R" +
"EGION_OPEN_CHILD_REGIONS\020\t\022%\n!SPLIT_TABL" +
"E_REGION_POST_OPERATION\020\n*\234\002\n\020ServerCras" +
"hState\022\026\n\022SERVER_CRASH_START\020\001\022\035\n\031SERVER" +
"_CRASH_PROCESS_META\020\002\022\034\n\030SERVER_CRASH_GE" +
"T_REGIONS\020\003\022\036\n\032SERVER_CRASH_NO_SPLIT_LOG" +
"S\020\004\022\033\n\027SERVER_CRASH_SPLIT_LOGS\020\005\022#\n\037SERV",
"ER_CRASH_PREPARE_LOG_REPLAY\020\006\022\027\n\023SERVER_" +
"CRASH_ASSIGN\020\010\022\037\n\033SERVER_CRASH_WAIT_ON_A" +
"SSIGN\020\t\022\027\n\023SERVER_CRASH_FINISH\020dBR\n1org." +
"apache.hadoop.hbase.shaded.protobuf.gene" +
"ratedB\025MasterProcedureProtosH\001\210\001\001\240\001\001"
};
org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@ -24274,6 +24294,7 @@ public final class MasterProcedureProtos {
new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.getDescriptor(),
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.getDescriptor(),
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.getDescriptor(),
}, assigner);
internal_static_hbase_pb_CreateTableStateData_descriptor =
getDescriptor().getMessageTypes().get(0);
@ -24385,6 +24406,7 @@ public final class MasterProcedureProtos {
new java.lang.String[] { "ServerName", "DistributedLogReplay", "RegionsOnCrashedServer", "RegionsAssigned", "CarryingMeta", "ShouldSplitWal", });
org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.getDescriptor();
org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.getDescriptor();
org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.getDescriptor();
}
// @@protoc_insertion_point(outer_class_scope)

View File

@ -0,0 +1,130 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package hbase.pb;
option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated";
option java_outer_classname = "AccessControlProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
import "HBase.proto";
/**
 * Messages and services in this shaded AccessControl.proto are only used for serializing and
 * deserializing the permissions stored in .snapshotinfo; they should not be used for access
 * control logic, so that coprocessor endpoints stay compatible (use AccessControl.proto under
 * the hbase-protocol module instead).
*/
message Permission {
enum Action {
READ = 0;
WRITE = 1;
EXEC = 2;
CREATE = 3;
ADMIN = 4;
}
enum Type {
Global = 1;
Namespace = 2;
Table = 3;
}
required Type type = 1;
optional GlobalPermission global_permission = 2;
optional NamespacePermission namespace_permission = 3;
optional TablePermission table_permission = 4;
}
message TablePermission {
optional TableName table_name = 1;
optional bytes family = 2;
optional bytes qualifier = 3;
repeated Permission.Action action = 4;
}
message NamespacePermission {
optional bytes namespace_name = 1;
repeated Permission.Action action = 2;
}
message GlobalPermission {
repeated Permission.Action action = 1;
}
message UserPermission {
required bytes user = 1;
required Permission permission = 3;
}
/**
* Content of the /hbase/acl/<table or namespace> znode.
*/
message UsersAndPermissions {
message UserPermissions {
required bytes user = 1;
repeated Permission permissions = 2;
}
repeated UserPermissions user_permissions = 1;
}
message GrantRequest {
required UserPermission user_permission = 1;
optional bool merge_existing_permissions = 2 [default = false];
}
message GrantResponse {
}
message RevokeRequest {
required UserPermission user_permission = 1;
}
message RevokeResponse {
}
message GetUserPermissionsRequest {
optional Permission.Type type = 1;
optional TableName table_name = 2;
optional bytes namespace_name = 3;
}
message GetUserPermissionsResponse {
repeated UserPermission user_permission = 1;
}
message CheckPermissionsRequest {
repeated Permission permission = 1;
}
message CheckPermissionsResponse {
}
service AccessControlService {
rpc Grant(GrantRequest)
returns (GrantResponse);
rpc Revoke(RevokeRequest)
returns (RevokeResponse);
rpc GetUserPermissions(GetUserPermissionsRequest)
returns (GetUserPermissionsResponse);
rpc CheckPermissions(CheckPermissionsRequest)
returns (CheckPermissionsResponse);
}
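Since these messages exist only to carry permissions inside .snapshotinfo, a brief sketch may help; it assumes the classes protoc generates from this file (AccessControlProtos in the shaded package) plus the shaded ByteString, and is illustrative rather than part of the patch:
// Illustrative only: build the UsersAndPermissions payload that the new
// SnapshotDescription field embeds, using the messages declared above.
HBaseProtos.TableName table = HBaseProtos.TableName.newBuilder()
    .setNamespace(ByteString.copyFromUtf8("default"))
    .setQualifier(ByteString.copyFromUtf8("t1"))
    .build();
AccessControlProtos.TablePermission tablePerm =
    AccessControlProtos.TablePermission.newBuilder()
        .setTableName(table)
        .addAction(AccessControlProtos.Permission.Action.READ)
        .addAction(AccessControlProtos.Permission.Action.WRITE)
        .build();
AccessControlProtos.Permission perm = AccessControlProtos.Permission.newBuilder()
    .setType(AccessControlProtos.Permission.Type.Table)
    .setTablePermission(tablePerm)
    .build();
AccessControlProtos.UsersAndPermissions acl =
    AccessControlProtos.UsersAndPermissions.newBuilder()
        .addUserPermissions(AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder()
            .setUser(ByteString.copyFromUtf8("bob"))
            .addPermissions(perm))
        .build();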

View File

@ -169,22 +169,7 @@ message NameInt64Pair {
optional int64 value = 2;
}
/**
* Description of the snapshot to take
*/
message SnapshotDescription {
required string name = 1;
optional string table = 2; // not needed for delete, but checked for in taking snapshot
optional int64 creation_time = 3 [default = 0];
enum Type {
DISABLED = 0;
FLUSH = 1;
SKIPFLUSH = 2;
}
optional Type type = 4 [default = FLUSH];
optional int32 version = 5;
optional string owner = 6;
}
/**
* Description of the distributed procedure to take

View File

@ -34,6 +34,7 @@ import "LockService.proto";
import "Procedure.proto";
import "Quota.proto";
import "Replication.proto";
import "Snapshot.proto";
/* Column-level protobufs */
@ -405,6 +406,7 @@ message RestoreSnapshotRequest {
required SnapshotDescription snapshot = 1;
optional uint64 nonce_group = 2 [default = 0];
optional uint64 nonce = 3 [default = 0];
optional bool restoreACL = 4 [default = false];
}
message RestoreSnapshotResponse {
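The new restoreACL flag defaults to false, so requests from older clients behave exactly as before. A hedged client-side sketch of the wire request (snapshotDesc is an assumed, already-built SnapshotProtos.SnapshotDescription; the nonce fields keep their defaults):
// Illustrative only: ask the master to replay retained ACLs on restore.
MasterProtos.RestoreSnapshotRequest request =
    MasterProtos.RestoreSnapshotRequest.newBuilder()
        .setSnapshot(snapshotDesc)
        .setRestoreACL(true)  // read back via request.getRestoreACL() in MasterRpcServices
        .build();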

View File

@ -25,6 +25,7 @@ option optimize_for = SPEED;
import "HBase.proto";
import "RPC.proto";
import "Snapshot.proto";
// ============================================================================
// WARNING - Compatibility rules
@ -235,6 +236,7 @@ enum CloneSnapshotState {
CLONE_SNAPSHOT_ASSIGN_REGIONS = 4;
CLONE_SNAPSHOT_UPDATE_DESC_CACHE = 5;
CLONE_SNAPSHOT_POST_OPERATION = 6;
CLONE_SNAPHOST_RESTORE_ACL = 7;
}
message CloneSnapshotStateData {
@ -250,6 +252,7 @@ enum RestoreSnapshotState {
RESTORE_SNAPSHOT_UPDATE_TABLE_DESCRIPTOR = 2;
RESTORE_SNAPSHOT_WRITE_FS_LAYOUT = 3;
RESTORE_SNAPSHOT_UPDATE_META = 4;
RESTORE_SNAPSHOT_RESTORE_ACL = 5;
}
message RestoreSnapshotStateData {
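Both enums grow by appending a new value (tags 7 and 5) rather than renumbering, so procedure WAL entries written by an older master still deserialize; note the committed spelling CLONE_SNAPHOST_RESTORE_ACL. A tiny hedged sketch of the tag-based lookup the generated code provides:
// Illustrative only: enum values resolve by tag number, which is why
// appending CLONE_SNAPHOST_RESTORE_ACL = 7 is a compatible change.
MasterProcedureProtos.CloneSnapshotState s =
    MasterProcedureProtos.CloneSnapshotState.forNumber(7);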

View File

@ -23,9 +23,28 @@ option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
import "AccessControl.proto";
import "FS.proto";
import "HBase.proto";
/**
* Description of the snapshot to take
*/
message SnapshotDescription {
required string name = 1;
optional string table = 2; // not needed for delete, but checked for in taking snapshot
optional int64 creation_time = 3 [default = 0];
enum Type {
DISABLED = 0;
FLUSH = 1;
SKIPFLUSH = 2;
}
optional Type type = 4 [default = FLUSH];
optional int32 version = 5;
optional string owner = 6;
optional UsersAndPermissions users_and_permissions = 7;
}
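Field 7 is optional, so snapshot descriptions written before this change still parse and simply report hasUsersAndPermissions() == false. A minimal sketch of attaching the payload (acl is assumed to be a UsersAndPermissions message like the one sketched earlier):
// Illustrative only: embed the source table's ACLs in the description.
SnapshotProtos.SnapshotDescription desc =
    SnapshotProtos.SnapshotDescription.newBuilder()
        .setName("snap1")
        .setTable("t1")
        .setUsersAndPermissions(acl)  // optional field 7 above
        .build();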
message SnapshotFileInfo {
enum Type {
HFILE = 1;

View File

@ -67,7 +67,7 @@ import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesR
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
@InterfaceAudience.Private
public class RSGroupAdminEndpoint implements MasterObserver, CoprocessorService {
@ -324,7 +324,7 @@ public class RSGroupAdminEndpoint implements MasterObserver, CoprocessorService
@Override
public void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns) throws IOException {
NamespaceDescriptor ns) throws IOException {
preCreateNamespace(ctx, ns);
}

View File

@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.util.Bytes;
@ -83,7 +83,7 @@ public class RestoreTool {
/**
* return value represents path for:
* ".../user/biadmin/backup1/default/t1_dn/backup_1396650096738/archive/data/default/t1_dn"
* @param tabelName table name
* @param tableName table name
* @return path to table archive
* @throws IOException exception
*/

View File

@ -44,8 +44,8 @@ import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.procedure2.LockInfo;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
/**

View File

@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MapReduceProtos.TableSnapshotRegionSplit;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;

View File

@ -153,7 +153,7 @@ import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
@ -2309,7 +2309,7 @@ public class HMaster extends HRegionServer implements MasterServices {
}
public long restoreSnapshot(final SnapshotDescription snapshotDesc,
final long nonceGroup, final long nonce) throws IOException {
final long nonceGroup, final long nonce, final boolean restoreAcl) throws IOException {
checkInitialized();
getSnapshotManager().checkSnapshotSupport();
@ -2321,7 +2321,8 @@ public class HMaster extends HRegionServer implements MasterServices {
new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
@Override
protected void run() throws IOException {
setProcId(getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey()));
setProcId(
getSnapshotManager().restoreOrCloneSnapshot(snapshotDesc, getNonceKey(), restoreAcl));
}
@Override

View File

@ -54,8 +54,8 @@ import org.apache.hadoop.hbase.procedure2.LockInfo;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
/**
* Provides the coprocessor framework and environment for master oriented

View File

@ -80,7 +80,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringP
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
@ -116,6 +115,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Remov
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.Bytes;
@ -1221,8 +1221,8 @@ public class MasterRpcServices extends RSRpcServices
public RestoreSnapshotResponse restoreSnapshot(RpcController controller,
RestoreSnapshotRequest request) throws ServiceException {
try {
long procId = master.restoreSnapshot(request.getSnapshot(),
request.getNonceGroup(), request.getNonce());
long procId = master.restoreSnapshot(request.getSnapshot(), request.getNonceGroup(),
request.getNonce(), request.getRestoreACL());
return RestoreSnapshotResponse.newBuilder().setProcId(procId).build();
} catch (ForeignException e) {
throw new ServiceException(e.getCause());

View File

@ -20,7 +20,7 @@ package org.apache.hadoop.hbase.master;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
/**
* Watch the current snapshot under process

View File

@ -48,9 +48,9 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloneSnapshotState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
@ -69,6 +69,7 @@ public class CloneSnapshotProcedure
private HTableDescriptor hTableDescriptor;
private SnapshotDescription snapshot;
private boolean restoreAcl;
private List<HRegionInfo> newRegions = null;
private Map<String, Pair<String, String> > parentsToChildrenPairMap = new HashMap<>();
@ -83,6 +84,11 @@ public class CloneSnapshotProcedure
public CloneSnapshotProcedure() {
}
public CloneSnapshotProcedure(final MasterProcedureEnv env,
final HTableDescriptor hTableDescriptor, final SnapshotDescription snapshot) {
this(env, hTableDescriptor, snapshot, false);
}
/**
* Constructor
* @param env MasterProcedureEnv
@ -90,10 +96,12 @@ public class CloneSnapshotProcedure
* @param snapshot snapshot to clone from
*/
public CloneSnapshotProcedure(final MasterProcedureEnv env,
final HTableDescriptor hTableDescriptor, final SnapshotDescription snapshot) {
final HTableDescriptor hTableDescriptor, final SnapshotDescription snapshot,
final boolean restoreAcl) {
super(env);
this.hTableDescriptor = hTableDescriptor;
this.snapshot = snapshot;
this.restoreAcl = restoreAcl;
getMonitorStatus();
}
@ -109,6 +117,14 @@ public class CloneSnapshotProcedure
return monitorStatus;
}
private void restoreSnapshotAcl(MasterProcedureEnv env) throws IOException {
Configuration conf = env.getMasterServices().getConfiguration();
if (restoreAcl && snapshot.hasUsersAndPermissions() && snapshot.getUsersAndPermissions() != null
&& SnapshotDescriptionUtils.isSecurityAvailable(conf)) {
RestoreSnapshotHelper.restoreSnapshotAcl(snapshot, hTableDescriptor.getTableName(), conf);
}
}
@Override
protected Flow executeFromState(final MasterProcedureEnv env, final CloneSnapshotState state)
throws InterruptedException {
@ -138,6 +154,10 @@ public class CloneSnapshotProcedure
break;
case CLONE_SNAPSHOT_UPDATE_DESC_CACHE:
CreateTableProcedure.updateTableDescCache(env, getTableName());
setNextState(CloneSnapshotState.CLONE_SNAPHOST_RESTORE_ACL);
break;
case CLONE_SNAPHOST_RESTORE_ACL:
restoreSnapshotAcl(env);
setNextState(CloneSnapshotState.CLONE_SNAPSHOT_POST_OPERATION);
break;
case CLONE_SNAPSHOT_POST_OPERATION:
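The ACL step is wired in after CLONE_SNAPSHOT_UPDATE_DESC_CACHE and before the post operation, so a failed grant fails the clone procedure itself rather than surfacing after it reports success. From a client the whole path collapses to one call; a hedged sketch assuming the new Admin overload added by this change:
// Illustrative only: clone a snapshot and replay its retained permissions.
try (Connection conn = ConnectionFactory.createConnection(conf);
     Admin admin = conn.getAdmin()) {
  admin.cloneSnapshot("snap1", TableName.valueOf("t1_clone"), true); // restoreAcl
}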

View File

@ -48,9 +48,9 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreSnapshotState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
@ -69,6 +69,7 @@ public class RestoreSnapshotProcedure
private Map<String, Pair<String, String>> parentsToChildrenPairMap = new HashMap<>();
private SnapshotDescription snapshot;
private boolean restoreAcl;
// Monitor
private MonitoredTask monitorStatus = null;
@ -81,6 +82,10 @@ public class RestoreSnapshotProcedure
public RestoreSnapshotProcedure() {
}
public RestoreSnapshotProcedure(final MasterProcedureEnv env,
final HTableDescriptor hTableDescriptor, final SnapshotDescription snapshot) {
this(env, hTableDescriptor, snapshot, false);
}
/**
* Constructor
* @param env MasterProcedureEnv
@ -91,12 +96,14 @@ public class RestoreSnapshotProcedure
public RestoreSnapshotProcedure(
final MasterProcedureEnv env,
final HTableDescriptor hTableDescriptor,
final SnapshotDescription snapshot) {
final SnapshotDescription snapshot,
final boolean restoreAcl) {
super(env);
// This is the new schema we are going to write out as this modification.
this.modifiedHTableDescriptor = hTableDescriptor;
// Snapshot information
this.snapshot = snapshot;
this.restoreAcl = restoreAcl;
// Monitor
getMonitorStatus();
@ -140,6 +147,10 @@ public class RestoreSnapshotProcedure
break;
case RESTORE_SNAPSHOT_UPDATE_META:
updateMETA(env);
setNextState(RestoreSnapshotState.RESTORE_SNAPSHOT_RESTORE_ACL);
break;
case RESTORE_SNAPSHOT_RESTORE_ACL:
restoreSnapshotAcl(env);
return Flow.NO_MORE_STATE;
default:
throw new UnsupportedOperationException("unhandled state=" + state);
@ -474,6 +485,16 @@ public class RestoreSnapshotProcedure
monitorStatus.getCompletionTimestamp() - monitorStatus.getStartTime());
}
private void restoreSnapshotAcl(final MasterProcedureEnv env) throws IOException {
if (restoreAcl && snapshot.hasUsersAndPermissions() && snapshot.getUsersAndPermissions() != null
&& SnapshotDescriptionUtils
.isSecurityAvailable(env.getMasterServices().getConfiguration())) {
// restore acl of snapshot to table.
RestoreSnapshotHelper.restoreSnapshotAcl(snapshot, TableName.valueOf(snapshot.getTable()),
env.getMasterServices().getConfiguration());
}
}
/**
* Make sure that region states of the region list is in OFFLINE state.
* @param env MasterProcedureEnv

View File

@ -33,7 +33,7 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.util.FSUtils;

View File

@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.procedure.Procedure;
import org.apache.hadoop.hbase.procedure.ProcedureCoordinator;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.util.Pair;

View File

@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;

View File

@ -67,10 +67,10 @@ import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription.Type;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
@ -680,7 +680,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
*/
private long cloneSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName,
final SnapshotDescription snapshot, final HTableDescriptor snapshotTableDesc,
final NonceKey nonceKey) throws IOException {
final NonceKey nonceKey, final boolean restoreAcl) throws IOException {
MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
HTableDescriptor htd = new HTableDescriptor(tableName, snapshotTableDesc);
if (cpHost != null) {
@ -688,7 +688,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
}
long procId;
try {
procId = cloneSnapshot(snapshot, htd, nonceKey);
procId = cloneSnapshot(snapshot, htd, nonceKey, restoreAcl);
} catch (IOException e) {
LOG.error("Exception occurred while cloning the snapshot " + snapshot.getName()
+ " as table " + tableName.getNameAsString(), e);
@ -712,7 +712,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* @return procId the ID of the clone snapshot procedure
*/
synchronized long cloneSnapshot(final SnapshotDescription snapshot,
final HTableDescriptor hTableDescriptor, final NonceKey nonceKey)
final HTableDescriptor hTableDescriptor, final NonceKey nonceKey, final boolean restoreAcl)
throws HBaseSnapshotException {
TableName tableName = hTableDescriptor.getTableName();
@ -728,8 +728,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
try {
long procId = master.getMasterProcedureExecutor().submitProcedure(
new CloneSnapshotProcedure(
master.getMasterProcedureExecutor().getEnvironment(), hTableDescriptor, snapshot),
new CloneSnapshotProcedure(master.getMasterProcedureExecutor().getEnvironment(),
hTableDescriptor, snapshot, restoreAcl),
nonceKey);
this.restoreTableToProcIdMap.put(tableName, procId);
return procId;
@ -747,8 +747,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* @param nonceKey unique identifier to prevent duplicated RPC
* @throws IOException
*/
public long restoreOrCloneSnapshot(final SnapshotDescription reqSnapshot, final NonceKey nonceKey)
throws IOException {
public long restoreOrCloneSnapshot(final SnapshotDescription reqSnapshot, final NonceKey nonceKey,
final boolean restoreAcl) throws IOException {
FileSystem fs = master.getMasterFileSystem().getFileSystem();
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(reqSnapshot, rootDir);
@ -777,28 +777,30 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
// Execute the restore/clone operation
long procId;
if (MetaTableAccessor.tableExists(master.getConnection(), tableName)) {
procId = restoreSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey);
procId = restoreSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey,
restoreAcl);
} else {
procId = cloneSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey);
procId =
cloneSnapshot(reqSnapshot, tableName, snapshot, snapshotTableDesc, nonceKey, restoreAcl);
}
return procId;
}
/**
* Restore the specified snapshot.
* The restore will fail if the destination table has a snapshot or restore in progress.
*
* Restore the specified snapshot. The restore will fail if the destination table has a snapshot
* or restore in progress.
* @param reqSnapshot Snapshot Descriptor from request
* @param tableName table to restore
* @param snapshot Snapshot Descriptor
* @param snapshotTableDesc Table Descriptor
* @param nonceKey unique identifier to prevent duplicated RPC
* @param restoreAcl true to restore acl of snapshot
* @return procId the ID of the restore snapshot procedure
* @throws IOException
*/
private long restoreSnapshot(final SnapshotDescription reqSnapshot, final TableName tableName,
final SnapshotDescription snapshot, final HTableDescriptor snapshotTableDesc,
final NonceKey nonceKey) throws IOException {
final NonceKey nonceKey, final boolean restoreAcl) throws IOException {
MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();
if (master.getTableStateManager().isTableState(
@ -815,7 +817,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
long procId;
try {
procId = restoreSnapshot(snapshot, snapshotTableDesc, nonceKey);
procId = restoreSnapshot(snapshot, snapshotTableDesc, nonceKey, restoreAcl);
} catch (IOException e) {
LOG.error("Exception occurred while restoring the snapshot " + snapshot.getName()
+ " as table " + tableName.getNameAsString(), e);
@ -831,16 +833,16 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
}
/**
* Restore the specified snapshot.
* The restore will fail if the destination table has a snapshot or restore in progress.
*
* Restore the specified snapshot. The restore will fail if the destination table has a snapshot
* or restore in progress.
* @param snapshot Snapshot Descriptor
* @param hTableDescriptor Table Descriptor
* @param nonceKey unique identifier to prevent duplicated RPC
* @param restoreAcl true to restore acl of snapshot
* @return procId the ID of the restore snapshot procedure
*/
private synchronized long restoreSnapshot(final SnapshotDescription snapshot,
final HTableDescriptor hTableDescriptor, final NonceKey nonceKey)
final HTableDescriptor hTableDescriptor, final NonceKey nonceKey, final boolean restoreAcl)
throws HBaseSnapshotException {
final TableName tableName = hTableDescriptor.getTableName();
@ -856,8 +858,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
try {
long procId = master.getMasterProcedureExecutor().submitProcedure(
new RestoreSnapshotProcedure(
master.getMasterProcedureExecutor().getEnvironment(), hTableDescriptor, snapshot),
new RestoreSnapshotProcedure(master.getMasterProcedureExecutor().getEnvironment(),
hTableDescriptor, snapshot, restoreAcl),
nonceKey);
this.restoreTableToProcIdMap.put(tableName, procId);
return procId;

View File

@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.master.locking.LockManager;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;

View File

@ -158,7 +158,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceCall;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.StoreSequenceId;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;

View File

@ -28,10 +28,10 @@ import org.apache.hadoop.hbase.errorhandling.ForeignException;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.procedure.ProcedureMember;
import org.apache.hadoop.hbase.procedure.Subprocedure;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager.SnapshotSubprocedurePool;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
/**

View File

@ -51,10 +51,10 @@ import org.apache.hadoop.hbase.procedure.RegionServerProcedureManager;
import org.apache.hadoop.hbase.procedure.Subprocedure;
import org.apache.hadoop.hbase.procedure.SubprocedureFactory;
import org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

View File

@ -461,7 +461,7 @@ public class AccessControlLists {
return allPerms;
}
static ListMultimap<String, TablePermission> getTablePermissions(Configuration conf,
public static ListMultimap<String, TablePermission> getTablePermissions(Configuration conf,
TableName tableName) throws IOException {
return getPermissions(conf, tableName != null ? tableName.getName() : null, null);
}

View File

@ -113,8 +113,8 @@ import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.ByteRange;
import org.apache.hadoop.hbase.util.Bytes;

View File

@ -53,7 +53,7 @@ import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.WALLink;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;

View File

@ -29,10 +29,12 @@ import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ThreadPoolExecutor;
import com.google.common.collect.ListMultimap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
@ -46,13 +48,17 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
import org.apache.hadoop.hbase.security.access.TablePermission;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
@ -825,4 +831,25 @@ public class RestoreSnapshotHelper {
}
return metaChanges;
}
public static void restoreSnapshotAcl(SnapshotDescription snapshot, TableName newTableName,
Configuration conf) throws IOException {
if (snapshot.hasUsersAndPermissions() && snapshot.getUsersAndPermissions() != null) {
LOG.info("Restore snapshot acl to table. snapshot: " + snapshot + ", table: " + newTableName);
ListMultimap<String, TablePermission> perms =
ShadedAccessControlUtil.toUserTablePermissions(snapshot.getUsersAndPermissions());
try (Connection conn = ConnectionFactory.createConnection(conf)) {
for (Entry<String, TablePermission> e : perms.entries()) {
String user = e.getKey();
TablePermission perm = e.getValue();
perm.setTableName(newTableName);
AccessControlClient.grant(conn, perm.getTableName(), user, perm.getFamily(),
perm.getQualifier(), perm.getActions());
}
} catch (Throwable e) {
throw new IOException("Grant acl into newly creatd table failed. snapshot: " + snapshot
+ ", table: " + newTableName, e);
}
}
}
}
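restoreSnapshotAcl rewrites each retained TablePermission to point at the new table name and re-issues it through AccessControlClient.grant, wrapping any failure in an IOException. A minimal sketch of calling it directly, mirroring what the two procedures do internally (fs, snapshotDir and conf are assumed to be in scope):
// Illustrative only: read a completed snapshot's description and replay its ACLs.
SnapshotProtos.SnapshotDescription desc =
    SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
RestoreSnapshotHelper.restoreSnapshotAcl(desc, TableName.valueOf("t1_clone"), conf);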

View File

@ -17,10 +17,11 @@
*/
package org.apache.hadoop.hbase.snapshot;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.Collections;
import com.google.common.collect.ListMultimap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -30,11 +31,17 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlLists;
import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
import org.apache.hadoop.hbase.security.access.TablePermission;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.snapshot.SnapshotManifestV2;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
@ -247,10 +254,10 @@ public final class SnapshotDescriptionUtils {
* {@link SnapshotDescription}.
*/
public static SnapshotDescription validate(SnapshotDescription snapshot, Configuration conf)
throws IllegalArgumentException {
throws IllegalArgumentException, IOException {
if (!snapshot.hasTable()) {
throw new IllegalArgumentException(
"Descriptor doesn't apply to a table, so we can't build it.");
"Descriptor doesn't apply to a table, so we can't build it.");
}
// set the creation time, if one hasn't been set
@ -263,6 +270,11 @@ public final class SnapshotDescriptionUtils {
builder.setCreationTime(time);
snapshot = builder.build();
}
// set the acl on the snapshot if the security feature is enabled.
if (isSecurityAvailable(conf)) {
snapshot = writeAclToSnapshotDescription(snapshot, conf);
}
return snapshot;
}
@ -366,4 +378,26 @@ public final class SnapshotDescriptionUtils {
if (!snapshot.hasOwner()) return false;
return snapshot.getOwner().equals(user.getShortName());
}
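/** Security is considered available when the ACL table (hbase:acl) exists. */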
public static boolean isSecurityAvailable(Configuration conf) throws IOException {
try (Connection conn = ConnectionFactory.createConnection(conf)) {
try (Admin admin = conn.getAdmin()) {
return admin.tableExists(AccessControlLists.ACL_TABLE_NAME);
}
}
}
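/**
 * Reads the origin table's current permissions as the login user and embeds
 * them into the snapshot description, so clone/restore can re-apply them.
 */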
private static SnapshotDescription writeAclToSnapshotDescription(SnapshotDescription snapshot,
Configuration conf) throws IOException {
ListMultimap<String, TablePermission> perms =
User.runAsLoginUser(new PrivilegedExceptionAction<ListMultimap<String, TablePermission>>() {
@Override
public ListMultimap<String, TablePermission> run() throws Exception {
return AccessControlLists.getTablePermissions(conf,
TableName.valueOf(snapshot.getTable()));
}
});
return snapshot.toBuilder()
.setUsersAndPermissions(ShadedAccessControlUtil.toUserTablePermissions(perms)).build();
}
}
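A rough illustration of the new validate() flow (assumes a reachable cluster
via the default client Configuration; snapshot and table names are
placeholders) -- on a secure cluster the returned description now carries the
origin table's ACLs:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;

public class ValidateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    SnapshotDescription desc =
        SnapshotDescription.newBuilder().setName("s1").setTable("t1").build();
    // Stamps a creation time if one is missing; when the hbase:acl table
    // exists, also embeds the permissions currently granted on table t1.
    desc = SnapshotDescriptionUtils.validate(desc, conf);
    System.out.println("ACLs embedded: " + desc.hasUsersAndPermissions());
  }
}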

View File

@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import org.apache.hadoop.util.StringUtils;
@ -144,7 +145,7 @@ public final class SnapshotInfo extends AbstractHBaseTool {
private AtomicLong nonSharedHfilesArchiveSize = new AtomicLong();
private AtomicLong logSize = new AtomicLong();
private final HBaseProtos.SnapshotDescription snapshot;
private final SnapshotProtos.SnapshotDescription snapshot;
private final TableName snapshotTable;
private final Configuration conf;
private final FileSystem fs;
@ -159,7 +160,7 @@ public final class SnapshotInfo extends AbstractHBaseTool {
}
SnapshotStats(final Configuration conf, final FileSystem fs,
final HBaseProtos.SnapshotDescription snapshot) {
final SnapshotProtos.SnapshotDescription snapshot) {
this.snapshot = snapshot;
this.snapshotTable = TableName.valueOf(snapshot.getTable());
this.conf = conf;
@ -234,7 +235,7 @@ public final class SnapshotInfo extends AbstractHBaseTool {
* with other snapshots and tables
*
* This is only calculated when
* {@link #getSnapshotStats(Configuration, HBaseProtos.SnapshotDescription, Map)}
* {@link #getSnapshotStats(Configuration, SnapshotProtos.SnapshotDescription, Map)}
* is called with a non-null Map
*/
public long getNonSharedArchivedStoreFilesSize() {
@ -413,7 +414,7 @@ public final class SnapshotInfo extends AbstractHBaseTool {
return false;
}
HBaseProtos.SnapshotDescription snapshotDesc =
SnapshotProtos.SnapshotDescription snapshotDesc =
SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
snapshotManifest = SnapshotManifest.open(getConf(), fs, snapshotDir, snapshotDesc);
return true;
@ -423,7 +424,7 @@ public final class SnapshotInfo extends AbstractHBaseTool {
* Dump the {@link SnapshotDescription}
*/
private void printInfo() {
HBaseProtos.SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription();
SnapshotProtos.SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription();
SimpleDateFormat df = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss");
System.out.println("Snapshot Info");
System.out.println("----------------------------------------");
@ -457,7 +458,7 @@ public final class SnapshotInfo extends AbstractHBaseTool {
}
// Collect information about hfiles and logs in the snapshot
final HBaseProtos.SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription();
final SnapshotProtos.SnapshotDescription snapshotDesc = snapshotManifest.getSnapshotDescription();
final String table = snapshotDesc.getTable();
final SnapshotDescription desc = ProtobufUtil.createSnapshotDesc(snapshotDesc);
final SnapshotStats stats = new SnapshotStats(this.getConf(), this.fs, desc);
@ -552,7 +553,7 @@ public final class SnapshotInfo extends AbstractHBaseTool {
*/
public static SnapshotStats getSnapshotStats(final Configuration conf,
final SnapshotDescription snapshot) throws IOException {
HBaseProtos.SnapshotDescription snapshotDesc =
SnapshotProtos.SnapshotDescription snapshotDesc =
ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot);
return getSnapshotStats(conf, snapshotDesc, null);
}
@ -565,7 +566,7 @@ public final class SnapshotInfo extends AbstractHBaseTool {
* @return the snapshot stats
*/
public static SnapshotStats getSnapshotStats(final Configuration conf,
final HBaseProtos.SnapshotDescription snapshotDesc,
final SnapshotProtos.SnapshotDescription snapshotDesc,
final Map<Path, Integer> filesMap) throws IOException {
Path rootDir = FSUtils.getRootDir(conf);
FileSystem fs = FileSystem.get(rootDir.toUri(), conf);
@ -598,7 +599,7 @@ public final class SnapshotInfo extends AbstractHBaseTool {
new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
List<SnapshotDescription> snapshotLists = new ArrayList<>(snapshots.length);
for (FileStatus snapshotDirStat: snapshots) {
HBaseProtos.SnapshotDescription snapshotDesc =
SnapshotProtos.SnapshotDescription snapshotDesc =
SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDirStat.getPath());
snapshotLists.add(ProtobufUtil.createSnapshotDesc(snapshotDesc));
}
@ -621,7 +622,7 @@ public final class SnapshotInfo extends AbstractHBaseTool {
final ConcurrentHashMap<Path, Integer> filesMap,
final AtomicLong uniqueHFilesArchiveSize, final AtomicLong uniqueHFilesSize,
final AtomicLong uniqueHFilesMobSize) throws IOException {
HBaseProtos.SnapshotDescription snapshotDesc =
SnapshotProtos.SnapshotDescription snapshotDesc =
ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot);
Path rootDir = FSUtils.getRootDir(conf);
final FileSystem fs = FileSystem.get(rootDir.toUri(), conf);

View File

@ -46,8 +46,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;

View File

@ -39,7 +39,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;

View File

@ -42,7 +42,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.util.FSUtils;
@ -127,7 +127,7 @@ public final class SnapshotManifestV2 {
}
static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf,
final Executor executor,final FileSystem fs, final Path snapshotDir,
final Executor executor, final FileSystem fs, final Path snapshotDir,
final SnapshotDescription desc, final int manifestSizeLimit) throws IOException {
FileStatus[] manifestFiles = FSUtils.listStatus(fs, snapshotDir, new PathFilter() {
@Override

View File

@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;

View File

@ -26,10 +26,11 @@
import="org.apache.hadoop.fs.Path"
import="org.apache.hadoop.hbase.HBaseConfiguration"
import="org.apache.hadoop.hbase.master.HMaster"
import="org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription"
import="org.apache.hadoop.hbase.snapshot.SnapshotInfo"
import="org.apache.hadoop.hbase.TableName"
import="org.apache.hadoop.util.StringUtils" %>
import="org.apache.hadoop.util.StringUtils"
import="org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription"
%>
<%
HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
Configuration conf = master.getConfiguration();

View File

@ -0,0 +1,240 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AccessControlConstants;
import org.apache.hadoop.hbase.security.access.AccessController;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.SecureTestUtil;
import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import java.io.IOException;
@Category({ MediumTests.class, ClientTests.class })
public class TestSnapshotWithAcl extends SecureTestUtil {
public TableName TEST_TABLE = TableName.valueOf("TestSnapshotWithAcl");
private static final int ROW_COUNT = 30000;
private static byte[] TEST_FAMILY = Bytes.toBytes("f1");
private static byte[] TEST_QUALIFIER = Bytes.toBytes("cq");
private static byte[] TEST_ROW = Bytes.toBytes(0);
private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static Configuration conf;
private static HBaseAdmin admin = null;
// user who is the table owner and will have all permissions on the table
private static User USER_OWNER;
// user with rw permissions on column family.
private static User USER_RW;
// user with read-only permissions
private static User USER_RO;
// user with no permissions
private static User USER_NONE;
static class AccessReadAction implements AccessTestAction {
private TableName tableName;
public AccessReadAction(TableName tableName) {
this.tableName = tableName;
}
@Override
public Object run() throws Exception {
Get g = new Get(TEST_ROW);
g.addFamily(TEST_FAMILY);
try (Connection conn = ConnectionFactory.createConnection(conf)) {
try (Table t = conn.getTable(tableName)) {
t.get(g);
}
}
return null;
}
};
static class AccessWriteAction implements AccessTestAction {
private TableName tableName;
public AccessWriteAction(TableName tableName) {
this.tableName = tableName;
}
@Override
public Object run() throws Exception {
Put p = new Put(TEST_ROW);
p.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(0));
try (Connection conn = ConnectionFactory.createConnection(conf)) {
try (Table t = conn.getTable(tableName)) {
t.put(p);
}
}
return null;
}
}
@BeforeClass
public static void setupBeforeClass() throws Exception {
conf = TEST_UTIL.getConfiguration();
// Enable security
enableSecurity(conf);
conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName());
// Verify enableSecurity sets up what we require
verifyConfiguration(conf);
// Enable EXEC permission checking
conf.setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true);
TEST_UTIL.startMiniCluster();
MasterCoprocessorHost cpHost =
TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterCoprocessorHost();
cpHost.load(AccessController.class, Coprocessor.PRIORITY_HIGHEST, conf);
USER_OWNER = User.createUserForTesting(conf, "owner", new String[0]);
USER_RW = User.createUserForTesting(conf, "rwuser", new String[0]);
USER_RO = User.createUserForTesting(conf, "rouser", new String[0]);
USER_NONE = User.createUserForTesting(conf, "usernone", new String[0]);
}
@Before
public void setUp() throws Exception {
admin = TEST_UTIL.getHBaseAdmin();
HTableDescriptor htd = new HTableDescriptor(TEST_TABLE);
HColumnDescriptor hcd = new HColumnDescriptor(TEST_FAMILY);
hcd.setMaxVersions(100);
htd.addFamily(hcd);
htd.setOwner(USER_OWNER);
admin.createTable(htd, new byte[][] { Bytes.toBytes("s") });
TEST_UTIL.waitTableEnabled(TEST_TABLE);
grantOnTable(TEST_UTIL, USER_RW.getShortName(), TEST_TABLE, TEST_FAMILY, null,
Permission.Action.READ, Permission.Action.WRITE);
grantOnTable(TEST_UTIL, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null,
Permission.Action.READ);
}
private void loadData() throws IOException {
try (Connection conn = ConnectionFactory.createConnection(conf)) {
try (Table t = conn.getTable(TEST_TABLE)) {
for (int i = 0; i < ROW_COUNT; i++) {
Put put = new Put(Bytes.toBytes(i));
put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
t.put(put);
}
}
}
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
TEST_UTIL.shutdownMiniCluster();
}
private void verifyRows(TableName tableName) throws IOException {
try (Connection conn = ConnectionFactory.createConnection(conf)) {
try (Table t = conn.getTable(tableName)) {
try (ResultScanner scanner = t.getScanner(new Scan())) {
Result result;
int rowCount = 0;
while ((result = scanner.next()) != null) {
byte[] value = result.getValue(TEST_FAMILY, TEST_QUALIFIER);
Assert.assertArrayEquals(Bytes.toBytes(rowCount++), value);
}
Assert.assertEquals(ROW_COUNT, rowCount);
}
}
}
}
@Test
public void testRestoreSnapshot() throws Exception {
verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RO, USER_RW);
verifyDenied(new AccessReadAction(TEST_TABLE), USER_NONE);
verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);
loadData();
verifyRows(TEST_TABLE);
String snapshotName1 = "testSnapshot1";
admin.snapshot(snapshotName1, TEST_TABLE);
// clone snapshot with restoreAcl true.
TableName tableName1 = TableName.valueOf("tableName1");
admin.cloneSnapshot(snapshotName1, tableName1, true);
verifyRows(tableName1);
verifyAllowed(new AccessReadAction(tableName1), USER_OWNER, USER_RO, USER_RW);
verifyDenied(new AccessReadAction(tableName1), USER_NONE);
verifyAllowed(new AccessWriteAction(tableName1), USER_OWNER, USER_RW);
verifyDenied(new AccessWriteAction(tableName1), USER_RO, USER_NONE);
// clone snapshot with restoreAcl false.
TableName tableName2 = TableName.valueOf("tableName2");
admin.cloneSnapshot(snapshotName1, tableName2, false);
verifyRows(tableName2);
verifyAllowed(new AccessReadAction(tableName2), USER_OWNER);
verifyDenied(new AccessReadAction(tableName2), USER_NONE, USER_RO, USER_RW);
verifyAllowed(new AccessWriteAction(tableName2), USER_OWNER);
verifyDenied(new AccessWriteAction(tableName2), USER_RO, USER_RW, USER_NONE);
// remove read permission for USER_RO.
revokeFromTable(TEST_UTIL, USER_RO.getShortName(), TEST_TABLE, TEST_FAMILY, null,
Permission.Action.READ);
verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessReadAction(TEST_TABLE), USER_RO, USER_NONE);
verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);
// restore snapshot with restoreAcl false.
admin.disableTable(TEST_TABLE);
admin.restoreSnapshot(snapshotName1, false, false);
admin.enableTable(TEST_TABLE);
verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessReadAction(TEST_TABLE), USER_RO, USER_NONE);
verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);
// restore snapshot with restoreAcl true.
admin.disableTable(TEST_TABLE);
admin.restoreSnapshot(snapshotName1, false, true);
admin.enableTable(TEST_TABLE);
verifyAllowed(new AccessReadAction(TEST_TABLE), USER_OWNER, USER_RO, USER_RW);
verifyDenied(new AccessReadAction(TEST_TABLE), USER_NONE);
verifyAllowed(new AccessWriteAction(TEST_TABLE), USER_OWNER, USER_RW);
verifyDenied(new AccessWriteAction(TEST_TABLE), USER_RO, USER_NONE);
}
}

View File

@ -64,10 +64,10 @@ import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;

View File

@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.snapshot.DisabledTableSnapshotHandler;
import org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
@ -50,6 +49,7 @@ import org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;

View File

@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CloneSnapshotState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@ -50,7 +51,7 @@ public class TestCloneSnapshotProcedure extends TestTableDDLProcedureBase {
protected final byte[] CF = Bytes.toBytes("cf1");
private static HBaseProtos.SnapshotDescription snapshot = null;
private static SnapshotProtos.SnapshotDescription snapshot = null;
@After
@Override
@ -60,7 +61,7 @@ public class TestCloneSnapshotProcedure extends TestTableDDLProcedureBase {
snapshot = null;
}
private HBaseProtos.SnapshotDescription getSnapshot() throws Exception {
private SnapshotProtos.SnapshotDescription getSnapshot() throws Exception {
if (snapshot == null) {
final TableName snapshotTableName = TableName.valueOf("testCloneSnapshot");
long tid = System.currentTimeMillis();
@ -102,7 +103,7 @@ public class TestCloneSnapshotProcedure extends TestTableDDLProcedureBase {
final HTableDescriptor htd = createHTableDescriptor(clonedTableName, CF);
// take the snapshot
HBaseProtos.SnapshotDescription snapshotDesc = getSnapshot();
SnapshotProtos.SnapshotDescription snapshotDesc = getSnapshot();
long procId = ProcedureTestingUtility.submitAndWait(
procExec, new CloneSnapshotProcedure(procExec.getEnvironment(), htd, snapshotDesc));
@ -115,7 +116,7 @@ public class TestCloneSnapshotProcedure extends TestTableDDLProcedureBase {
@Test(timeout=60000)
public void testCloneSnapshotToSameTable() throws Exception {
// take the snapshot
HBaseProtos.SnapshotDescription snapshotDesc = getSnapshot();
SnapshotProtos.SnapshotDescription snapshotDesc = getSnapshot();
final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
final TableName clonedTableName = TableName.valueOf(snapshotDesc.getTable());
@ -137,7 +138,7 @@ public class TestCloneSnapshotProcedure extends TestTableDDLProcedureBase {
final HTableDescriptor htd = createHTableDescriptor(clonedTableName, CF);
// take the snapshot
HBaseProtos.SnapshotDescription snapshotDesc = getSnapshot();
SnapshotProtos.SnapshotDescription snapshotDesc = getSnapshot();
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
@ -161,7 +162,7 @@ public class TestCloneSnapshotProcedure extends TestTableDDLProcedureBase {
final HTableDescriptor htd = createHTableDescriptor(clonedTableName, CF);
// take the snapshot
HBaseProtos.SnapshotDescription snapshotDesc = getSnapshot();
SnapshotProtos.SnapshotDescription snapshotDesc = getSnapshot();
ProcedureTestingUtility.waitNoProcedureRunning(procExec);
ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);

View File

@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RestoreSnapshotState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
@ -67,7 +68,7 @@ public class TestRestoreSnapshotProcedure extends TestTableDDLProcedureBase {
protected final int rowCountCF4 = 40;
protected final int rowCountCF1addition = 10;
private HBaseProtos.SnapshotDescription snapshot = null;
private SnapshotProtos.SnapshotDescription snapshot = null;
private HTableDescriptor snapshotHTD = null;
@Rule

View File

@ -167,7 +167,7 @@ public class SecureTestUtil {
* To indicate the action was not allowed, either throw an AccessDeniedException
* or return an empty list of KeyValues.
*/
static interface AccessTestAction extends PrivilegedExceptionAction<Object> { }
protected static interface AccessTestAction extends PrivilegedExceptionAction<Object> { }
/** This fails only in case of ADE or empty list for any of the actions. */
public static void verifyAllowed(User user, AccessTestAction... actions) throws Exception {

View File

@ -117,8 +117,8 @@ import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProcedureProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.SecurityTests;
import org.apache.hadoop.hbase.util.Bytes;
@ -131,7 +131,6 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import org.mockito.Mockito;
import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.RpcCallback;

View File

@ -57,7 +57,6 @@ import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.regionserver.Region;
@ -67,6 +66,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.Permission.Action;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.SecurityTests;
import org.apache.hadoop.hbase.util.Bytes;

View File

@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
@ -127,7 +128,7 @@ public final class SnapshotTestingUtils {
* Make sure that there is only one snapshot returned from the master
*/
public static void assertOneSnapshotThatMatches(Admin admin,
HBaseProtos.SnapshotDescription snapshot) throws IOException {
SnapshotProtos.SnapshotDescription snapshot) throws IOException {
assertOneSnapshotThatMatches(admin, snapshot.getName(), TableName.valueOf(snapshot.getTable()));
}
@ -159,20 +160,19 @@ public final class SnapshotTestingUtils {
}
public static void confirmSnapshotValid(HBaseTestingUtility testUtil,
HBaseProtos.SnapshotDescription snapshotDescriptor, TableName tableName, byte[] family)
SnapshotProtos.SnapshotDescription snapshotDescriptor, TableName tableName, byte[] family)
throws IOException {
MasterFileSystem mfs = testUtil.getHBaseCluster().getMaster().getMasterFileSystem();
confirmSnapshotValid(snapshotDescriptor, tableName, family,
mfs.getRootDir(), testUtil.getAdmin(), mfs.getFileSystem());
confirmSnapshotValid(snapshotDescriptor, tableName, family, mfs.getRootDir(),
testUtil.getAdmin(), mfs.getFileSystem());
}
/**
* Confirm that the snapshot contains references to all the files that should
* be in the snapshot.
*/
public static void confirmSnapshotValid(
HBaseProtos.SnapshotDescription snapshotDescriptor, TableName tableName,
byte[] testFamily, Path rootDir, Admin admin, FileSystem fs)
public static void confirmSnapshotValid(SnapshotProtos.SnapshotDescription snapshotDescriptor,
TableName tableName, byte[] testFamily, Path rootDir, Admin admin, FileSystem fs)
throws IOException {
ArrayList nonEmptyTestFamilies = new ArrayList(1);
nonEmptyTestFamilies.add(testFamily);
@ -184,7 +184,7 @@ public final class SnapshotTestingUtils {
* Confirm that the snapshot has no references files but only metadata.
*/
public static void confirmEmptySnapshotValid(
HBaseProtos.SnapshotDescription snapshotDescriptor, TableName tableName,
SnapshotProtos.SnapshotDescription snapshotDescriptor, TableName tableName,
byte[] testFamily, Path rootDir, Admin admin, FileSystem fs)
throws IOException {
ArrayList emptyTestFamilies = new ArrayList(1);
@ -200,7 +200,7 @@ public final class SnapshotTestingUtils {
* by the MasterSnapshotVerifier, at the end of the snapshot operation.
*/
public static void confirmSnapshotValid(
HBaseProtos.SnapshotDescription snapshotDescriptor, TableName tableName,
SnapshotProtos.SnapshotDescription snapshotDescriptor, TableName tableName,
List<byte[]> nonEmptyTestFamilies, List<byte[]> emptyTestFamilies,
Path rootDir, Admin admin, FileSystem fs) throws IOException {
final Configuration conf = admin.getConfiguration();
@ -210,7 +210,7 @@ public final class SnapshotTestingUtils {
snapshotDescriptor, rootDir);
assertTrue(fs.exists(snapshotDir));
HBaseProtos.SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
SnapshotProtos.SnapshotDescription desc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
// Extract regions and families with store files
final Set<byte[]> snapshotFamilies = new TreeSet<>(Bytes.BYTES_COMPARATOR);
@ -272,7 +272,7 @@ public final class SnapshotTestingUtils {
* @throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException
*/
public static void waitForSnapshotToComplete(HMaster master,
HBaseProtos.SnapshotDescription snapshot, long sleep)
SnapshotProtos.SnapshotDescription snapshot, long sleep)
throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
final IsSnapshotDoneRequest request = IsSnapshotDoneRequest.newBuilder()
.setSnapshot(snapshot).build();
@ -426,7 +426,7 @@ public final class SnapshotTestingUtils {
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName,
mfs.getRootDir());
HBaseProtos.SnapshotDescription snapshotDesc =
SnapshotProtos.SnapshotDescription snapshotDesc =
SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
final TableName table = TableName.valueOf(snapshotDesc.getTable());
@ -476,7 +476,7 @@ public final class SnapshotTestingUtils {
public static class SnapshotBuilder {
private final RegionData[] tableRegions;
private final HBaseProtos.SnapshotDescription desc;
private final SnapshotProtos.SnapshotDescription desc;
private final HTableDescriptor htd;
private final Configuration conf;
private final FileSystem fs;
@ -486,7 +486,7 @@ public final class SnapshotTestingUtils {
public SnapshotBuilder(final Configuration conf, final FileSystem fs,
final Path rootDir, final HTableDescriptor htd,
final HBaseProtos.SnapshotDescription desc, final RegionData[] tableRegions)
final SnapshotProtos.SnapshotDescription desc, final RegionData[] tableRegions)
throws IOException {
this.fs = fs;
this.conf = conf;
@ -503,7 +503,7 @@ public final class SnapshotTestingUtils {
return this.htd;
}
public HBaseProtos.SnapshotDescription getSnapshotDescription() {
public SnapshotProtos.SnapshotDescription getSnapshotDescription() {
return this.desc;
}
@ -527,7 +527,7 @@ public final class SnapshotTestingUtils {
.build());
}
private Path[] addRegion(final HBaseProtos.SnapshotDescription desc) throws IOException {
private Path[] addRegion(final SnapshotProtos.SnapshotDescription desc) throws IOException {
if (this.snapshotted == tableRegions.length) {
throw new UnsupportedOperationException("No more regions in the table");
}
@ -668,7 +668,7 @@ public final class SnapshotTestingUtils {
HTableDescriptor htd = createHtd(tableName);
RegionData[] regions = createTable(htd, numRegions);
HBaseProtos.SnapshotDescription desc = HBaseProtos.SnapshotDescription.newBuilder()
SnapshotProtos.SnapshotDescription desc = SnapshotProtos.SnapshotDescription.newBuilder()
.setTable(htd.getNameAsString())
.setName(snapshotName)
.setVersion(version)

View File

@ -43,7 +43,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.VerySlowMapReduceTests;

View File

@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.Bytes;
@ -274,9 +275,9 @@ public class TestFlushSnapshotFromClient {
@Test
public void testAsyncFlushSnapshot() throws Exception {
HBaseProtos.SnapshotDescription snapshot = HBaseProtos.SnapshotDescription.newBuilder()
SnapshotProtos.SnapshotDescription snapshot = SnapshotProtos.SnapshotDescription.newBuilder()
.setName("asyncSnapshot").setTable(TABLE_NAME.getNameAsString())
.setType(HBaseProtos.SnapshotDescription.Type.FLUSH).build();
.setType(SnapshotProtos.SnapshotDescription.Type.FLUSH).build();
// take the snapshot async
admin.takeSnapshotAsync(

View File

@ -31,12 +31,12 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

View File

@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.coprocessor.MasterObserver;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.TestTableName;
import org.junit.After;

View File

@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.RegionServerTests;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
@ -69,7 +69,7 @@ public class TestSnapshotDescriptionUtils {
private static final Log LOG = LogFactory.getLog(TestSnapshotDescriptionUtils.class);
@Test
public void testValidateMissingTableName() {
public void testValidateMissingTableName() throws IOException {
Configuration conf = new Configuration(false);
try {
SnapshotDescriptionUtils.validate(SnapshotDescription.newBuilder().setName("fail").build(),

View File

@ -33,8 +33,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;

View File

@ -972,14 +972,16 @@ module Hbase
#----------------------------------------------------------------------------------------------
# Restore specified snapshot
def restore_snapshot(snapshot_name)
@admin.restoreSnapshot(snapshot_name)
def restore_snapshot(snapshot_name, restore_acl = false)
conf = @connection.getConfiguration
take_fail_safe_snapshot = conf.getBoolean("hbase.snapshot.restore.take.failsafe.snapshot", false)
@admin.restoreSnapshot(snapshot_name, take_fail_safe_snapshot, restore_acl)
end
#----------------------------------------------------------------------------------------------
# Create a new table by cloning the snapshot content
def clone_snapshot(snapshot_name, table)
@admin.cloneSnapshot(snapshot_name, TableName.valueOf(table))
def clone_snapshot(snapshot_name, table, restore_acl = false)
@admin.cloneSnapshot(snapshot_name, TableName.valueOf(table), restore_acl)
end
#----------------------------------------------------------------------------------------------
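For reference, a hedged Java sketch of the Admin calls these shell wrappers
delegate to (snapshot and table names are placeholders; the failsafe snapshot
is disabled here for brevity):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ShellEquivalentSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
        Admin admin = conn.getAdmin()) {
      // restore_snapshot 'snap', {RESTORE_ACL => true} -- table must be disabled first
      admin.disableTable(TableName.valueOf("t1"));
      admin.restoreSnapshot("snap", false, true);
      admin.enableTable(TableName.valueOf("t1"));
      // clone_snapshot 'snap', 'cloned', {RESTORE_ACL => true}
      admin.cloneSnapshot("snap", TableName.valueOf("cloned"), true);
    }
  }
}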

View File

@ -83,6 +83,7 @@ module HBaseConstants
DATA = 'DATA'
SERVER_NAME = 'SERVER_NAME'
LOCALITY_THRESHOLD = 'LOCALITY_THRESHOLD'
RESTORE_ACL = 'RESTORE_ACL'
# Load constants from hbase java API
def self.promote_constants(constants)

View File

@ -28,11 +28,18 @@ And writing on the newly created table will not influence the snapshot data.
Examples:
hbase> clone_snapshot 'snapshotName', 'tableName'
hbase> clone_snapshot 'snapshotName', 'namespace:tableName'
The following command will also restore all ACLs from the snapshot's origin
table into the newly created table.
hbase> clone_snapshot 'snapshotName', 'namespace:tableName', {RESTORE_ACL=>true}
EOF
end
def command(snapshot_name, table)
admin.clone_snapshot(snapshot_name, table)
def command(snapshot_name, table, args = {})
raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
restore_acl = args.delete(RESTORE_ACL) || false
admin.clone_snapshot(snapshot_name, table, restore_acl)
end
def handle_exceptions(cause, *args)

View File

@ -28,11 +28,17 @@ The table must be disabled.
Examples:
hbase> restore_snapshot 'snapshotName'
The following command will also restore all ACLs from the snapshot's origin table into the restored table.
hbase> restore_snapshot 'snapshotName', {RESTORE_ACL=>true}
EOF
end
def command(snapshot_name)
admin.restore_snapshot(snapshot_name)
def command(snapshot_name, args = {})
raise(ArgumentError, "Arguments should be a Hash") unless args.kind_of?(Hash)
restore_acl = args.delete(RESTORE_ACL) || false
admin.restore_snapshot(snapshot_name, restore_acl)
end
end
end

View File

@ -23,7 +23,6 @@ import org.apache.hadoop.hbase.testclassification.ClientTests;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.jruby.embed.PathType;
import org.junit.Test;
import org.junit.Ignore;
import org.junit.experimental.categories.Category;
@Category({ ClientTests.class, LargeTests.class })