HBASE-22760 : Pause/Resume/Query Snapshot Auto Cleanup Activity (#619)

This commit is contained in:
Viraj Jasani 2019-09-13 10:38:10 +05:30 committed by Anoop Sam John
parent de9b1d403c
commit 72ea3666c0
23 changed files with 3278 additions and 159 deletions

View File

@ -1658,4 +1658,28 @@ public interface Admin extends Abortable, Closeable {
* @return List of servers that were not cleared
*/
List<ServerName> clearDeadServers(final List<ServerName> servers) throws IOException;
/**
 * Turn on or off the auto snapshot cleanup based on TTL.
 *
 * @param on Set to <code>true</code> to enable, <code>false</code> to disable.
 * @param synchronous If <code>true</code>, it waits until current snapshot cleanup is completed,
 *   if outstanding.
 * @return Previous auto snapshot cleanup value
 * @throws IOException if a remote or network exception occurs
 */
boolean snapshotCleanupSwitch(final boolean on, final boolean synchronous)
    throws IOException;
/**
 * Query the current state of the auto snapshot cleanup based on TTL.
 *
 * @return <code>true</code> if the auto snapshot cleanup is enabled,
 *   <code>false</code> otherwise.
 * @throws IOException if a remote or network exception occurs
 */
boolean isSnapshotCleanupEnabled() throws IOException;
}

View File

@ -2132,6 +2132,20 @@ class ConnectionManager {
return stub.listNamespaces(controller, request);
}
@Override
// Pass-through: forwards the snapshot-cleanup switch RPC to the wrapped MasterService stub.
public MasterProtos.SetSnapshotCleanupResponse switchSnapshotCleanup(
    RpcController controller, MasterProtos.SetSnapshotCleanupRequest request)
    throws ServiceException {
  return stub.switchSnapshotCleanup(controller, request);
}
@Override
// Pass-through: forwards the snapshot-cleanup state query RPC to the wrapped MasterService stub.
public MasterProtos.IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled(
    RpcController controller, MasterProtos.IsSnapshotCleanupEnabledRequest request)
    throws ServiceException {
  return stub.isSnapshotCleanupEnabled(controller, request);
}
@Override
public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController controller,
ListNamespaceDescriptorsRequest request) throws ServiceException {

View File

@ -136,6 +136,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRe
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
@ -154,6 +155,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRe
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
@ -5053,4 +5055,38 @@ public class HBaseAdmin implements Admin {
// Accessor for the factory used to create per-call RPC controllers.
private RpcControllerFactory getRpcControllerFactory() {
  return rpcControllerFactory;
}
@Override
public boolean snapshotCleanupSwitch(final boolean on, final boolean synchronous)
    throws IOException {
  // Issues the SwitchSnapshotCleanup RPC against the active master under the
  // configured call timeout, returning the previous switch state to the caller.
  return executeCallable(new MasterCallable<Boolean>(getConnection()) {
    @Override
    public Boolean call(int callTimeout) throws Exception {
      HBaseRpcController controller = rpcControllerFactory.newController();
      controller.setCallTimeout(callTimeout);
      SetSnapshotCleanupRequest req =
          RequestConverter.buildSetSnapshotCleanupRequest(on, synchronous);
      return master.switchSnapshotCleanup(controller, req).getPrevSnapshotCleanup();
    }
  });
}
@Override
public boolean isSnapshotCleanupEnabled() throws IOException {
  // Issues the IsSnapshotCleanupEnabled RPC against the active master under the
  // configured call timeout.
  return executeCallable(new MasterCallable<Boolean>(getConnection()) {
    @Override
    public Boolean call(int callTimeout) throws Exception {
      HBaseRpcController controller = rpcControllerFactory.newController();
      controller.setCallTimeout(callTimeout);
      IsSnapshotCleanupEnabledRequest req =
          RequestConverter.buildIsSnapshotCleanupEnabledRequest();
      return master.isSnapshotCleanupEnabled(controller, req).getEnabled();
    }
  });
}
}

View File

@ -100,6 +100,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorE
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos
.IsSnapshotCleanupEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
@ -110,6 +112,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanReq
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCleanerChoreRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
@ -1830,4 +1833,31 @@ public final class RequestConverter {
}
throw new UnsupportedOperationException("Unsupport switch type:" + switchType);
}
/**
 * Creates a SetSnapshotCleanupRequest for turning auto snapshot cleanup on or off.
 *
 * @param enabled Set to <code>true</code> to enable,
 *   <code>false</code> to disable.
 * @param synchronous If <code>true</code>, it waits until current snapshot cleanup is completed,
 *   if outstanding.
 * @return a SetSnapshotCleanupRequest
 */
public static SetSnapshotCleanupRequest buildSetSnapshotCleanupRequest(
    final boolean enabled, final boolean synchronous) {
  // Populate the two request fields step by step on an explicit builder.
  final SetSnapshotCleanupRequest.Builder builder = SetSnapshotCleanupRequest.newBuilder();
  builder.setEnabled(enabled);
  builder.setSynchronous(synchronous);
  return builder.build();
}
/**
 * Creates an IsSnapshotCleanupEnabledRequest to determine if auto snapshot cleanup
 * based on TTL expiration is turned on.
 *
 * @return IsSnapshotCleanupEnabledRequest
 */
public static IsSnapshotCleanupEnabledRequest buildIsSnapshotCleanupEnabledRequest() {
  // The request carries no fields; an empty message is sufficient.
  return IsSnapshotCleanupEnabledRequest.newBuilder().build();
}
}

View File

@ -122,6 +122,8 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
private String switchZNode;
// znode containing the lock for the tables
public String tableLockZNode;
// znode containing the state of the snapshot auto-cleanup
String snapshotCleanupZNode;
// znode containing the state of recovering regions
public String recoveringRegionsZNode;
// znode containing namespace descriptors
@ -137,6 +139,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
}};
public final static String META_ZNODE_PREFIX = "meta-region-server";
private static final String DEFAULT_SNAPSHOT_CLEANUP_ZNODE = "snapshot-cleanup";
private final Configuration conf;
@ -456,6 +459,8 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
switchZNode = ZKUtil.joinZNode(baseZNode, conf.get("zookeeper.znode.switch", "switch"));
tableLockZNode = ZKUtil.joinZNode(baseZNode,
conf.get("zookeeper.znode.tableLock", "table-lock"));
snapshotCleanupZNode = ZKUtil.joinZNode(baseZNode,
conf.get("zookeeper.znode.snapshot.cleanup", DEFAULT_SNAPSHOT_CLEANUP_ZNODE));
recoveringRegionsZNode = ZKUtil.joinZNode(baseZNode,
conf.get("zookeeper.znode.recovering.regions", "recovering-regions"));
namespaceZNode = ZKUtil.joinZNode(baseZNode,

View File

@ -1338,8 +1338,6 @@ public final class HConstants {
// User defined Default TTL config key
public static final String DEFAULT_SNAPSHOT_TTL_CONFIG_KEY = "hbase.master.snapshot.ttl";
public static final String SNAPSHOT_CLEANER_DISABLE = "hbase.master.cleaner.snapshot.disable";
/**
* Configurations for master executor services.
*/

View File

@ -199,6 +199,7 @@
<include>RSGroupAdmin.proto</include>
<include>SecureBulkLoad.proto</include>
<include>Snapshot.proto</include>
<include>SnapshotCleanup.proto</include>
<include>Table.proto</include>
<include>Tracing.proto</include>
<include>VisibilityLabels.proto</include>

View File

@ -0,0 +1,494 @@
// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: SnapshotCleanup.proto
package org.apache.hadoop.hbase.protobuf.generated;
public final class SnapshotCleanupProtos {
  // NOTE(review): protoc-generated file (see header) -- regenerate from
  // SnapshotCleanup.proto rather than hand-editing; manual changes will be lost.
  private SnapshotCleanupProtos() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  public interface SnapshotCleanupStateOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    // required bool snapshot_cleanup_enabled = 1;
    /**
     * <code>required bool snapshot_cleanup_enabled = 1;</code>
     */
    boolean hasSnapshotCleanupEnabled();
    /**
     * <code>required bool snapshot_cleanup_enabled = 1;</code>
     */
    boolean getSnapshotCleanupEnabled();
  }
  /**
   * Protobuf type {@code hbase.pb.SnapshotCleanupState}
   */
  public static final class SnapshotCleanupState extends
      com.google.protobuf.GeneratedMessage
      implements SnapshotCleanupStateOrBuilder {
    // Use SnapshotCleanupState.newBuilder() to construct.
    private SnapshotCleanupState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private SnapshotCleanupState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
    private static final SnapshotCleanupState defaultInstance;
    public static SnapshotCleanupState getDefaultInstance() {
      return defaultInstance;
    }
    public SnapshotCleanupState getDefaultInstanceForType() {
      return defaultInstance;
    }
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private SnapshotCleanupState(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            // In protoc-generated parsers the default arm precedes the field cases;
            // switch dispatch is by tag value, so ordering does not affect matching.
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 8: {
              bitField0_ |= 0x00000001;
              snapshotCleanupEnabled_ = input.readBool();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.internal_static_hbase_pb_SnapshotCleanupState_descriptor;
    }
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.internal_static_hbase_pb_SnapshotCleanupState_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState.class, org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState.Builder.class);
    }
    public static com.google.protobuf.Parser<SnapshotCleanupState> PARSER =
        new com.google.protobuf.AbstractParser<SnapshotCleanupState>() {
      public SnapshotCleanupState parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new SnapshotCleanupState(input, extensionRegistry);
      }
    };
    @java.lang.Override
    public com.google.protobuf.Parser<SnapshotCleanupState> getParserForType() {
      return PARSER;
    }
    private int bitField0_;
    // required bool snapshot_cleanup_enabled = 1;
    public static final int SNAPSHOT_CLEANUP_ENABLED_FIELD_NUMBER = 1;
    private boolean snapshotCleanupEnabled_;
    /**
     * <code>required bool snapshot_cleanup_enabled = 1;</code>
     */
    public boolean hasSnapshotCleanupEnabled() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required bool snapshot_cleanup_enabled = 1;</code>
     */
    public boolean getSnapshotCleanupEnabled() {
      return snapshotCleanupEnabled_;
    }
    private void initFields() {
      snapshotCleanupEnabled_ = false;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;
      if (!hasSnapshotCleanupEnabled()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeBool(1, snapshotCleanupEnabled_);
      }
      getUnknownFields().writeTo(output);
    }
    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;
      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBoolSize(1, snapshotCleanupEnabled_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
       return true;
      }
      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState other = (org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState) obj;
      boolean result = true;
      result = result && (hasSnapshotCleanupEnabled() == other.hasSnapshotCleanupEnabled());
      if (hasSnapshotCleanupEnabled()) {
        result = result && (getSnapshotCleanupEnabled()
            == other.getSnapshotCleanupEnabled());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasSnapshotCleanupEnabled()) {
        hash = (37 * hash) + SNAPSHOT_CLEANUP_ENABLED_FIELD_NUMBER;
        hash = (53 * hash) + hashBoolean(getSnapshotCleanupEnabled());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
    public static org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }
    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hbase.pb.SnapshotCleanupState}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupStateOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.internal_static_hbase_pb_SnapshotCleanupState_descriptor;
      }
      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.internal_static_hbase_pb_SnapshotCleanupState_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState.class, org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState.Builder.class);
      }
      // Construct using org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }
      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }
      public Builder clear() {
        super.clear();
        snapshotCleanupEnabled_ = false;
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }
      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.internal_static_hbase_pb_SnapshotCleanupState_descriptor;
      }
      public org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState getDefaultInstanceForType() {
        return org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState.getDefaultInstance();
      }
      public org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState build() {
        org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }
      public org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState buildPartial() {
        org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState result = new org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.snapshotCleanupEnabled_ = snapshotCleanupEnabled_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState) {
          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }
      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState other) {
        if (other == org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState.getDefaultInstance()) return this;
        if (other.hasSnapshotCleanupEnabled()) {
          setSnapshotCleanupEnabled(other.getSnapshotCleanupEnabled());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }
      public final boolean isInitialized() {
        if (!hasSnapshotCleanupEnabled()) {
          return false;
        }
        return true;
      }
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos.SnapshotCleanupState) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;
      // required bool snapshot_cleanup_enabled = 1;
      private boolean snapshotCleanupEnabled_ ;
      /**
       * <code>required bool snapshot_cleanup_enabled = 1;</code>
       */
      public boolean hasSnapshotCleanupEnabled() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required bool snapshot_cleanup_enabled = 1;</code>
       */
      public boolean getSnapshotCleanupEnabled() {
        return snapshotCleanupEnabled_;
      }
      /**
       * <code>required bool snapshot_cleanup_enabled = 1;</code>
       */
      public Builder setSnapshotCleanupEnabled(boolean value) {
        bitField0_ |= 0x00000001;
        snapshotCleanupEnabled_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required bool snapshot_cleanup_enabled = 1;</code>
       */
      public Builder clearSnapshotCleanupEnabled() {
        bitField0_ = (bitField0_ & ~0x00000001);
        snapshotCleanupEnabled_ = false;
        onChanged();
        return this;
      }
      // @@protoc_insertion_point(builder_scope:hbase.pb.SnapshotCleanupState)
    }
    static {
      defaultInstance = new SnapshotCleanupState(true);
      defaultInstance.initFields();
    }
    // @@protoc_insertion_point(class_scope:hbase.pb.SnapshotCleanupState)
  }
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_hbase_pb_SnapshotCleanupState_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_hbase_pb_SnapshotCleanupState_fieldAccessorTable;
  public static com.google.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
  private static com.google.protobuf.Descriptors.FileDescriptor
      descriptor;
  static {
    java.lang.String[] descriptorData = {
      "\n\025SnapshotCleanup.proto\022\010hbase.pb\"8\n\024Sna" +
      "pshotCleanupState\022 \n\030snapshot_cleanup_en" +
      "abled\030\001 \002(\010BK\n*org.apache.hadoop.hbase.p" +
      "rotobuf.generatedB\025SnapshotCleanupProtos" +
      "H\001\210\001\001\240\001\001"
    };
    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
        public com.google.protobuf.ExtensionRegistry assignDescriptors(
            com.google.protobuf.Descriptors.FileDescriptor root) {
          descriptor = root;
          internal_static_hbase_pb_SnapshotCleanupState_descriptor =
            getDescriptor().getMessageTypes().get(0);
          internal_static_hbase_pb_SnapshotCleanupState_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hbase_pb_SnapshotCleanupState_descriptor,
              new java.lang.String[] { "SnapshotCleanupEnabled", });
          return null;
        }
      };
    com.google.protobuf.Descriptors.FileDescriptor
      .internalBuildGeneratedFileFrom(descriptorData,
        new com.google.protobuf.Descriptors.FileDescriptor[] {
        }, assigner);
  }
  // @@protoc_insertion_point(outer_class_scope)
}

View File

@ -571,6 +571,23 @@ message ClearDeadServersResponse {
repeated ServerName server_name = 1;
}
// Request to flip the auto snapshot cleanup switch on the master.
message SetSnapshotCleanupRequest {
  required bool enabled = 1;
  // If true, wait for any in-flight cleanup run to finish before returning.
  optional bool synchronous = 2;
}

// Carries the switch state that was in effect before this request applied.
message SetSnapshotCleanupResponse {
  required bool prev_snapshot_cleanup = 1;
}

// Empty request: the query needs no parameters.
message IsSnapshotCleanupEnabledRequest {
}

// Current state of the TTL-based auto snapshot cleanup.
message IsSnapshotCleanupEnabledResponse {
  required bool enabled = 1;
}
service MasterService {
/** Used by the client to get the number of regions that have received the updated schema */
rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest)
@ -871,4 +888,18 @@ service MasterService {
/** returns a list of namespace names */
rpc ListNamespaces(ListNamespacesRequest)
returns(ListNamespacesResponse);
/**
 * Turn on/off snapshot auto-cleanup based on TTL expiration.
 * Returns the previous switch state in the response.
 */
rpc SwitchSnapshotCleanup (SetSnapshotCleanupRequest)
  returns (SetSnapshotCleanupResponse);

/**
 * Determine if snapshot auto-cleanup based on TTL expiration is turned on.
 */
rpc IsSnapshotCleanupEnabled (IsSnapshotCleanupEnabledRequest)
  returns (IsSnapshotCleanupEnabledResponse);
}

View File

@ -0,0 +1,31 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// This file contains protocol buffers to represent the state of the snapshot auto cleanup based on TTL
package hbase.pb;

option java_package = "org.apache.hadoop.hbase.protobuf.generated";
option java_outer_classname = "SnapshotCleanupProtos";
option java_generic_services = true;
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;

// Persisted switch state (e.g. in ZooKeeper) for the TTL-based snapshot cleanup chore.
message SnapshotCleanupState {
  required bool snapshot_cleanup_enabled = 1;
}

View File

@ -174,6 +174,7 @@ import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
import org.apache.hadoop.hbase.zookeeper.RegionServerTracker;
import org.apache.hadoop.hbase.zookeeper.SnapshotCleanupTracker;
import org.apache.hadoop.hbase.zookeeper.SplitOrMergeTracker;
import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@ -284,6 +285,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
/** Namespace stuff */
private TableNamespaceManager tableNamespaceManager;
// Tracker for auto snapshot cleanup state
SnapshotCleanupTracker snapshotCleanupTracker;
//Tracker for master maintenance mode setting
private MasterMaintenanceModeTracker maintenanceModeTracker;
@ -685,6 +689,9 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
this.serverManager);
this.drainingServerTracker.start();
this.snapshotCleanupTracker = new SnapshotCleanupTracker(zooKeeper, this);
this.snapshotCleanupTracker.start();
this.maintenanceModeTracker = new MasterMaintenanceModeTracker(zooKeeper);
this.maintenanceModeTracker.start();
@ -1254,15 +1261,15 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
getMasterFileSystem().getFileSystem(), archiveDir, cleanerPool, params);
getChoreService().scheduleChore(hfileCleaner);
final boolean isSnapshotChoreDisabled = conf.getBoolean(HConstants.SNAPSHOT_CLEANER_DISABLE,
false);
if (isSnapshotChoreDisabled) {
final boolean isSnapshotChoreEnabled = this.snapshotCleanupTracker
.isSnapshotCleanupEnabled();
this.snapshotCleanerChore = new SnapshotCleanerChore(this, conf, getSnapshotManager());
if (isSnapshotChoreEnabled) {
getChoreService().scheduleChore(this.snapshotCleanerChore);
} else {
if (LOG.isTraceEnabled()) {
LOG.trace("Snapshot Cleaner Chore is disabled. Not starting up the chore..");
}
} else {
this.snapshotCleanerChore = new SnapshotCleanerChore(this, conf, getSnapshotManager());
getChoreService().scheduleChore(this.snapshotCleanerChore);
}
serviceStarted = true;
@ -1347,6 +1354,37 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
procedureExecutor.start(numThreads, abortOnCorruption);
}
/**
 * Turn on/off Snapshot Cleanup Chore
 *
 * @param on indicates whether Snapshot Cleanup Chore is to be run
 * @param synchronous if <code>true</code>, perform the switch while holding the chore's monitor,
 *   so a concurrent cleanup run is waited out first — presumably SnapshotCleanerChore
 *   synchronizes on itself while running; confirm in SnapshotCleanerChore
 */
void switchSnapshotCleanup(final boolean on, final boolean synchronous) {
  if (synchronous) {
    // NOTE(review): locks this.snapshotCleanerChore; assumes the field is non-null here
    // (it is assigned before the chore is first scheduled) -- verify initialization order.
    synchronized (this.snapshotCleanerChore) {
      switchSnapshotCleanup(on);
    }
  } else {
    switchSnapshotCleanup(on);
  }
}
// Persists the switch state to ZooKeeper via the tracker, then schedules or cancels
// the cleaner chore to match. A ZooKeeper failure is logged and the chore state is
// left unchanged (best-effort; the error is not propagated to the caller).
private void switchSnapshotCleanup(final boolean on) {
  try {
    snapshotCleanupTracker.setSnapshotCleanupEnabled(on);
    if (on) {
      // Avoid double-scheduling if the chore is already running.
      if (!getChoreService().isChoreScheduled(this.snapshotCleanerChore)) {
        getChoreService().scheduleChore(this.snapshotCleanerChore);
      }
    } else {
      getChoreService().cancelChore(this.snapshotCleanerChore);
    }
  } catch (KeeperException e) {
    LOG.error("Error updating snapshot cleanup mode to " + on, e);
  }
}
private void stopProcedureExecutor() {
if (procedureExecutor != null) {
procedureExecutor.stop();

View File

@ -123,6 +123,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRe
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespacesRequest;
@ -168,6 +170,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunn
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetQuotaResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
@ -1356,6 +1360,55 @@ public class MasterRpcServices extends RSRpcServices
}
}
@Override
public SetSnapshotCleanupResponse switchSnapshotCleanup(
RpcController controller, SetSnapshotCleanupRequest request)
throws ServiceException {
try {
master.checkInitialized();
final boolean enabled = request.getEnabled();
final boolean isSynchronous = request.hasSynchronous() && request.getSynchronous();
final boolean prevSnapshotCleanupRunning = this.switchSnapshotCleanup(enabled, isSynchronous);
return SetSnapshotCleanupResponse.newBuilder()
.setPrevSnapshotCleanup(prevSnapshotCleanupRunning).build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled(
RpcController controller, IsSnapshotCleanupEnabledRequest request)
throws ServiceException {
try {
master.checkInitialized();
final boolean isSnapshotCleanupEnabled = master.snapshotCleanupTracker
.isSnapshotCleanupEnabled();
return IsSnapshotCleanupEnabledResponse.newBuilder()
.setEnabled(isSnapshotCleanupEnabled).build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
/**
* Turn on/off snapshot auto-cleanup based on TTL
*
* @param enabledNewVal Set to <code>true</code> to enable, <code>false</code> to disable
* @param synchronous If <code>true</code>, it waits until current snapshot cleanup is completed,
* if outstanding
* @return previous snapshot auto-cleanup mode
*/
private synchronized boolean switchSnapshotCleanup(final boolean enabledNewVal,
final boolean synchronous) {
final boolean oldValue = master.snapshotCleanupTracker.isSnapshotCleanupEnabled();
master.switchSnapshotCleanup(enabledNewVal, synchronous);
LOG.info(master.getClientIdAuditPrefix() + " Successfully set snapshot cleanup to {}" +
enabledNewVal);
return oldValue;
}
@Override
public RunCatalogScanResponse runCatalogScan(RpcController c,
RunCatalogScanRequest req) throws ServiceException {

View File

@ -0,0 +1,112 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.zookeeper;
import java.io.IOException;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotCleanupProtos;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.zookeeper.KeeperException;
/**
 * Tracks the on/off state of TTL-based snapshot auto cleanup in ZooKeeper.
 * Reads go through the inherited node tracker cache; writes push protobuf
 * state (with the PB magic prefix) to the cleanup znode.
 */
@InterfaceAudience.Private
public class SnapshotCleanupTracker extends ZooKeeperNodeTracker {

  /**
   * Constructs a new ZK node tracker.
   *
   * <p>After construction, use {@link #start} to kick off tracking.
   *
   * @param watcher reference to the {@link ZooKeeperWatcher} which also contains configuration
   *                and constants
   * @param abortable used to abort if a fatal error occurs
   */
  public SnapshotCleanupTracker(ZooKeeperWatcher watcher, Abortable abortable) {
    super(watcher, watcher.snapshotCleanupZNode, abortable);
  }

  /**
   * Returns the current state of the snapshot auto cleanup based on TTL.
   *
   * @return <code>true</code> if the snapshot auto cleanup is enabled,
   *   <code>false</code> otherwise.
   */
  public boolean isSnapshotCleanupEnabled() {
    final byte[] znodeData = super.getData(false);
    if (znodeData == null) {
      // No state recorded in ZK yet: cleanup defaults to enabled.
      return true;
    }
    try {
      return parseFrom(znodeData).getSnapshotCleanupEnabled();
    } catch (DeserializationException dex) {
      LOG.error("ZK state for Snapshot Cleanup could not be parsed " +
          Bytes.toStringBinary(znodeData), dex);
      // Unparseable state: report disabled to stay on the safe side.
      return false;
    }
  }

  /**
   * Set snapshot auto clean on/off.
   *
   * @param snapshotCleanupEnabled true if the snapshot auto cleanup should be on,
   *   false otherwise
   * @throws KeeperException if ZooKeeper operation fails
   */
  public void setSnapshotCleanupEnabled(final boolean snapshotCleanupEnabled)
      throws KeeperException {
    final byte[] znodeData = toByteArray(snapshotCleanupEnabled);
    try {
      ZKUtil.setData(watcher, watcher.snapshotCleanupZNode, znodeData);
    } catch (KeeperException.NoNodeException nne) {
      // First write: the znode does not exist yet, so create it and watch it.
      ZKUtil.createAndWatch(watcher, watcher.snapshotCleanupZNode, znodeData);
    }
    // Refresh the local tracker cache immediately rather than waiting for the watch.
    super.nodeDataChanged(watcher.snapshotCleanupZNode);
  }

  /** Serializes the flag as a PB-magic-prefixed SnapshotCleanupState message. */
  private byte[] toByteArray(final boolean isSnapshotCleanupEnabled) {
    final SnapshotCleanupProtos.SnapshotCleanupState state =
        SnapshotCleanupProtos.SnapshotCleanupState.newBuilder()
            .setSnapshotCleanupEnabled(isSnapshotCleanupEnabled)
            .build();
    return ProtobufUtil.prependPBMagic(state.toByteArray());
  }

  /** Parses a PB-magic-prefixed byte array back into a SnapshotCleanupState. */
  private SnapshotCleanupProtos.SnapshotCleanupState parseFrom(final byte[] pbBytes)
      throws DeserializationException {
    ProtobufUtil.expectPBMagicPrefix(pbBytes);
    final SnapshotCleanupProtos.SnapshotCleanupState.Builder builder =
        SnapshotCleanupProtos.SnapshotCleanupState.newBuilder();
    try {
      final int magicLen = ProtobufUtil.lengthOfPBMagic();
      ProtobufUtil.mergeFrom(builder, pbBytes, magicLen, pbBytes.length - magicLen);
    } catch (IOException e) {
      throw new DeserializationException(e);
    }
    return builder.build();
  }
}

View File

@ -864,4 +864,31 @@ public class TestAdmin2 {
Assert.assertEquals(expectedStoreFilesSize, store.getSize());
}
}
  // Exercises the snapshot-cleanup switch with synchronous=false (asynchronous switch).
  @Test
  public void testSnapshotCleanupAsync() throws Exception {
    testSnapshotCleanup(false);
  }

  // Exercises the snapshot-cleanup switch with synchronous=true (waits on the chore lock).
  @Test
  public void testSnapshotCleanupSync() throws Exception {
    testSnapshotCleanup(true);
  }
private void testSnapshotCleanup(final boolean synchronous) throws IOException {
final boolean initialState = admin.isSnapshotCleanupEnabled();
// Switch the snapshot auto cleanup state to opposite to initial state
boolean prevState = admin.snapshotCleanupSwitch(!initialState, synchronous);
// The previous state should be the original state we observed
assertEquals(initialState, prevState);
// Current state should be opposite of the initial state
assertEquals(!initialState, admin.isSnapshotCleanupEnabled());
// Reset the state back to what it was initially
prevState = admin.snapshotCleanupSwitch(initialState, synchronous);
// The previous state should be the opposite of the initial state
assertEquals(!initialState, prevState);
// Current state should be the original state again
assertEquals(initialState, admin.isSnapshotCleanupEnabled());
}
}

View File

@ -98,7 +98,6 @@ public class TestSnapshotCleanerChore {
snapshotManager = Mockito.mock(SnapshotManager.class);
Stoppable stopper = new StoppableImplementation();
Configuration conf = getSnapshotCleanerConf();
conf.setStrings("hbase.master.cleaner.snapshot.disable", "false");
SnapshotCleanerChore snapshotCleanerChore =
new SnapshotCleanerChore(stopper, conf, snapshotManager);
List<SnapshotDescription> snapshotDescriptionList = new ArrayList<>();

View File

@ -22,10 +22,12 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import com.google.common.util.concurrent.Uninterruptibles;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
@ -50,8 +52,11 @@ import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.regionserver.CompactedHFilesDischarger;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.regionserver.HRegion;
@ -66,6 +71,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
@ -131,6 +137,7 @@ public class TestSnapshotFromMaster {
conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
ConstantSizeRegionSplitPolicy.class.getName());
conf.setInt("hbase.hfile.compactions.cleaner.interval", 20 * 1000);
conf.setInt("hbase.master.cleaner.snapshot.interval", 500);
}
@Before
@ -278,6 +285,89 @@ public class TestSnapshotFromMaster {
master.getMasterRpcServices().deleteSnapshot(null, request);
}
  // Verifies that with auto-cleanup enabled, a snapshot whose TTL has expired
  // is removed from the completed-snapshots listing while a longer-TTL one survives.
  @Test
  public void testGetCompletedSnapshotsWithCleanup() throws Exception {
    // Enable auto snapshot cleanup for the cluster
    SetSnapshotCleanupRequest setSnapshotCleanupRequest =
        SetSnapshotCleanupRequest.newBuilder().setEnabled(true).build();
    master.getMasterRpcServices().switchSnapshotCleanup(null, setSnapshotCleanupRequest);
    // first check when there are no snapshots
    GetCompletedSnapshotsRequest request = GetCompletedSnapshotsRequest.newBuilder().build();
    GetCompletedSnapshotsResponse response =
        master.getMasterRpcServices().getCompletedSnapshots(null, request);
    assertEquals("Found unexpected number of snapshots", 0, response.getSnapshotsCount());
    // write two snapshots to the fs: one with a short TTL (1) and one with a longer TTL (10)
    createSnapshotWithTtl("snapshot_01", 1L);
    createSnapshotWithTtl("snapshot_02", 10L);
    // check that both snapshots are listed
    response = master.getMasterRpcServices().getCompletedSnapshots(null, request);
    assertEquals("Found unexpected number of snapshots", 2, response.getSnapshotsCount());
    // check that the short-TTL snapshot is auto cleaned after its TTL expires
    // (cleaner chore interval is shortened in setupConf, so 2s is enough)
    Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
    response = master.getMasterRpcServices().getCompletedSnapshots(null, request);
    assertEquals("Found unexpected number of snapshots", 1, response.getSnapshotsCount());
  }
  // Verifies that with auto-cleanup disabled, expired-TTL snapshots are NOT removed.
  @Test
  public void testGetCompletedSnapshotsWithoutCleanup() throws Exception {
    // Disable auto snapshot cleanup for the cluster
    SetSnapshotCleanupRequest setSnapshotCleanupRequest =
        SetSnapshotCleanupRequest.newBuilder().setEnabled(false).build();
    master.getMasterRpcServices().switchSnapshotCleanup(null, setSnapshotCleanupRequest);
    // first check when there are no snapshots
    GetCompletedSnapshotsRequest request = GetCompletedSnapshotsRequest.newBuilder().build();
    GetCompletedSnapshotsResponse response =
        master.getMasterRpcServices().getCompletedSnapshots(null, request);
    assertEquals("Found unexpected number of snapshots", 0, response.getSnapshotsCount());
    // write two snapshots to the fs, both with a short TTL
    createSnapshotWithTtl("snapshot_02", 1L);
    createSnapshotWithTtl("snapshot_03", 1L);
    // check that both snapshots are listed
    response = master.getMasterRpcServices().getCompletedSnapshots(null, request);
    assertEquals("Found unexpected number of snapshots", 2, response.getSnapshotsCount());
    // check that no snapshot is auto cleaned even after the TTLs have expired
    Uninterruptibles.sleepUninterruptibly(2, TimeUnit.SECONDS);
    response = master.getMasterRpcServices().getCompletedSnapshots(null, request);
    assertEquals("Found unexpected number of snapshots", 2, response.getSnapshotsCount());
  }
@Test
public void testSnapshotCleanupStatus() throws Exception {
// Enable auto snapshot cleanup for the cluster
SetSnapshotCleanupRequest setSnapshotCleanupRequest =
SetSnapshotCleanupRequest.newBuilder().setEnabled(true).build();
master.getMasterRpcServices().switchSnapshotCleanup(null, setSnapshotCleanupRequest);
// Check if auto snapshot cleanup is enabled
IsSnapshotCleanupEnabledRequest isSnapshotCleanupEnabledRequest =
IsSnapshotCleanupEnabledRequest.newBuilder().build();
IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabledResponse =
master.getMasterRpcServices().isSnapshotCleanupEnabled(null,
isSnapshotCleanupEnabledRequest);
Assert.assertTrue(isSnapshotCleanupEnabledResponse.getEnabled());
// Disable auto snapshot cleanup for the cluster
setSnapshotCleanupRequest = SetSnapshotCleanupRequest.newBuilder()
.setEnabled(false).build();
master.getMasterRpcServices().switchSnapshotCleanup(null, setSnapshotCleanupRequest);
// Check if auto snapshot cleanup is disabled
isSnapshotCleanupEnabledRequest = IsSnapshotCleanupEnabledRequest
.newBuilder().build();
isSnapshotCleanupEnabledResponse =
master.getMasterRpcServices().isSnapshotCleanupEnabled(null,
isSnapshotCleanupEnabledRequest);
Assert.assertFalse(isSnapshotCleanupEnabledResponse.getEnabled());
}
/**
* Test that the snapshot hfile archive cleaner works correctly. HFiles that are in snapshots
* should be retained, while those that are not in a snapshot should be deleted.
@ -412,6 +502,16 @@ public class TestSnapshotFromMaster {
UTIL.getHBaseCluster().getMaster().getHFileCleaner().chore();
}
  /**
   * Writes a mocked V2 snapshot with the given name and TTL directly to the
   * snapshot directory of the test filesystem and commits it.
   *
   * @param snapshotName name of the snapshot to create
   * @param ttl time-to-live recorded in the snapshot description
   *          -- NOTE(review): unit (seconds) assumed from the 1L/10L usage in the
   *          tests above; confirm against SnapshotDescription semantics
   * @return the committed snapshot's description
   * @throws IOException if writing the snapshot fails
   */
  private SnapshotDescription createSnapshotWithTtl(final String snapshotName, final long ttl)
      throws IOException {
    SnapshotTestingUtils.SnapshotMock snapshotMock =
        new SnapshotTestingUtils.SnapshotMock(UTIL.getConfiguration(), fs, rootDir);
    SnapshotTestingUtils.SnapshotMock.SnapshotBuilder builder =
        snapshotMock.createSnapshotV2(snapshotName, "test", 0, ttl);
    builder.commit();
    return builder.getSnapshotDescription();
  }
@Test
public void testAsyncSnapshotWillNotBlockSnapshotHFileCleaner() throws Exception {
// Write some data

View File

@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
import org.apache.hadoop.hbase.util.FSVisitor;
import org.apache.hadoop.hbase.util.FSUtils;
@ -666,6 +667,12 @@ public final class SnapshotTestingUtils {
return createSnapshot(snapshotName, tableName, numRegions, SnapshotManifestV2.DESCRIPTOR_VERSION);
}
    /**
     * Creates a V2-format snapshot builder whose description carries the
     * supplied TTL.
     *
     * @param ttl time-to-live stored in the snapshot description
     */
    public SnapshotBuilder createSnapshotV2(final String snapshotName, final String tableName,
        final int numRegions, final long ttl) throws IOException {
      return createSnapshot(snapshotName, tableName, numRegions,
          SnapshotManifestV2.DESCRIPTOR_VERSION, ttl);
    }
private SnapshotBuilder createSnapshot(final String snapshotName, final String tableName,
final int version) throws IOException {
return createSnapshot(snapshotName, tableName, TEST_NUM_REGIONS, version);
@ -687,6 +694,22 @@ public final class SnapshotTestingUtils {
return new SnapshotBuilder(conf, fs, rootDir, htd, desc, regions);
}
    /**
     * Builds a snapshot description — including the current creation time and
     * the supplied TTL — writes the snapshot info file into the working
     * directory, and returns a builder for populating the snapshot's regions.
     */
    private SnapshotBuilder createSnapshot(final String snapshotName, final String tableName,
        final int numRegions, final int version, final long ttl) throws IOException {
      HTableDescriptor htd = createHtd(tableName);
      RegionData[] regions = createTable(htd, numRegions);
      SnapshotDescription desc = SnapshotDescription.newBuilder()
          .setTable(htd.getTableName().getNameAsString())
          .setName(snapshotName)
          .setVersion(version)
          // Use the managed clock so tests can control "now".
          .setCreationTime(EnvironmentEdgeManager.currentTime())
          .setTtl(ttl)
          .build();
      Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf);
      SnapshotDescriptionUtils.writeSnapshotInfo(desc, workingDir, fs);
      return new SnapshotBuilder(conf, fs, rootDir, htd, desc, regions);
    }
public HTableDescriptor createHtd(final String tableName) {
HTableDescriptor htd = new HTableDescriptor(tableName);
htd.addFamily(new HColumnDescriptor(TEST_FAMILY));

View File

@ -468,6 +468,22 @@ module Hbase
@admin.getTableDescriptor(TableName.valueOf(table_name)).toStringTableAttributes
end
#----------------------------------------------------------------------------------------------
# Enable/disable snapshot auto-cleanup based on TTL expiration
# Returns previous snapshot auto-cleanup switch setting.
def snapshot_cleanup_switch(enable_disable)
@admin.snapshotCleanupSwitch(
java.lang.Boolean.valueOf(enable_disable), java.lang.Boolean.valueOf(false)
)
end
#----------------------------------------------------------------------------------------------
# Query the current state of the snapshot auto-cleanup based on TTL
# Returns the snapshot auto-cleanup state (true if enabled)
def snapshot_cleanup_enabled?
@admin.isSnapshotCleanupEnabled
end
#----------------------------------------------------------------------------------------------
# Truncates table (deletes all records by recreating the table)
def truncate(table_name, conf = @conf)

View File

@ -350,6 +350,8 @@ Shell.load_command_group(
compact_rs
compaction_state
trace
snapshot_cleanup_switch
snapshot_cleanup_enabled
splitormerge_switch
splitormerge_enabled
list_deadservers

View File

@ -0,0 +1,39 @@
#
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with this
# work for additional information regarding copyright ownership. The ASF
# licenses this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Prints if snapshot auto cleanup based on TTL is enabled
module Shell
  module Commands
    # Shell command: reports whether TTL-based snapshot auto-cleanup is enabled.
    class SnapshotCleanupEnabled < Command
      def help
        <<-EOF
Query the snapshot auto-cleanup state.
Examples:
  hbase> snapshot_cleanup_enabled
EOF
      end

      def command
        # Render the boolean for the shell and also return it so scripts can use it.
        state = admin.snapshot_cleanup_enabled?
        formatter.row([state.to_s])
        state
      end
    end
  end
end

View File

@ -0,0 +1,43 @@
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Switch snapshot auto-cleanup based on TTL expiration
module Shell
  module Commands
    # Shell command: enables/disables TTL-based snapshot auto-cleanup.
    class SnapshotCleanupSwitch < Command
      def help
        <<-EOF
Enable/Disable snapshot auto-cleanup based on snapshot TTL.
Returns previous snapshot auto-cleanup switch state.
Examples:
  hbase> snapshot_cleanup_switch true
  hbase> snapshot_cleanup_switch false
EOF
      end

      def command(enable_disable)
        # The previous state is returned as the string 'true'/'false' (not a
        # boolean), matching what is printed to the shell.
        prev_state = admin.snapshot_cleanup_switch(enable_disable) ? 'true' : 'false'
        formatter.row(["Previous snapshot cleanup state : #{prev_state}"])
        prev_state
      end
    end
  end
end

View File

@ -2038,11 +2038,43 @@ Value 0 for this config indicates TTL: FOREVER
At any point in time, if snapshot cleanup needs to be stopped — for example, because
of an in-progress snapshot restore — it is advisable to disable the Snapshot Cleaner
with the config:
.Enable/Disable Snapshot Auto Cleanup on running cluster:
`hbase.master.cleaner.snapshot.disable`: "true"
By default, snapshot auto cleanup based on TTL would be enabled
for any new cluster.
At any point in time, if snapshot cleanup is supposed to be stopped due to
some snapshot restore activity or any other reason, it is advisable
to disable it using shell command:
----
hbase> snapshot_cleanup_switch false
----
We can re-enable it using:
----
hbase> snapshot_cleanup_switch true
----
Passing false to the switch disables TTL-based snapshot auto-cleanup
and returns the previous state of the activity
(true: it was running, false: it was already disabled).
A sample output for above commands:
----
Previous snapshot cleanup state : true
Took 0.0069 seconds
=> "true"
----
We can query whether snapshot auto-cleanup is enabled for
the cluster using:
----
hbase> snapshot_cleanup_enabled
----
The command would return output in true/false.
[[ops.snapshots.list]]