HBASE-25548 Optionally allow snapshots to preserve cluster's max file… (#2923)
Signed-off-by: Peter Somogyi <psomogyi@apache.org>
parent 95342a23a1
commit 373dc7788d
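Before the per-file hunks, a note on how the new property is consumed: it rides on the existing snapshot-properties overload of `Admin.snapshot`, the same call the new test below exercises. A minimal client-side sketch follows; the connection setup, table name, and snapshot name are illustrative, not taken from this commit.

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SnapshotWithMaxFileSize {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Snapshot properties travel as a Map; TTL already used this channel, and this
      // commit teaches the snapshot path to also carry TableDescriptorBuilder.MAX_FILESIZE.
      Map<String, Object> props = new HashMap<>();
      props.put(TableDescriptorBuilder.MAX_FILESIZE, 21474836480L); // 20 GiB
      admin.snapshot("snap01", TableName.valueOf("table01"), props);
    }
  }
}
```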
hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotDescription.java

@@ -38,6 +38,8 @@ public class SnapshotDescription {
   private final long ttl;
   private final int version;
+
+  private final long maxFileSize;
 
   public SnapshotDescription(String name) {
     this(name, null);
   }

@@ -90,14 +92,17 @@ public class SnapshotDescription {
     this.snapShotType = type;
     this.owner = owner;
     this.creationTime = creationTime;
-    this.ttl = getTtlFromSnapshotProps(snapshotProps);
+    this.ttl = getLongFromSnapshotProps(snapshotProps, "TTL");
     this.version = version;
+    this.maxFileSize = getLongFromSnapshotProps(snapshotProps, TableDescriptorBuilder.MAX_FILESIZE);
   }
 
-  private long getTtlFromSnapshotProps(Map<String, Object> snapshotProps) {
-    return MapUtils.getLongValue(snapshotProps, "TTL", -1);
+  private long getLongFromSnapshotProps(Map<String, Object> snapshotProps, String property) {
+    return MapUtils.getLongValue(snapshotProps, property, -1);
   }
 
   /**
    * SnapshotDescription Parameterized Constructor
    *

@@ -144,6 +149,8 @@ public class SnapshotDescription {
     return this.version;
   }
 
+  public long getMaxFileSize() { return maxFileSize; }
+
   @Override
   public String toString() {
     return new ToStringBuilder(this)

@@ -154,6 +161,7 @@ public class SnapshotDescription {
       .append("creationTime", creationTime)
       .append("ttl", ttl)
       .append("version", version)
+      .append("maxFileSize", maxFileSize)
       .toString();
   }
 }
hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java

@@ -3092,6 +3092,9 @@ public final class ProtobufUtil {
     if (snapshotDesc.getVersion() != -1) {
       builder.setVersion(snapshotDesc.getVersion());
     }
+    if (snapshotDesc.getMaxFileSize() != -1) {
+      builder.setMaxFileSize(snapshotDesc.getMaxFileSize());
+    }
     builder.setType(ProtobufUtil.createProtosSnapShotDescType(snapshotDesc.getType()));
     return builder.build();
   }

@@ -3107,6 +3110,7 @@ public final class ProtobufUtil {
   createSnapshotDesc(SnapshotProtos.SnapshotDescription snapshotDesc) {
     final Map<String, Object> snapshotProps = new HashMap<>();
     snapshotProps.put("TTL", snapshotDesc.getTtl());
+    snapshotProps.put(TableDescriptorBuilder.MAX_FILESIZE, snapshotDesc.getMaxFileSize());
     return new SnapshotDescription(snapshotDesc.getName(),
       snapshotDesc.hasTable() ? TableName.valueOf(snapshotDesc.getTable()) : null,
       createSnapshotType(snapshotDesc.getType()), snapshotDesc.getOwner(),
hbase-protocol-shaded/src/main/protobuf/Snapshot.proto

@@ -45,6 +45,7 @@ message SnapshotDescription {
   optional string owner = 6;
   optional UsersAndPermissions users_and_permissions = 7;
   optional int64 ttl = 8 [default = 0];
+  optional int64 max_file_size = 9 [default = 0];
 }
 
 message SnapshotFileInfo {
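The new field is `optional` with `[default = 0]`, so older writers that never set it still parse cleanly. Below is a small sketch of what the generated shaded class should report for an unset field; the generated class location is inferred from the `ProtobufUtil` hunks above and is an assumption, not part of this diff.

```java
// Assumption: the shaded generated class lives under this package, as the
// SnapshotProtos references in the ProtobufUtil hunks suggest.
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;

public class MaxFileSizeProtoDefault {
  public static void main(String[] args) {
    SnapshotProtos.SnapshotDescription desc =
      SnapshotProtos.SnapshotDescription.newBuilder().setName("snap01").build();
    // An unset optional int64 reads back as its declared default (0), not -1.
    System.out.println(desc.hasMaxFileSize()); // false
    System.out.println(desc.getMaxFileSize()); // 0
  }
}
```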
hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java

@@ -150,6 +150,10 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
   /** number of current operations running on the master */
   public static final int SNAPSHOT_POOL_THREADS_DEFAULT = 1;
 
+  /** Conf key for preserving original max file size configs */
+  public static final String SNAPSHOT_MAX_FILE_SIZE_PRESERVE =
+    "hbase.snapshot.max.filesize.preserve";
+
   private boolean stopped;
   private MasterServices master;  // Needed by TableEventHandlers
   private ProcedureCoordinator coordinator;
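This hunk only declares the `hbase.snapshot.max.filesize.preserve` key; the code that consults it is not part of the excerpt. A hedged sketch of the conventional read path, assuming boolean semantics and a `false` default (both assumptions, not shown in this diff):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class PreserveMaxFileSizeFlag {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumption: the key toggles whether snapshots record the table's max file size;
    // the diff above only defines the constant, not how it is read.
    boolean preserve = conf.getBoolean("hbase.snapshot.max.filesize.preserve", false);
    System.out.println("preserve max filesize in snapshots: " + preserve);
  }
}
```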
hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java

@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase.master.snapshot;
 
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.HashSet;
 import java.util.List;

@@ -31,6 +30,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;

@@ -139,12 +139,17 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
   }
 
   private TableDescriptor loadTableDescriptor()
-      throws FileNotFoundException, IOException {
+      throws IOException {
     TableDescriptor htd =
       this.master.getTableDescriptors().get(snapshotTable);
     if (htd == null) {
       throw new IOException("TableDescriptor missing for " + snapshotTable);
     }
+    if (htd.getMaxFileSize()==-1 &&
+        this.snapshot.getMaxFileSize()>0) {
+      htd = TableDescriptorBuilder.newBuilder(htd).setValue(TableDescriptorBuilder.MAX_FILESIZE,
+        Long.toString(this.snapshot.getMaxFileSize())).build();
+    }
     return htd;
   }
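One subtlety in `loadTableDescriptor` above: the Java side uses -1 for "unset" (the `MapUtils` default in `SnapshotDescription`), while the protobuf field defaults to 0, so the guard is `> 0` rather than `!= -1`. A standalone illustration of that interplay; `commons-collections4` is assumed here, as the exact `MapUtils` import in `SnapshotDescription` is not visible in this diff.

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.commons.collections4.MapUtils;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class MaxFileSizeSentinels {
  public static void main(String[] args) {
    Map<String, Object> props = new HashMap<>();
    // Key absent: the Java-side default of -1 means "not specified by the user".
    System.out.println(MapUtils.getLongValue(props, TableDescriptorBuilder.MAX_FILESIZE, -1)); // -1
    // After a protobuf round trip, an unset field arrives as the proto default, 0 ...
    props.put(TableDescriptorBuilder.MAX_FILESIZE, 0L);
    System.out.println(MapUtils.getLongValue(props, TableDescriptorBuilder.MAX_FILESIZE, -1)); // 0
    // ... so the handler's "> 0" check treats both -1 and 0 as "leave the descriptor alone".
  }
}
```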
hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestTakeSnapshotHandler.java

@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.snapshot;
+
+import static org.junit.Assert.assertEquals;
+
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+/**
+ * Unfortunately, we couldn't test TakeSnapshotHandler using mocks, because it relies on TableLock,
+ * which is tightly coupled to the LockManager and LockProcedure classes; both are final, which
+ * prevents us from mocking their behaviour. It seems like overkill to emulate a whole cluster
+ * run for such a small optional property behaviour.
+ */
+@Category({ MediumTests.class })
+public class TestTakeSnapshotHandler {
+
+  private static HBaseTestingUtility UTIL;
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestTakeSnapshotHandler.class);
+
+  @Rule
+  public TestName name = new TestName();
+
+  @Before
+  public void setup() {
+    UTIL = new HBaseTestingUtility();
+  }
+
+  public TableDescriptor createTableInsertDataAndTakeSnapshot(Map<String, Object> snapshotProps)
+      throws Exception {
+    TableDescriptor descriptor =
+      TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName()))
+        .setColumnFamily(
+          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("f")).build()).build();
+    UTIL.getConnection().getAdmin().createTable(descriptor);
+    Table table = UTIL.getConnection().getTable(descriptor.getTableName());
+    Put put = new Put(Bytes.toBytes("1"));
+    put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("1"), Bytes.toBytes("v1"));
+    table.put(put);
+    String snapName = "snap" + name.getMethodName();
+    UTIL.getAdmin().snapshot(snapName, descriptor.getTableName(), snapshotProps);
+    TableName cloned = TableName.valueOf(name.getMethodName() + "clone");
+    UTIL.getAdmin().cloneSnapshot(snapName, cloned);
+    return descriptor;
+  }
+
+  @Test
+  public void testPreparePreserveMaxFileSizeEnabled() throws Exception {
+    UTIL.startMiniCluster();
+    Map<String, Object> snapshotProps = new HashMap<>();
+    snapshotProps.put(TableDescriptorBuilder.MAX_FILESIZE, Long.parseLong("21474836480"));
+    TableDescriptor descriptor = createTableInsertDataAndTakeSnapshot(snapshotProps);
+    TableName cloned = TableName.valueOf(name.getMethodName() + "clone");
+    assertEquals(-1,
+      UTIL.getAdmin().getDescriptor(descriptor.getTableName()).getMaxFileSize());
+    assertEquals(21474836480L, UTIL.getAdmin().getDescriptor(cloned).getMaxFileSize());
+  }
+
+  @Test
+  public void testPreparePreserveMaxFileSizeDisabled() throws Exception {
+    UTIL.startMiniCluster();
+    TableDescriptor descriptor = createTableInsertDataAndTakeSnapshot(null);
+    TableName cloned = TableName.valueOf(name.getMethodName() + "clone");
+    assertEquals(-1,
+      UTIL.getAdmin().getDescriptor(descriptor.getTableName()).getMaxFileSize());
+    assertEquals(-1, UTIL.getAdmin().getDescriptor(cloned).getMaxFileSize());
+  }
+
+  @After
+  public void shutdown() throws Exception {
+    UTIL.shutdownMiniCluster();
+  }
+}
hbase-shell/src/main/ruby/hbase/admin.rb

@@ -1206,6 +1206,9 @@ module Hbase
       ttl = ttl ? ttl.to_java(:long) : -1
       snapshot_props = java.util.HashMap.new
       snapshot_props.put("TTL", ttl)
+      max_filesize = arg[MAX_FILESIZE]
+      max_filesize = max_filesize ? max_filesize.to_java(:long) : -1
+      snapshot_props.put("MAX_FILESIZE", max_filesize)
       if arg[SKIP_FLUSH] == true
         @admin.snapshot(snapshot_name, table_name,
                         org.apache.hadoop.hbase.client.SnapshotType::SKIPFLUSH, snapshot_props)
hbase-shell/src/main/ruby/shell/commands/snapshot.rb

@@ -24,7 +24,7 @@ module Shell
 Take a snapshot of specified table. Examples:
 
   hbase> snapshot 'sourceTable', 'snapshotName'
-  hbase> snapshot 'namespace:sourceTable', 'snapshotName', {SKIP_FLUSH => true}
+  hbase> snapshot 'namespace:sourceTable', 'snapshotName', {SKIP_FLUSH => true, MAX_FILESIZE => 21474836480}
 EOF
       end
src/main/asciidoc/_chapters/ops_mgt.adoc

@@ -3167,6 +3167,21 @@ providing default TTL in sec for key: `hbase.master.snapshot.ttl`.
 Value 0 for this config indicates TTL: FOREVER
 
+.Take a snapshot with custom MAX_FILESIZE
+
+Optionally, snapshots can be created with a custom max file size configuration that will be
+used by cloned tables, instead of the global `hbase.hregion.max.filesize` configuration property.
+This is mostly useful when exporting snapshots between different clusters. If the HBase cluster where
+the snapshot is originally taken has a much larger value set for `hbase.hregion.max.filesize` than
+one or more clusters the snapshot is being exported to, a storm of region splits may occur when
+restoring the snapshot on the destination clusters. Specifying `MAX_FILESIZE` in the properties passed
+to the `snapshot` command saves the given value into the table's `MAX_FILESIZE`
+descriptor at snapshot creation time. If the table already defines a `MAX_FILESIZE` descriptor,
+this property is ignored and has no effect.
+
+----
+snapshot 'table01', 'snap01', {MAX_FILESIZE => 21474836480}
+----
+
 .Enable/Disable Snapshot Auto Cleanup on running cluster: