HBASE-21098 Improve Snapshot Performance with Temporary Snapshot Directory when rootDir on S3

Signed-off-by: Zach York <zyork@apache.org>
Signed-off-by: Mingliang Liu <liuml07@apache.org>
Tyler Mi 2018-09-04 16:44:59 -07:00 committed by Mingliang Liu
parent b82a1d65dd
commit d40348e8cd
20 changed files with 925 additions and 107 deletions

View File

@ -1316,6 +1316,15 @@ possible configurations would overwhelm and obscure the important.
You can use the {snapshot.name}, {table.name} and {restore.timestamp} variables
to create a name based on what you are restoring.</description>
</property>
<property>
<name>hbase.snapshot.working.dir</name>
<value></value>
<description>Location where the snapshotting process will occur. The location of the
completed snapshots will not change, but the temporary directory where the snapshot
process runs will be set to this location. This can be a filesystem separate from the
root directory, which can improve performance. See HBASE-21098 for more
information.</description>
</property>
<property>
<name>hbase.server.compactchecker.interval.multiplier</name>
<value>1000</value>
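
A minimal sketch of how this new property might be combined with a root directory on S3; the class name and the s3a/hdfs URIs below are placeholders for illustration, not values taken from this change.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SnapshotWorkingDirConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Placeholder locations: root directory on S3, snapshot staging on a faster filesystem.
    conf.set("hbase.rootdir", "s3a://example-bucket/hbase");
    conf.set("hbase.snapshot.working.dir", "hdfs://namenode:8020/hbase/.snapshot-tmp");
    // Completed snapshots still land under the root directory; only the in-progress
    // working copy is built in the configured location.
    System.out.println(conf.get("hbase.snapshot.working.dir"));
  }
}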

View File

@ -972,10 +972,12 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool {
FileSystem outputFs = FileSystem.get(outputRoot.toUri(), destConf);
LOG.debug("outputFs=" + outputFs.getUri().toString() + " outputRoot=" + outputRoot.toString());
boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false);
boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false) ||
conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null;
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, inputRoot);
Path snapshotTmpDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot);
Path snapshotTmpDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot,
destConf);
Path outputSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(targetName, outputRoot);
Path initialOutputSnapshotDir = skipTmp ? outputSnapshotDir : snapshotTmpDir;
@ -985,7 +987,7 @@ public class ExportSnapshot extends AbstractHBaseTool implements Tool {
if (skipTmp) {
needSetOwnerDir = outputSnapshotDir;
} else {
needSetOwnerDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(outputRoot);
needSetOwnerDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(outputRoot, destConf);
if (outputFs.exists(needSetOwnerDir)) {
needSetOwnerDir = snapshotTmpDir;
}

View File

@ -0,0 +1,60 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.snapshot;
import java.io.File;
import java.nio.file.Paths;
import java.util.UUID;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.experimental.categories.Category;
@Category({MediumTests.class})
public class TestExportSnapshotWithTemporaryDirectory extends TestExportSnapshot {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestExportSnapshotWithTemporaryDirectory.class);
protected static String TEMP_DIR = Paths.get("").toAbsolutePath().toString() + Path.SEPARATOR
+ UUID.randomUUID().toString();
@BeforeClass
public static void setUpBeforeClass() throws Exception {
setUpBaseConf(TEST_UTIL.getConfiguration());
TEST_UTIL.startMiniCluster(3);
TEST_UTIL.startMiniMapReduceCluster();
}
@AfterClass
public static void tearDownAfterClass() throws Exception {
TestExportSnapshot.tearDownAfterClass();
FileUtils.deleteDirectory(new File(TEMP_DIR));
}
public static void setUpBaseConf(Configuration conf) {
TestExportSnapshot.setUpBaseConf(conf);
conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, "file://" + TEMP_DIR + "/.tmpdir/");
}
}

View File

@ -54,9 +54,12 @@ public class DisabledTableSnapshotHandler extends TakeSnapshotHandler {
/**
* @param snapshot descriptor of the snapshot to take
* @param masterServices master services provider
* @throws IOException if it cannot access the filesystem of the snapshot
* temporary directory
*/
public DisabledTableSnapshotHandler(SnapshotDescription snapshot,
final MasterServices masterServices, final SnapshotManager snapshotManager) {
final MasterServices masterServices, final SnapshotManager snapshotManager)
throws IOException {
super(snapshot, masterServices, snapshotManager);
}

View File

@ -50,7 +50,7 @@ public class EnabledTableSnapshotHandler extends TakeSnapshotHandler {
private final ProcedureCoordinator coordinator;
public EnabledTableSnapshotHandler(SnapshotDescription snapshot, MasterServices master,
final SnapshotManager manager) {
final SnapshotManager manager) throws IOException {
super(snapshot, master, manager);
this.coordinator = manager.getCoordinator();
}

View File

@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
@ -79,21 +80,20 @@ public final class MasterSnapshotVerifier {
private static final Logger LOG = LoggerFactory.getLogger(MasterSnapshotVerifier.class);
private SnapshotDescription snapshot;
private FileSystem fs;
private Path rootDir;
private FileSystem workingDirFs;
private TableName tableName;
private MasterServices services;
/**
* @param services services for the master
* @param snapshot snapshot to check
* @param rootDir root directory of the hbase installation.
* @param workingDirFs the file system containing the temporary snapshot information
*/
public MasterSnapshotVerifier(MasterServices services, SnapshotDescription snapshot, Path rootDir) {
this.fs = services.getMasterFileSystem().getFileSystem();
public MasterSnapshotVerifier(MasterServices services,
SnapshotDescription snapshot, FileSystem workingDirFs) {
this.workingDirFs = workingDirFs;
this.services = services;
this.snapshot = snapshot;
this.rootDir = rootDir;
this.tableName = TableName.valueOf(snapshot.getTable());
}
@ -107,7 +107,7 @@ public final class MasterSnapshotVerifier {
*/
public void verifySnapshot(Path snapshotDir, Set<String> snapshotServers)
throws CorruptedSnapshotException, IOException {
SnapshotManifest manifest = SnapshotManifest.open(services.getConfiguration(), fs,
SnapshotManifest manifest = SnapshotManifest.open(services.getConfiguration(), workingDirFs,
snapshotDir, snapshot);
// verify snapshot info matches
verifySnapshotDescription(snapshotDir);
@ -124,7 +124,8 @@ public final class MasterSnapshotVerifier {
* @param snapshotDir snapshot directory to check
*/
private void verifySnapshotDescription(Path snapshotDir) throws CorruptedSnapshotException {
SnapshotDescription found = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
SnapshotDescription found = SnapshotDescriptionUtils.readSnapshotInfo(workingDirFs,
snapshotDir);
if (!this.snapshot.equals(found)) {
throw new CorruptedSnapshotException(
"Snapshot read (" + found + ") doesn't equal snapshot we ran (" + snapshot + ").",
@ -206,7 +207,9 @@ public final class MasterSnapshotVerifier {
}
// Verify Snapshot HFiles
SnapshotReferenceUtil.verifySnapshot(services.getConfiguration(), fs, manifest);
// Requires the root directory file system as HFiles are stored in the root directory
SnapshotReferenceUtil.verifySnapshot(services.getConfiguration(),
FSUtils.getRootDirFileSystem(services.getConfiguration()), manifest);
}
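
A small hedged sketch of the two-filesystem split the verifier relies on: the manifest and snapshot description are read through the working directory's filesystem, while HFiles are always resolved against the root directory's filesystem. The helper class is hypothetical; only workingDir.getFileSystem and FSUtils.getRootDirFileSystem come from the patch above.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.util.FSUtils;

final class SnapshotVerifierFilesystemsSketch {
  static FileSystem[] resolve(Configuration conf, Path workingDir) throws IOException {
    // Filesystem backing the in-progress snapshot (honours hbase.snapshot.working.dir).
    FileSystem workingDirFs = workingDir.getFileSystem(conf);
    // Filesystem backing hbase.rootdir, where the referenced HFiles actually live.
    FileSystem rootFs = FSUtils.getRootDirFileSystem(conf);
    return new FileSystem[] { workingDirFs, rootFs };
  }
}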
/**

View File

@ -276,11 +276,11 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
*/
void resetTempDir() throws IOException {
// cleanup any existing snapshots.
Path tmpdir = SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir);
if (master.getMasterFileSystem().getFileSystem().exists(tmpdir)) {
if (!master.getMasterFileSystem().getFileSystem().delete(tmpdir, true)) {
LOG.warn("Couldn't delete working snapshot directory: " + tmpdir);
}
Path tmpdir = SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir,
master.getConfiguration());
FileSystem tmpFs = tmpdir.getFileSystem(master.getConfiguration());
if (!tmpFs.delete(tmpdir, true)) {
LOG.warn("Couldn't delete working snapshot directory: " + tmpdir);
}
}
@ -433,8 +433,8 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
*/
private synchronized void prepareToTakeSnapshot(SnapshotDescription snapshot)
throws HBaseSnapshotException {
FileSystem fs = master.getMasterFileSystem().getFileSystem();
Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir,
master.getConfiguration());
TableName snapshotTable =
TableName.valueOf(snapshot.getTable());
@ -457,15 +457,15 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
}
try {
FileSystem workingDirFS = workingDir.getFileSystem(master.getConfiguration());
// delete the working directory, since we aren't running the snapshot. Likely leftovers
// from a previously failed attempt.
fs.delete(workingDir, true);
workingDirFS.delete(workingDir, true);
// recreate the working directory for the snapshot
if (!fs.mkdirs(workingDir)) {
throw new SnapshotCreationException(
"Couldn't create working directory (" + workingDir + ") for snapshot",
ProtobufUtil.createSnapshotDesc(snapshot));
if (!workingDirFS.mkdirs(workingDir)) {
throw new SnapshotCreationException("Couldn't create working directory (" + workingDir
+ ") for snapshot" , ProtobufUtil.createSnapshotDesc(snapshot));
}
} catch (HBaseSnapshotException e) {
throw e;
@ -479,10 +479,11 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
/**
* Take a snapshot of a disabled table.
* @param snapshot description of the snapshot to take. Modified to be {@link Type#DISABLED}.
* @throws HBaseSnapshotException if the snapshot could not be started
* @throws IOException if the snapshot could not be started or the filesystem for the
* snapshot temporary directory could not be determined
*/
private synchronized void snapshotDisabledTable(SnapshotDescription snapshot)
throws HBaseSnapshotException {
throws IOException {
// setup the snapshot
prepareToTakeSnapshot(snapshot);
@ -498,10 +499,11 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
/**
* Take a snapshot of an enabled table.
* @param snapshot description of the snapshot to take.
* @throws HBaseSnapshotException if the snapshot could not be started
* @throws IOException if the snapshot could not be started or the filesystem for the
* snapshot temporary directory could not be determined
*/
private synchronized void snapshotEnabledTable(SnapshotDescription snapshot)
throws HBaseSnapshotException {
throws IOException {
// setup the snapshot
prepareToTakeSnapshot(snapshot);
@ -520,16 +522,18 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
* @param handler the snapshot handler
*/
private synchronized void snapshotTable(SnapshotDescription snapshot,
final TakeSnapshotHandler handler) throws HBaseSnapshotException {
final TakeSnapshotHandler handler) throws IOException {
try {
handler.prepare();
this.executorService.submit(handler);
this.snapshotHandlers.put(TableName.valueOf(snapshot.getTable()), handler);
} catch (Exception e) {
// cleanup the working directory by trying to delete it from the fs.
Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir,
master.getConfiguration());
FileSystem workingDirFs = workingDir.getFileSystem(master.getConfiguration());
try {
if (!this.master.getMasterFileSystem().getFileSystem().delete(workingDir, true)) {
if (!workingDirFs.delete(workingDir, true)) {
LOG.error("Couldn't delete working directory (" + workingDir + " for snapshot:" +
ClientSnapshotDescriptionUtils.toString(snapshot));
}
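
A condensed sketch of the preparation pattern above, assuming only the Hadoop FileSystem API: all bookkeeping for the in-progress snapshot happens on the working directory's own filesystem rather than the root filesystem. The wrapper class is hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class WorkingDirPrepSketch {
  static void prepare(Path workingDir, Configuration conf) throws IOException {
    FileSystem workingDirFs = workingDir.getFileSystem(conf);
    // Remove leftovers from a previously failed attempt, then recreate the directory.
    workingDirFs.delete(workingDir, true);
    if (!workingDirFs.mkdirs(workingDir)) {
      throw new IOException("Couldn't create working directory " + workingDir);
    }
  }
}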

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.master.snapshot;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
@ -27,6 +28,7 @@ import java.util.concurrent.locks.ReentrantLock;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ServerName;
@ -56,6 +58,8 @@ import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
/**
@ -77,7 +81,8 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
protected final MetricsSnapshot metricsSnapshot = new MetricsSnapshot();
protected final SnapshotDescription snapshot;
protected final Configuration conf;
protected final FileSystem fs;
protected final FileSystem rootFs;
protected final FileSystem workingDirFs;
protected final Path rootDir;
private final Path snapshotDir;
protected final Path workingDir;
@ -94,31 +99,40 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
/**
* @param snapshot descriptor of the snapshot to take
* @param masterServices master services provider
* @throws IllegalArgumentException if the working snapshot directory set from the
* configuration is the same as the completed snapshot directory
* @throws IOException if the file system of the working snapshot directory cannot be
* determined
*/
public TakeSnapshotHandler(SnapshotDescription snapshot, final MasterServices masterServices,
final SnapshotManager snapshotManager) {
final SnapshotManager snapshotManager) throws IOException {
super(masterServices, EventType.C_M_SNAPSHOT_TABLE);
assert snapshot != null : "SnapshotDescription must not be null";
assert masterServices != null : "MasterServices must not be null";
this.master = masterServices;
this.conf = this.master.getConfiguration();
this.rootDir = this.master.getMasterFileSystem().getRootDir();
this.workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir, conf);
Preconditions.checkArgument(!SnapshotDescriptionUtils.isSubDirectoryOf(workingDir, rootDir) ||
SnapshotDescriptionUtils.isWithinDefaultWorkingDir(workingDir, conf),
"The working directory " + workingDir + " cannot be in the root directory unless it is "
+ "within the default working directory");
this.snapshot = snapshot;
this.snapshotManager = snapshotManager;
this.snapshotTable = TableName.valueOf(snapshot.getTable());
this.conf = this.master.getConfiguration();
this.fs = this.master.getMasterFileSystem().getFileSystem();
this.rootDir = this.master.getMasterFileSystem().getRootDir();
this.rootFs = this.master.getMasterFileSystem().getFileSystem();
this.snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
this.workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
this.workingDirFs = this.workingDir.getFileSystem(this.conf);
this.monitor = new ForeignExceptionDispatcher(snapshot.getName());
this.snapshotManifest = SnapshotManifest.create(conf, fs, workingDir, snapshot, monitor);
this.snapshotManifest = SnapshotManifest.create(conf, rootFs, workingDir, snapshot, monitor);
this.tableLock = master.getLockManager().createMasterLock(
snapshotTable, LockType.EXCLUSIVE,
this.getClass().getName() + ": take snapshot " + snapshot.getName());
// prepare the verify
this.verifier = new MasterSnapshotVerifier(masterServices, snapshot, rootDir);
this.verifier = new MasterSnapshotVerifier(masterServices, snapshot, workingDirFs);
// update the running tasks
this.status = TaskMonitor.get().createStatus(
"Taking " + snapshot.getType() + " snapshot on table: " + snapshotTable);
@ -166,7 +180,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
// an external exception that gets captured here.
// write down the snapshot info in the working directory
SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, fs);
SnapshotDescriptionUtils.writeSnapshotInfo(snapshot, workingDir, workingDirFs);
snapshotManifest.addTableDescriptor(this.htd);
monitor.rethrowException();
@ -202,7 +216,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
verifier.verifySnapshot(this.workingDir, serverNames);
// complete the snapshot, atomically moving from tmp to .snapshot dir.
completeSnapshot(this.snapshotDir, this.workingDir, this.fs);
completeSnapshot(this.snapshotDir, this.workingDir, this.rootFs, this.workingDirFs);
msg = "Snapshot " + snapshot.getName() + " of table " + snapshotTable + " completed";
status.markComplete(msg);
LOG.info(msg);
@ -222,7 +236,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
try {
// if the working dir is still present, the snapshot has failed; if it is present, we
// delete it.
if (fs.exists(workingDir) && !this.fs.delete(workingDir, true)) {
if (!workingDirFs.delete(workingDir, true)) {
LOG.error("Couldn't delete snapshot working directory:" + workingDir);
}
} catch (IOException e) {
@ -234,20 +248,35 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
}
/**
* Reset the manager to allow another snapshot to proceed
* Reset the manager to allow another snapshot to proceed.
* Commits the snapshot process by moving the working snapshot
* to the finalized filepath
*
* @param snapshotDir The file path of the completed snapshots
* @param workingDir The file path of the in progress snapshots
* @param fs The file system of the completed snapshots
* @param workingDirFs The file system of the in progress snapshots
*
* @param snapshotDir final path of the snapshot
* @param workingDir directory where the in progress snapshot was built
* @param fs {@link FileSystem} where the snapshot was built
* @throws SnapshotCreationException if the snapshot could not be moved
* @throws IOException if the filesystem could not be reached
*/
public void completeSnapshot(Path snapshotDir, Path workingDir, FileSystem fs)
throws SnapshotCreationException, IOException {
public void completeSnapshot(Path snapshotDir, Path workingDir, FileSystem fs,
FileSystem workingDirFs) throws SnapshotCreationException, IOException {
LOG.debug("Sentinel is done, just moving the snapshot from " + workingDir + " to "
+ snapshotDir);
if (!fs.rename(workingDir, snapshotDir)) {
throw new SnapshotCreationException("Failed to move working directory(" + workingDir
// If the working and completed snapshot directory are on the same file system, attempt
// to rename the working snapshot directory to the completed location. If that fails,
// or the file systems differ, attempt to copy the directory over, throwing an exception
// if this fails
URI workingURI = workingDirFs.getUri();
URI rootURI = fs.getUri();
if ((!workingURI.getScheme().equals(rootURI.getScheme()) ||
!workingURI.getAuthority().equals(rootURI.getAuthority()) ||
workingURI.getUserInfo() == null ||
!workingURI.getUserInfo().equals(rootURI.getUserInfo()) ||
!fs.rename(workingDir, snapshotDir)) && !FileUtil.copy(workingDirFs, workingDir, fs,
snapshotDir, true, true, this.conf)) {
throw new SnapshotCreationException("Failed to copy working directory(" + workingDir
+ ") to completed directory(" + snapshotDir + ").");
}
finished = true;
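
A simplified sketch of the commit step above (it omits the user-info comparison shown in the diff): within a single filesystem the working directory is renamed into place, and across filesystems the finished snapshot is copied over with the source removed on success. The class and method names are hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

final class SnapshotCommitSketch {
  static void commit(FileSystem rootFs, FileSystem workingDirFs, Path workingDir,
      Path snapshotDir, Configuration conf) throws IOException {
    java.net.URI workingUri = workingDirFs.getUri();
    java.net.URI rootUri = rootFs.getUri();
    boolean sameFs = workingUri.getScheme().equals(rootUri.getScheme())
        && java.util.Objects.equals(workingUri.getAuthority(), rootUri.getAuthority());
    // Same filesystem: a plain rename moves the snapshot into its final location.
    boolean committed = sameFs && rootFs.rename(workingDir, snapshotDir);
    if (!committed) {
      // Different filesystems (e.g. HDFS working dir, S3 root dir), or the rename failed:
      // copy the finished snapshot over, deleting the source on success.
      committed = FileUtil.copy(workingDirFs, workingDir, rootFs, snapshotDir,
          true, true, conf);
    }
    if (!committed) {
      throw new IOException("Failed to commit snapshot from " + workingDir
          + " to " + snapshotDir);
    }
  }
}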

View File

@ -4228,7 +4228,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
public void addRegionToSnapshot(SnapshotDescription desc,
ForeignExceptionSnare exnSnare) throws IOException {
Path rootDir = FSUtils.getRootDir(conf);
Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf);
SnapshotManifest manifest = SnapshotManifest.create(conf, getFilesystem(),
snapshotDir, desc, exnSnare);

View File

@ -114,6 +114,12 @@ public final class SnapshotDescriptionUtils {
/** Temporary directory under the snapshot directory to store in-progress snapshots */
public static final String SNAPSHOT_TMP_DIR_NAME = ".tmp";
/**
* The configuration property that determines the filepath of the snapshot
* base working directory
*/
public static final String SNAPSHOT_WORKING_DIR = "hbase.snapshot.working.dir";
/** This tag will be created in in-progress snapshots */
public static final String SNAPSHOT_IN_PROGRESS = ".inprogress";
// snapshot operation values
@ -193,46 +199,52 @@ public final class SnapshotDescriptionUtils {
* @return the final directory for the completed snapshot
*/
public static Path getCompletedSnapshotDir(final String snapshotName, final Path rootDir) {
return getCompletedSnapshotDir(getSnapshotsDir(rootDir), snapshotName);
return getSpecifiedSnapshotDir(getSnapshotsDir(rootDir), snapshotName);
}
/**
* Get the general working directory for snapshots - where they are built, where they are
* temporarily copied on export, etc.
* @param rootDir root directory of the HBase installation
* @param conf Configuration of the HBase instance
* @return Path to the snapshot tmp directory, relative to the passed root directory
*/
public static Path getWorkingSnapshotDir(final Path rootDir) {
return new Path(getSnapshotsDir(rootDir), SNAPSHOT_TMP_DIR_NAME);
public static Path getWorkingSnapshotDir(final Path rootDir, final Configuration conf) {
return new Path(conf.get(SNAPSHOT_WORKING_DIR,
getDefaultWorkingSnapshotDir(rootDir).toString()));
}
/**
* Get the directory to build a snapshot, before it is finalized
* @param snapshot snapshot that will be built
* @param rootDir root directory of the hbase installation
* @param conf Configuration of the HBase instance
* @return {@link Path} where one can build a snapshot
*/
public static Path getWorkingSnapshotDir(SnapshotDescription snapshot, final Path rootDir) {
return getCompletedSnapshotDir(getWorkingSnapshotDir(rootDir), snapshot.getName());
public static Path getWorkingSnapshotDir(SnapshotDescription snapshot, final Path rootDir,
Configuration conf) {
return getWorkingSnapshotDir(snapshot.getName(), rootDir, conf);
}
/**
* Get the directory to build a snapshot, before it is finalized
* @param snapshotName name of the snapshot
* @param rootDir root directory of the hbase installation
* @param conf Configuration of the HBase instance
* @return {@link Path} where one can build a snapshot
*/
public static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir) {
return getCompletedSnapshotDir(getWorkingSnapshotDir(rootDir), snapshotName);
public static Path getWorkingSnapshotDir(String snapshotName, final Path rootDir,
Configuration conf) {
return getSpecifiedSnapshotDir(getWorkingSnapshotDir(rootDir, conf), snapshotName);
}
/**
* Get the directory to store the snapshot instance
* @param snapshotsDir hbase-global directory for storing all snapshots
* Get the directory within the given filepath to store the snapshot instance
* @param snapshotsDir directory to store snapshot directory within
* @param snapshotName name of the snapshot to take
* @return the final directory for the completed snapshot
* @return the final directory for the snapshot in the given filepath
*/
private static final Path getCompletedSnapshotDir(final Path snapshotsDir, String snapshotName) {
private static final Path getSpecifiedSnapshotDir(final Path snapshotsDir, String snapshotName) {
return new Path(snapshotsDir, snapshotName);
}
@ -244,6 +256,39 @@ public final class SnapshotDescriptionUtils {
return new Path(rootDir, HConstants.SNAPSHOT_DIR_NAME);
}
/**
* Determines if the given workingDir is a subdirectory of the given "root directory"
* @param workingDir a directory to check
* @param rootDir root directory of the HBase installation
* @return true if the given workingDir is a subdirectory of the given root directory,
* false otherwise
*/
public static boolean isSubDirectoryOf(final Path workingDir, final Path rootDir) {
return workingDir.toString().startsWith(rootDir.toString() + Path.SEPARATOR);
}
/**
* Determines if the given workingDir is a subdirectory of the default working snapshot directory
* @param workingDir a directory to check
* @param conf configuration for the HBase cluster
* @return true if the given workingDir is a subdirectory of the default working directory for
* snapshots, false otherwise
*/
public static boolean isWithinDefaultWorkingDir(final Path workingDir, Configuration conf) {
Path defaultWorkingDir = getDefaultWorkingSnapshotDir(new Path(conf.get(HConstants.HBASE_DIR)));
return workingDir.equals(defaultWorkingDir) || isSubDirectoryOf(workingDir, defaultWorkingDir);
}
/**
* Get the default working directory for snapshots - where they are built, where they are
* temporarily copied on export, etc.
* @param rootDir root directory of the HBase installation
* @return Path to the default snapshot tmp directory, relative to the passed root directory
*/
private static Path getDefaultWorkingSnapshotDir(final Path rootDir) {
return new Path(getSnapshotsDir(rootDir), SNAPSHOT_TMP_DIR_NAME);
}
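
A brief sketch of how the resolution above behaves, using the helpers introduced in this file; the wrapper class and the s3a/hdfs paths are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;

final class WorkingDirResolutionSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    Path rootDir = new Path("s3a://example-bucket/hbase"); // placeholder root directory

    // No override configured: resolves to <rootDir>/.hbase-snapshot/.tmp.
    Path defaultWorking = SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir, conf);

    // With hbase.snapshot.working.dir set, the configured location wins and may live on a
    // different filesystem than the root directory.
    conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR,
        "hdfs://namenode:8020/hbase-snapshot-staging"); // placeholder URI
    Path configuredWorking = SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir, conf);

    System.out.println(defaultWorking + " -> " + configuredWorking);
  }
}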
/**
* Convert the passed snapshot description into a 'full' snapshot description based on default
* parameters, if none have been supplied. This resolves any 'optional' parameters that aren't

View File

@ -81,18 +81,29 @@ public final class SnapshotManifest {
private final ForeignExceptionSnare monitor;
private final Configuration conf;
private final Path workingDir;
private final FileSystem fs;
private final FileSystem rootFs;
private final FileSystem workingDirFs;
private int manifestSizeLimit;
private SnapshotManifest(final Configuration conf, final FileSystem fs,
/**
*
* @param conf Configuration of the HBase instance
* @param rootFs root filesystem containing HFiles
* @param workingDir file path of where the manifest should be located
* @param desc description of snapshot being taken
* @param monitor monitor of foreign exceptions
* @throws IOException if the working directory file system cannot be
* determined from the configuration
*/
private SnapshotManifest(final Configuration conf, final FileSystem rootFs,
final Path workingDir, final SnapshotDescription desc,
final ForeignExceptionSnare monitor) {
final ForeignExceptionSnare monitor) throws IOException {
this.monitor = monitor;
this.desc = desc;
this.workingDir = workingDir;
this.conf = conf;
this.fs = fs;
this.rootFs = rootFs;
this.workingDirFs = this.workingDir.getFileSystem(this.conf);
this.manifestSizeLimit = conf.getInt(SNAPSHOT_MANIFEST_SIZE_LIMIT_CONF_KEY, 64 * 1024 * 1024);
}
@ -111,7 +122,7 @@ public final class SnapshotManifest {
*/
public static SnapshotManifest create(final Configuration conf, final FileSystem fs,
final Path workingDir, final SnapshotDescription desc,
final ForeignExceptionSnare monitor) {
final ForeignExceptionSnare monitor) throws IOException {
return new SnapshotManifest(conf, fs, workingDir, desc, monitor);
}
@ -154,9 +165,9 @@ public final class SnapshotManifest {
private RegionVisitor createRegionVisitor(final SnapshotDescription desc) throws IOException {
switch (getSnapshotFormat(desc)) {
case SnapshotManifestV1.DESCRIPTOR_VERSION:
return new SnapshotManifestV1.ManifestBuilder(conf, fs, workingDir);
return new SnapshotManifestV1.ManifestBuilder(conf, rootFs, workingDir);
case SnapshotManifestV2.DESCRIPTOR_VERSION:
return new SnapshotManifestV2.ManifestBuilder(conf, fs, workingDir);
return new SnapshotManifestV2.ManifestBuilder(conf, rootFs, workingDir);
default:
throw new CorruptedSnapshotException("Invalid Snapshot version: " + desc.getVersion(),
ProtobufUtil.createSnapshotDesc(desc));
@ -275,7 +286,7 @@ public final class SnapshotManifest {
if (isMobRegion) {
baseDir = FSUtils.getTableDir(MobUtils.getMobHome(conf), regionInfo.getTable());
}
HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs,
HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, rootFs,
baseDir, regionInfo, true);
monitor.rethrowException();
@ -323,12 +334,12 @@ public final class SnapshotManifest {
}
private List<StoreFileInfo> getStoreFiles(Path storeDir) throws IOException {
FileStatus[] stats = FSUtils.listStatus(fs, storeDir);
FileStatus[] stats = FSUtils.listStatus(rootFs, storeDir);
if (stats == null) return null;
ArrayList<StoreFileInfo> storeFiles = new ArrayList<>(stats.length);
for (int i = 0; i < stats.length; ++i) {
storeFiles.add(new StoreFileInfo(conf, fs, stats[i]));
storeFiles.add(new StoreFileInfo(conf, rootFs, stats[i]));
}
return storeFiles;
}
@ -364,11 +375,11 @@ public final class SnapshotManifest {
private void load() throws IOException {
switch (getSnapshotFormat(desc)) {
case SnapshotManifestV1.DESCRIPTOR_VERSION: {
this.htd = FSTableDescriptors.getTableDescriptorFromFs(fs, workingDir);
this.htd = FSTableDescriptors.getTableDescriptorFromFs(workingDirFs, workingDir);
ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
try {
this.regionManifests =
SnapshotManifestV1.loadRegionManifests(conf, tpool, fs, workingDir, desc);
SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs, workingDir, desc);
} finally {
tpool.shutdown();
}
@ -385,9 +396,10 @@ public final class SnapshotManifest {
List<SnapshotRegionManifest> v1Regions, v2Regions;
ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
try {
v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, fs, workingDir, desc);
v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, fs, workingDir, desc,
manifestSizeLimit);
v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, rootFs,
workingDir, desc);
v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, rootFs,
workingDir, desc, manifestSizeLimit);
} catch (InvalidProtocolBufferException e) {
throw new CorruptedSnapshotException("unable to parse region manifest " +
e.getMessage(), e);
@ -460,7 +472,7 @@ public final class SnapshotManifest {
Path rootDir = FSUtils.getRootDir(conf);
LOG.info("Using old Snapshot Format");
// write a copy of descriptor to the snapshot directory
new FSTableDescriptors(conf, fs, rootDir)
new FSTableDescriptors(conf, workingDirFs, rootDir)
.createTableDescriptorForTableDirectory(workingDir, htd, false);
} else {
LOG.debug("Convert to Single Snapshot Manifest");
@ -477,9 +489,10 @@ public final class SnapshotManifest {
List<SnapshotRegionManifest> v1Regions, v2Regions;
ThreadPoolExecutor tpool = createExecutor("SnapshotManifestLoader");
try {
v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, fs, workingDir, desc);
v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, fs, workingDir, desc,
manifestSizeLimit);
v1Regions = SnapshotManifestV1.loadRegionManifests(conf, tpool, workingDirFs,
workingDir, desc);
v2Regions = SnapshotManifestV2.loadRegionManifests(conf, tpool, workingDirFs,
workingDir, desc, manifestSizeLimit);
} finally {
tpool.shutdown();
}
@ -509,12 +522,12 @@ public final class SnapshotManifest {
// them we will get the same information.
if (v1Regions != null && v1Regions.size() > 0) {
for (SnapshotRegionManifest regionManifest: v1Regions) {
SnapshotManifestV1.deleteRegionManifest(fs, workingDir, regionManifest);
SnapshotManifestV1.deleteRegionManifest(workingDirFs, workingDir, regionManifest);
}
}
if (v2Regions != null && v2Regions.size() > 0) {
for (SnapshotRegionManifest regionManifest: v2Regions) {
SnapshotManifestV2.deleteRegionManifest(fs, workingDir, regionManifest);
SnapshotManifestV2.deleteRegionManifest(workingDirFs, workingDir, regionManifest);
}
}
}
@ -524,7 +537,7 @@ public final class SnapshotManifest {
*/
private void writeDataManifest(final SnapshotDataManifest manifest)
throws IOException {
FSDataOutputStream stream = fs.create(new Path(workingDir, DATA_MANIFEST_NAME));
FSDataOutputStream stream = workingDirFs.create(new Path(workingDir, DATA_MANIFEST_NAME));
try {
manifest.writeTo(stream);
} finally {
@ -538,7 +551,7 @@ public final class SnapshotManifest {
private SnapshotDataManifest readDataManifest() throws IOException {
FSDataInputStream in = null;
try {
in = fs.open(new Path(workingDir, DATA_MANIFEST_NAME));
in = workingDirFs.open(new Path(workingDir, DATA_MANIFEST_NAME));
CodedInputStream cin = CodedInputStream.newInstance(in);
cin.setSizeLimit(manifestSizeLimit);
return SnapshotDataManifest.parseFrom(cin);

View File

@ -67,18 +67,21 @@ public final class SnapshotManifestV1 {
HRegionFileSystem, Path> {
private final Configuration conf;
private final Path snapshotDir;
private final FileSystem fs;
private final FileSystem rootFs;
private final FileSystem workingDirFs;
public ManifestBuilder(final Configuration conf, final FileSystem fs, final Path snapshotDir) {
public ManifestBuilder(final Configuration conf, final FileSystem rootFs,
final Path snapshotDir) throws IOException {
this.snapshotDir = snapshotDir;
this.conf = conf;
this.fs = fs;
this.rootFs = rootFs;
this.workingDirFs = snapshotDir.getFileSystem(conf);
}
@Override
public HRegionFileSystem regionOpen(final RegionInfo regionInfo) throws IOException {
HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf,
fs, snapshotDir, regionInfo);
workingDirFs, snapshotDir, regionInfo);
return snapshotRegionFs;
}
@ -103,13 +106,13 @@ public final class SnapshotManifestV1 {
boolean success = true;
if (storeFile.isReference()) {
// write the Reference object to the snapshot
storeFile.getReference().write(fs, referenceFile);
storeFile.getReference().write(workingDirFs, referenceFile);
} else {
// create "reference" to this store file. It is intentionally an empty file -- all
// necessary information is captured by its fs location and filename. This allows us to
// only figure out what needs to be done via a single nn operation (instead of having to
// open and read the files as well).
success = fs.createNewFile(referenceFile);
success = workingDirFs.createNewFile(referenceFile);
}
if (!success) {
throw new IOException("Failed to create reference file:" + referenceFile);

View File

@ -69,12 +69,13 @@ public final class SnapshotManifestV2 {
SnapshotRegionManifest.Builder, SnapshotRegionManifest.FamilyFiles.Builder> {
private final Configuration conf;
private final Path snapshotDir;
private final FileSystem fs;
private final FileSystem rootFs;
public ManifestBuilder(final Configuration conf, final FileSystem fs, final Path snapshotDir) {
public ManifestBuilder(final Configuration conf, final FileSystem rootFs,
final Path snapshotDir) {
this.snapshotDir = snapshotDir;
this.conf = conf;
this.fs = fs;
this.rootFs = rootFs;
}
@Override
@ -88,9 +89,11 @@ public final class SnapshotManifestV2 {
public void regionClose(final SnapshotRegionManifest.Builder region) throws IOException {
// we should ensure the snapshot dir exists; it may have been deleted by the master,
// see HBASE-16464
if (fs.exists(snapshotDir)) {
FileSystem workingDirFs = snapshotDir.getFileSystem(this.conf);
if (workingDirFs.exists(snapshotDir)) {
SnapshotRegionManifest manifest = region.build();
FSDataOutputStream stream = fs.create(getRegionManifestPath(snapshotDir, manifest));
FSDataOutputStream stream = workingDirFs.create(
getRegionManifestPath(snapshotDir, manifest));
try {
manifest.writeTo(stream);
} finally {
@ -126,7 +129,7 @@ public final class SnapshotManifestV2 {
if (storeFile.isReference()) {
sfManifest.setReference(storeFile.getReference().convert());
}
sfManifest.setFileSize(storeFile.getReferencedFileStatus(fs).getLen());
sfManifest.setFileSize(storeFile.getReferencedFileStatus(rootFs).getLen());
family.addStoreFiles(sfManifest.build());
}
}

View File

@ -0,0 +1,79 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import java.io.IOException;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.experimental.categories.Category;
/**
* This class tests that the use of a temporary snapshot directory supports snapshot functionality
* while the temporary directory is on the same file system as the root directory
* <p>
* This is an end-to-end test for the snapshot utility
*/
@Category(LargeTests.class)
public class TestSnapshotDFSTemporaryDirectory
extends TestSnapshotTemporaryDirectory {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestSnapshotDFSTemporaryDirectory.class);
/**
* Setup the config for the cluster
*
* @throws Exception on failure
*/
@BeforeClass public static void setupCluster() throws Exception {
setupConf(UTIL.getConfiguration());
UTIL.startMiniCluster(NUM_RS);
admin = UTIL.getHBaseAdmin();
}
private static void setupConf(Configuration conf) throws IOException {
// disable the ui
conf.setInt("hbase.regionsever.info.port", -1);
// change the flush size to a small amount, regulating number of store files
conf.setInt("hbase.hregion.memstore.flush.size", 25000);
// so make sure we get a compaction when doing a load, but keep around some
// files in the store
conf.setInt("hbase.hstore.compaction.min", 10);
conf.setInt("hbase.hstore.compactionThreshold", 10);
// block writes if we get to 12 store files
conf.setInt("hbase.hstore.blockingStoreFiles", 12);
// Enable snapshot
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
ConstantSizeRegionSplitPolicy.class.getName());
conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, UTIL.getDefaultRootDirPath().toString()
+ Path.SEPARATOR + UUID.randomUUID().toString() + Path.SEPARATOR + ".tmpdir"
+ Path.SEPARATOR);
}
}

View File

@ -0,0 +1,471 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.UUID;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
import org.apache.hadoop.hbase.snapshot.SnapshotManifestV1;
import org.apache.hadoop.hbase.snapshot.SnapshotManifestV2;
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
/**
* This class tests that the use of a temporary snapshot directory supports snapshot functionality
* while the temporary directory is on a different file system than the root directory
* <p>
* This is an end-to-end test for the snapshot utility
*/
@Category(LargeTests.class)
@RunWith(Parameterized.class)
public class TestSnapshotTemporaryDirectory {
@ClassRule public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestSnapshotTemporaryDirectory.class);
@Parameterized.Parameters public static Iterable<Integer> data() {
return Arrays
.asList(SnapshotManifestV1.DESCRIPTOR_VERSION, SnapshotManifestV2.DESCRIPTOR_VERSION);
}
@Parameterized.Parameter public int manifestVersion;
private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotTemporaryDirectory.class);
protected static final int NUM_RS = 2;
protected static String TEMP_DIR =
Paths.get("").toAbsolutePath().toString() + Path.SEPARATOR + UUID.randomUUID().toString();
protected static Admin admin;
protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
protected static final String STRING_TABLE_NAME = "test";
protected static final byte[] TEST_FAM = Bytes.toBytes("fam");
protected static final TableName TABLE_NAME = TableName.valueOf(STRING_TABLE_NAME);
/**
* Setup the config for the cluster
*
* @throws Exception on failure
*/
@BeforeClass public static void setupCluster() throws Exception {
setupConf(UTIL.getConfiguration());
UTIL.startMiniCluster(NUM_RS);
admin = UTIL.getHBaseAdmin();
}
private static void setupConf(Configuration conf) {
// disable the ui
conf.setInt("hbase.regionsever.info.port", -1);
// change the flush size to a small amount, regulating number of store files
conf.setInt("hbase.hregion.memstore.flush.size", 25000);
// so make sure we get a compaction when doing a load, but keep around some
// files in the store
conf.setInt("hbase.hstore.compaction.min", 10);
conf.setInt("hbase.hstore.compactionThreshold", 10);
// block writes if we get to 12 store files
conf.setInt("hbase.hstore.blockingStoreFiles", 12);
// Enable snapshot
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
ConstantSizeRegionSplitPolicy.class.getName());
conf.set(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR, "file://" + TEMP_DIR + "/.tmpdir/");
}
@Before public void setup() throws Exception {
HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
htd.setRegionReplication(getNumReplicas());
UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration());
}
protected int getNumReplicas() {
return 1;
}
@After public void tearDown() throws Exception {
UTIL.deleteTable(TABLE_NAME);
SnapshotTestingUtils.deleteAllSnapshots(UTIL.getHBaseAdmin());
SnapshotTestingUtils.deleteArchiveDirectory(UTIL);
}
@AfterClass public static void cleanupTest() {
try {
UTIL.shutdownMiniCluster();
FileUtils.deleteDirectory(new File(TEMP_DIR));
} catch (Exception e) {
LOG.warn("failure shutting down cluster", e);
}
}
@Test(timeout = 180000) public void testRestoreDisabledSnapshot()
throws IOException, InterruptedException {
long tid = System.currentTimeMillis();
TableName tableName = TableName.valueOf("testtb-" + tid);
byte[] emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid);
byte[] snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
byte[] snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
int snapshot0Rows;
int snapshot1Rows;
// create Table and disable it
SnapshotTestingUtils.createTable(UTIL, tableName, getNumReplicas(), TEST_FAM);
admin.disableTable(tableName);
// take an empty snapshot
takeSnapshot(tableName, Bytes.toString(emptySnapshot), true);
// enable table and insert data
admin.enableTable(tableName);
SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM);
try (Table table = UTIL.getConnection().getTable(tableName)) {
snapshot0Rows = UTIL.countRows(table);
}
admin.disableTable(tableName);
// take a snapshot
takeSnapshot(tableName, Bytes.toString(snapshotName0), true);
// enable table and insert more data
admin.enableTable(tableName);
SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM);
try (Table table = UTIL.getConnection().getTable(tableName)) {
snapshot1Rows = UTIL.countRows(table);
}
SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
admin.disableTable(tableName);
takeSnapshot(tableName, Bytes.toString(snapshotName1), true);
// Restore from snapshot-0
admin.restoreSnapshot(snapshotName0);
admin.enableTable(tableName);
SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot0Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
// Restore from emptySnapshot
admin.disableTable(tableName);
admin.restoreSnapshot(emptySnapshot);
admin.enableTable(tableName);
SnapshotTestingUtils.verifyRowCount(UTIL, tableName, 0);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
// Restore from snapshot-1
admin.disableTable(tableName);
admin.restoreSnapshot(snapshotName1);
admin.enableTable(tableName);
SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
// Restore from snapshot-1
UTIL.deleteTable(tableName);
admin.restoreSnapshot(snapshotName1);
SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
}
@Test(timeout = 180000) public void testRestoreEnabledSnapshot()
throws IOException, InterruptedException {
long tid = System.currentTimeMillis();
TableName tableName = TableName.valueOf("testtb-" + tid);
byte[] emptySnapshot = Bytes.toBytes("emptySnaptb-" + tid);
byte[] snapshotName0 = Bytes.toBytes("snaptb0-" + tid);
byte[] snapshotName1 = Bytes.toBytes("snaptb1-" + tid);
int snapshot0Rows;
int snapshot1Rows;
// create Table
SnapshotTestingUtils.createTable(UTIL, tableName, getNumReplicas(), TEST_FAM);
// take an empty snapshot
takeSnapshot(tableName, Bytes.toString(emptySnapshot), false);
// Insert data
SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM);
try (Table table = UTIL.getConnection().getTable(tableName)) {
snapshot0Rows = UTIL.countRows(table);
}
// take a snapshot
takeSnapshot(tableName, Bytes.toString(snapshotName0), false);
// Insert more data
SnapshotTestingUtils.loadData(UTIL, tableName, 500, TEST_FAM);
try (Table table = UTIL.getConnection().getTable(tableName)) {
snapshot1Rows = UTIL.countRows(table);
}
SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
takeSnapshot(tableName, Bytes.toString(snapshotName1), false);
// Restore from snapshot-0
admin.disableTable(tableName);
admin.restoreSnapshot(snapshotName0);
admin.enableTable(tableName);
SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot0Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
// Restore from emptySnapshot
admin.disableTable(tableName);
admin.restoreSnapshot(emptySnapshot);
admin.enableTable(tableName);
SnapshotTestingUtils.verifyRowCount(UTIL, tableName, 0);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
// Restore from snapshot-1
admin.disableTable(tableName);
admin.restoreSnapshot(snapshotName1);
admin.enableTable(tableName);
SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
// Restore from snapshot-1
UTIL.deleteTable(tableName);
admin.restoreSnapshot(snapshotName1);
SnapshotTestingUtils.verifyRowCount(UTIL, tableName, snapshot1Rows);
SnapshotTestingUtils.verifyReplicasCameOnline(tableName, admin, getNumReplicas());
}
/**
* Test snapshotting a table that is offline
*
* @throws Exception if snapshot does not complete successfully
*/
@Test(timeout = 300000) public void testOfflineTableSnapshot() throws Exception {
Admin admin = UTIL.getHBaseAdmin();
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
// put some stuff in the table
Table table = UTIL.getConnection().getTable(TABLE_NAME);
UTIL.loadTable(table, TEST_FAM, false);
LOG.debug("FS state before disable:");
FSUtils
.logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
LOG);
// XXX if this is flakey, might want to consider using the async version and looping as
// disableTable can succeed and still timeout.
admin.disableTable(TABLE_NAME);
LOG.debug("FS state before snapshot:");
FSUtils
.logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
LOG);
// take a snapshot of the disabled table
final String SNAPSHOT_NAME = "offlineTableSnapshot";
byte[] snapshot = Bytes.toBytes(SNAPSHOT_NAME);
takeSnapshot(TABLE_NAME, SNAPSHOT_NAME, true);
LOG.debug("Snapshot completed.");
// make sure we have the snapshot
List<SnapshotDescription> snapshots =
SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME);
// make sure it's a valid snapshot
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
LOG.debug("FS state after snapshot:");
FSUtils
.logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
LOG);
SnapshotTestingUtils
.confirmSnapshotValid(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)),
TABLE_NAME, TEST_FAM, rootDir, admin, fs);
admin.deleteSnapshot(snapshot);
SnapshotTestingUtils.assertNoSnapshots(admin);
}
/**
* Tests that snapshot has correct contents by taking snapshot, cloning it, then affirming
* the contents of the original and cloned table match
*
* @throws Exception if snapshot does not complete successfully
*/
@Test(timeout = 180000) public void testSnapshotCloneContents() throws Exception {
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
// put some stuff in the table
Table table = UTIL.getConnection().getTable(TABLE_NAME);
UTIL.loadTable(table, TEST_FAM);
table.close();
String snapshot1 = "TableSnapshot1";
takeSnapshot(TABLE_NAME, snapshot1, false);
LOG.debug("Snapshot1 completed.");
TableName clone = TableName.valueOf("Table1Clone");
admin.cloneSnapshot(snapshot1, clone, false);
Scan original = new Scan();
Scan cloned = new Scan();
ResultScanner originalScan = admin.getConnection().getTable(TABLE_NAME).getScanner(original);
ResultScanner clonedScan =
admin.getConnection().getTable(TableName.valueOf("Table1Clone")).getScanner(cloned);
Iterator<Result> i = originalScan.iterator();
Iterator<Result> i2 = clonedScan.iterator();
assertTrue(i.hasNext());
while (i.hasNext()) {
assertTrue(i2.hasNext());
assertEquals(Bytes.toString(i.next().getValue(TEST_FAM, new byte[] {})),
Bytes.toString(i2.next().getValue(TEST_FAM, new byte[] {})));
}
assertFalse(i2.hasNext());
admin.deleteSnapshot(snapshot1);
UTIL.deleteTable(clone);
admin.close();
}
@Test(timeout = 180000) public void testOfflineTableSnapshotWithEmptyRegion() throws Exception {
// test with an empty table with one region
// make sure we don't fail on listing snapshots
SnapshotTestingUtils.assertNoSnapshots(admin);
LOG.debug("FS state before disable:");
FSUtils
.logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
LOG);
admin.disableTable(TABLE_NAME);
LOG.debug("FS state before snapshot:");
FSUtils
.logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
LOG);
// take a snapshot of the disabled table
byte[] snapshot = Bytes.toBytes("testOfflineTableSnapshotWithEmptyRegion");
takeSnapshot(TABLE_NAME, Bytes.toString(snapshot), true);
LOG.debug("Snapshot completed.");
// make sure we have the snapshot
List<SnapshotDescription> snapshots =
SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot, TABLE_NAME);
// make sure it's a valid snapshot
FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
LOG.debug("FS state after snapshot:");
FSUtils
.logFileSystemState(UTIL.getTestFileSystem(), FSUtils.getRootDir(UTIL.getConfiguration()),
LOG);
List<byte[]> emptyCfs = Lists.newArrayList(TEST_FAM); // no file in the region
List<byte[]> nonEmptyCfs = Lists.newArrayList();
SnapshotTestingUtils
.confirmSnapshotValid(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshots.get(0)),
TABLE_NAME, nonEmptyCfs, emptyCfs, rootDir, admin, fs);
admin.deleteSnapshot(snapshot);
SnapshotTestingUtils.assertNoSnapshots(admin);
}
// Ensures that the snapshot is transferred to the proper completed snapshot directory
@Test(timeout = 180000) public void testEnsureTemporaryDirectoryTransfer() throws Exception {
Admin admin = null;
TableName tableName2 = TableName.valueOf("testListTableSnapshots");
try {
admin = UTIL.getHBaseAdmin();
HTableDescriptor htd = new HTableDescriptor(tableName2);
UTIL.createTable(htd, new byte[][] { TEST_FAM }, UTIL.getConfiguration());
String table1Snapshot1 = "Table1Snapshot1";
takeSnapshot(TABLE_NAME, table1Snapshot1, false);
LOG.debug("Snapshot1 completed.");
String table1Snapshot2 = "Table1Snapshot2";
takeSnapshot(TABLE_NAME, table1Snapshot2, false);
LOG.debug("Snapshot2 completed.");
String table2Snapshot1 = "Table2Snapshot1";
takeSnapshot(TABLE_NAME, table2Snapshot1, false);
LOG.debug("Table2Snapshot1 completed.");
List<SnapshotDescription> listTableSnapshots = admin.listTableSnapshots("test.*", ".*");
List<String> listTableSnapshotNames = new ArrayList<String>();
assertEquals(3, listTableSnapshots.size());
for (SnapshotDescription s : listTableSnapshots) {
listTableSnapshotNames.add(s.getName());
}
assertTrue(listTableSnapshotNames.contains(table1Snapshot1));
assertTrue(listTableSnapshotNames.contains(table1Snapshot2));
assertTrue(listTableSnapshotNames.contains(table2Snapshot1));
} finally {
if (admin != null) {
try {
admin.deleteSnapshots("Table.*");
} catch (SnapshotDoesNotExistException ignore) {
}
if (admin.tableExists(tableName2)) {
UTIL.deleteTable(tableName2);
}
admin.close();
}
}
}
private void takeSnapshot(TableName tableName, String snapshotName, boolean disabled)
throws IOException {
SnapshotType type = disabled ? SnapshotType.DISABLED : SnapshotType.FLUSH;
SnapshotDescription desc =
new SnapshotDescription(snapshotName, tableName.getNameAsString(), type, null, -1,
manifestVersion);
admin.snapshot(desc);
}
}
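
For context on what these tests exercise: the suite stages each snapshot in a configurable working directory and then expects the finished snapshot under the usual completed-snapshot directory. The following is a minimal standalone sketch of that flow, assuming the hbase.snapshot.working.dir property this change introduces; the class name, table name, and local staging path are illustrative only.

import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public class SnapshotWorkingDirSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    Configuration conf = util.getConfiguration();

    // Stage snapshots on a separate (here: local) filesystem; the completed
    // snapshot must still end up under the root directory.
    conf.set("hbase.snapshot.working.dir",
        "file:///tmp/hbase-snapshot-staging/" + UUID.randomUUID());

    util.startMiniCluster();
    try (Admin admin = util.getAdmin()) {
      TableName table = TableName.valueOf("sketchTable");
      util.createTable(table, "fam");
      admin.snapshot("sketchSnapshot", table);

      // Verify the snapshot was moved from the staging directory into
      // <hbase.rootdir>/.hbase-snapshot/sketchSnapshot.
      FileSystem fs = util.getTestFileSystem();
      Path completed = new Path(conf.get("hbase.rootdir"),
          ".hbase-snapshot/sketchSnapshot");
      System.out.println("Completed snapshot present: " + fs.exists(completed));
    } finally {
      util.shutdownMiniCluster();
    }
  }
}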

View File

@ -0,0 +1,37 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client;

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.ClassRule;
import org.junit.experimental.categories.Category;

@Category(LargeTests.class)
public class TestSnapshotTemporaryDirectoryWithRegionReplicas
    extends TestSnapshotTemporaryDirectory {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(TestSnapshotTemporaryDirectoryWithRegionReplicas.class);

  @Override
  protected int getNumReplicas() {
    return 3;
  }
}

View File

@ -149,7 +149,8 @@ public class TestSnapshotHFileCleaner {
} catch (CorruptedSnapshotException cse) {
LOG.info("Expected exception " + cse);
} finally {
fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir), true);
fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir,
TEST_UTIL.getConfiguration()), true);
}
}
@ -176,7 +177,8 @@ public class TestSnapshotHFileCleaner {
} catch (CorruptedSnapshotException cse) {
LOG.info("Expected exception " + cse);
} finally {
fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir), true);
fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir,
TEST_UTIL.getConfiguration()), true);
}
}
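
The call sites above change because the working directory is no longer derived purely from the root directory: it can now be placed elsewhere via hbase.snapshot.working.dir. The sketch below shows what that resolution might look like, consistent with these call sites but with an illustrative class name and hard-coded directory names standing in for the real constants; it is not the committed implementation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public final class WorkingSnapshotDirSketch {
  private WorkingSnapshotDirSketch() {
  }

  // Resolve where in-progress snapshots are staged: the configured location
  // if one is set, otherwise the historical default under the root directory.
  public static Path getWorkingSnapshotDir(Path rootDir, Configuration conf) {
    String configured = conf.get("hbase.snapshot.working.dir");
    if (configured != null && !configured.isEmpty()) {
      return new Path(configured);
    }
    return new Path(new Path(rootDir, ".hbase-snapshot"), ".tmp");
  }

  // Per-snapshot staging directory beneath the resolved working directory.
  public static Path getWorkingSnapshotDir(String snapshotName, Path rootDir,
      Configuration conf) {
    return new Path(getWorkingSnapshotDir(rootDir, conf), snapshotName);
  }
}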

View File

@ -507,7 +507,7 @@ public final class SnapshotTestingUtils {
this.htd = htd;
this.desc = desc;
this.tableRegions = tableRegions;
this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
this.snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf);
new FSTableDescriptors(conf)
.createTableDescriptorForTableDirectory(snapshotDir, htd, false);
}
@ -687,7 +687,7 @@ public final class SnapshotTestingUtils {
.setVersion(version)
.build();
Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir, conf);
SnapshotDescriptionUtils.writeSnapshotInfo(desc, workingDir, fs);
return new SnapshotBuilder(conf, fs, rootDir, htd, desc, regions);
}

View File

@ -124,7 +124,7 @@ public class TestRegionSnapshotTask {
final HRegion region = spy(hRegions.get(0));
Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir, conf);
final SnapshotManifest manifest =
SnapshotManifest.create(conf, fs, workingDir, snapshot, monitor);
manifest.addTableDescriptor(table.getTableDescriptor());
@ -166,7 +166,7 @@ public class TestRegionSnapshotTask {
private void addRegionToSnapshot(SnapshotProtos.SnapshotDescription snapshot,
HRegion region, SnapshotManifest manifest) throws Exception {
LOG.info("Adding region to snapshot: " + region.getRegionInfo().getRegionNameAsString());
Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir, conf);
SnapshotManifest.RegionVisitor visitor = createRegionVisitorWithDelay(snapshot, workingDir);
manifest.addRegion(region, visitor);
LOG.info("Added the region to snapshot: " + region.getRegionInfo().getRegionNameAsString());

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.hbase.snapshot;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
@ -107,4 +108,58 @@ public class TestSnapshotDescriptionUtils {
LOG.info("Correctly failed to move non-existant directory: " + e.getMessage());
}
}
@Test
public void testIsSubDirectoryWorks() {
Path rootDir = new Path("hdfs://root/.hbase-snapshot/");
assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(rootDir, rootDir));
assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(
new Path("hdfs://root/.hbase-snapshotdir"), rootDir));
assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(
new Path("hdfs://root/.hbase-snapshot"), rootDir));
assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(
new Path("hdfs://.hbase-snapshot"), rootDir));
assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(
new Path("hdfs://.hbase-snapshot/.tmp"), rootDir));
assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(new Path("hdfs://root"), rootDir));
assertTrue(SnapshotDescriptionUtils.isSubDirectoryOf(
new Path("hdfs://root/.hbase-snapshot/.tmp"), rootDir));
assertTrue(SnapshotDescriptionUtils.isSubDirectoryOf(
new Path("hdfs://root/.hbase-snapshot/.tmp/snapshot"), rootDir));
assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(
new Path("s3://root/.hbase-snapshot/"), rootDir));
assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(new Path("s3://root"), rootDir));
assertFalse(SnapshotDescriptionUtils.isSubDirectoryOf(
new Path("s3://root/.hbase-snapshot/.tmp/snapshot"), rootDir));
}
@Test
public void testIsWithinWorkingDir() {
Configuration conf = new Configuration();
conf.set(HConstants.HBASE_DIR, "hdfs://root/");
assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
new Path("hdfs://root/"), conf));
assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
new Path("hdfs://root/.hbase-snapshotdir"), conf));
assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
new Path("hdfs://root/.hbase-snapshot"), conf));
assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
new Path("hdfs://.hbase-snapshot"), conf));
assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
new Path("hdfs://.hbase-snapshot/.tmp"), conf));
assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(new Path("hdfs://root"), conf));
assertTrue(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
new Path("hdfs://root/.hbase-snapshot/.tmp"), conf));
assertTrue(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
new Path("hdfs://root/.hbase-snapshot/.tmp/snapshot"), conf));
assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
new Path("s3://root/.hbase-snapshot/"), conf));
assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(new Path("s3://root"), conf));
assertFalse(SnapshotDescriptionUtils.isWithinDefaultWorkingDir(
new Path("s3://root/.hbase-snapshot/.tmp/snapshot"), conf));
}
}
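
The two tests above pin down the semantics of the new helpers: a path only qualifies when it sits strictly below the reference directory on the same scheme and authority, and the default working directory is <hbase.rootdir>/.hbase-snapshot/.tmp. The following sketch satisfies those assertions; the class name is illustrative and the bodies are not the committed implementation, which lives in SnapshotDescriptionUtils.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;

public final class WorkingDirCheckSketch {
  private WorkingDirCheckSketch() {
  }

  // True only when 'path' is strictly underneath 'parent'; the string prefix
  // comparison keeps scheme and authority in play, so an s3:// path is never
  // a subdirectory of an hdfs:// one, and a directory is not its own child.
  public static boolean isSubDirectoryOf(Path path, Path parent) {
    return path.toString().startsWith(parent.toString() + Path.SEPARATOR);
  }

  // True when 'path' is the default working directory under hbase.rootdir,
  // or any directory beneath it.
  public static boolean isWithinDefaultWorkingDir(Path path, Configuration conf) {
    Path defaultWorkingDir = new Path(
        new Path(conf.get(HConstants.HBASE_DIR), ".hbase-snapshot"), ".tmp");
    return path.equals(defaultWorkingDir) || isSubDirectoryOf(path, defaultWorkingDir);
  }
}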