HBASE-7294 Check for snapshot file cleaners on start (Matteo Bertozzi)
git-svn-id: https://svn.apache.org/repos/asf/hbase/branches/hbase-7290@1445826 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
parent
9bc5d78e3a
commit
55a44243a9
|
@ -2439,6 +2439,12 @@ Server {
|
|||
@Override
|
||||
public TakeSnapshotResponse snapshot(RpcController controller, TakeSnapshotRequest request)
|
||||
throws ServiceException {
|
||||
try {
|
||||
this.snapshotManager.checkSnapshotSupport();
|
||||
} catch (UnsupportedOperationException e) {
|
||||
throw new ServiceException(e);
|
||||
}
|
||||
|
||||
LOG.debug("Starting snapshot for:" + request);
|
||||
// get the snapshot information
|
||||
SnapshotDescription snapshot = SnapshotDescriptionUtils.validate(request.getSnapshot(),
|
||||
|
@ -2485,6 +2491,12 @@ Server {
|
|||
@Override
|
||||
public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
|
||||
DeleteSnapshotRequest request) throws ServiceException {
|
||||
try {
|
||||
this.snapshotManager.checkSnapshotSupport();
|
||||
} catch (UnsupportedOperationException e) {
|
||||
throw new ServiceException(e);
|
||||
}
|
||||
|
||||
try {
|
||||
snapshotManager.deleteSnapshot(request.getSnapshot());
|
||||
return DeleteSnapshotResponse.newBuilder().build();
|
||||
|
@ -2530,6 +2542,12 @@ Server {
|
|||
@Override
|
||||
public RestoreSnapshotResponse restoreSnapshot(RpcController controller,
|
||||
RestoreSnapshotRequest request) throws ServiceException {
|
||||
try {
|
||||
this.snapshotManager.checkSnapshotSupport();
|
||||
} catch (UnsupportedOperationException e) {
|
||||
throw new ServiceException(e);
|
||||
}
|
||||
|
||||
try {
|
||||
SnapshotDescription reqSnapshot = request.getSnapshot();
|
||||
snapshotManager.restoreSnapshot(reqSnapshot);
|
||||
|
|
|
@ -20,10 +20,13 @@ package org.apache.hadoop.hbase.master.snapshot;
|
|||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
@ -34,6 +37,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
|
|||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.Stoppable;
|
||||
import org.apache.hadoop.hbase.catalog.MetaReader;
|
||||
|
@ -44,6 +48,10 @@ import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
|
|||
import org.apache.hadoop.hbase.master.MasterFileSystem;
|
||||
import org.apache.hadoop.hbase.master.MasterServices;
|
||||
import org.apache.hadoop.hbase.master.SnapshotSentinel;
|
||||
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
|
||||
import org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner;
|
||||
import org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner;
|
||||
import org.apache.hadoop.hbase.master.snapshot.SnapshotLogCleaner;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription.Type;
|
||||
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
|
||||
|
@ -57,6 +65,7 @@ import org.apache.hadoop.hbase.snapshot.TablePartiallyOpenException;
|
|||
import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.hbase.util.FSTableDescriptors;
|
||||
import org.apache.hadoop.hbase.util.FSUtils;
|
||||
|
||||
/**
|
||||
* This class manages the procedure of taking and restoring snapshots. There is only one
|
||||
|
@ -75,6 +84,9 @@ public class SnapshotManager implements Stoppable {
|
|||
/** By default, check to see if the snapshot is complete every WAKE MILLIS (ms) */
|
||||
public static final int SNAPSHOT_WAKE_MILLIS_DEFAULT = 500;
|
||||
|
||||
/** Enable or disable snapshot support */
|
||||
public static final String HBASE_SNAPSHOT_ENABLED = "hbase.snapshot.enabled";
|
||||
|
||||
/**
|
||||
* Conf key for # of ms elapsed between checks for snapshot errors while waiting for
|
||||
* completion.
|
||||
|
@ -102,6 +114,9 @@ public class SnapshotManager implements Stoppable {
|
|||
private final long wakeFrequency;
|
||||
private final MasterServices master; // Needed by TableEventHandlers
|
||||
|
||||
// Is snapshot feature enabled?
|
||||
private boolean isSnapshotSupported = false;
|
||||
|
||||
// A reference to a handler. If the handler is non-null, then it is assumed that a snapshot is
|
||||
// in progress currently
|
||||
// TODO: this is a bad smell; likely replace with a collection in the future. Also this gets
|
||||
|
@ -119,7 +134,7 @@ public class SnapshotManager implements Stoppable {
|
|||
* @param master
|
||||
* @param comms
|
||||
*/
|
||||
public SnapshotManager(final MasterServices master) throws IOException {
|
||||
public SnapshotManager(final MasterServices master) throws IOException, UnsupportedOperationException {
|
||||
this.master = master;
|
||||
|
||||
// get the configuration for the coordinator
|
||||
|
@ -127,6 +142,8 @@ public class SnapshotManager implements Stoppable {
|
|||
this.wakeFrequency = conf.getInt(SNAPSHOT_WAKE_MILLIS_KEY, SNAPSHOT_WAKE_MILLIS_DEFAULT);
|
||||
this.rootDir = master.getMasterFileSystem().getRootDir();
|
||||
this.executorService = master.getExecutorService();
|
||||
|
||||
checkSnapshotSupport(master.getConfiguration(), master.getMasterFileSystem());
|
||||
resetTempDir();
|
||||
}
|
||||
|
||||
|
@ -716,4 +733,91 @@ public class SnapshotManager implements Stoppable {
|
|||
public boolean isStopped() {
|
||||
return this.stopped;
|
||||
}
|
||||
|
||||
/**
|
||||
* Throws an exception if snapshot operations (take a snapshot, restore, clone) are not supported.
|
||||
* Called at the beginning of snapshot() and restoreSnapshot() methods.
|
||||
* @throws UnsupportedOperationException if snapshot are not supported
|
||||
*/
|
||||
public void checkSnapshotSupport() throws UnsupportedOperationException {
|
||||
if (!this.isSnapshotSupported) {
|
||||
throw new UnsupportedOperationException(
|
||||
"To use snapshots, You must add to the hbase-site.xml of the HBase Master: '" +
|
||||
HBASE_SNAPSHOT_ENABLED + "' property with value 'true'.");
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Called at master startup to decide whether the snapshot feature can be supported,
 * and to refuse to start the master when snapshots are already present on disk but
 * the cleaner chains needed to protect their files are missing — otherwise the
 * default cleaners could delete referenced hfiles/logs and cause snapshot data loss.
 *
 * Side effects: when snapshots are enabled, the snapshot-aware cleaners are injected
 * into {@code conf}; in all cases {@code this.isSnapshotSupported} is updated.
 *
 * @param conf The {@link Configuration} object to inspect (and possibly update)
 * @param mfs The MasterFileSystem used to look for existing snapshots
 * @throws IOException in case of file-system operation failure
 * @throws UnsupportedOperationException in case cleaners are missing and
 *         there are snapshots in the system
 */
private void checkSnapshotSupport(final Configuration conf, final MasterFileSystem mfs)
    throws IOException, UnsupportedOperationException {
  // An explicit, non-blank value of hbase.snapshot.enabled that parses to false means
  // the user deliberately disabled the feature (as opposed to leaving it unset).
  String enabled = conf.get(HBASE_SNAPSHOT_ENABLED);
  boolean snapshotEnabled = conf.getBoolean(HBASE_SNAPSHOT_ENABLED, false);
  boolean userDisabled = (enabled != null && enabled.trim().length() > 0 && !snapshotEnabled);

  // Collect the hfile cleaner plugins currently configured.
  Set<String> hfileCleaners = new HashSet<String>();
  String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
  if (cleaners != null) Collections.addAll(hfileCleaners, cleaners);

  // Collect the log cleaner plugins currently configured.
  Set<String> logCleaners = new HashSet<String>();
  cleaners = conf.getStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
  if (cleaners != null) Collections.addAll(logCleaners, cleaners);

  // If the user has enabled snapshots, force the required cleaners into the
  // configuration. Otherwise infer support from the cleaners that are already
  // configured, and (below) verify that no snapshots exist when support is missing.
  if (snapshotEnabled) {
    // Inject the snapshot-aware cleaners so files referenced by snapshots survive.
    hfileCleaners.add(SnapshotHFileCleaner.class.getName());
    hfileCleaners.add(HFileLinkCleaner.class.getName());
    logCleaners.add(SnapshotLogCleaner.class.getName());

    // Write the (possibly extended) cleaner lists back into the configuration.
    conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
      hfileCleaners.toArray(new String[hfileCleaners.size()]));
    conf.setStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS,
      logCleaners.toArray(new String[logCleaners.size()]));
  } else {
    // Feature flag not enabled: treat snapshots as supported only when all three
    // snapshot cleaners are already present in the configuration.
    snapshotEnabled = logCleaners.contains(SnapshotLogCleaner.class.getName()) &&
      hfileCleaners.contains(SnapshotHFileCleaner.class.getName()) &&
      hfileCleaners.contains(HFileLinkCleaner.class.getName());

    // Warn if the cleaners are enabled but the snapshot.enabled property is false/not set.
    if (snapshotEnabled) {
      LOG.warn("Snapshot log and hfile cleaners are present in the configuration, " +
        "but the '" + HBASE_SNAPSHOT_ENABLED + "' property " +
        (userDisabled ? "is set to 'false'." : "is not set."));
    }
  }

  // The feature is supported when the cleaners are in place and the user has not
  // explicitly disabled it.
  this.isSnapshotSupported = snapshotEnabled && !userDisabled;

  // Without the cleaners, any existing snapshot is subject to data loss, so abort
  // startup if a completed snapshot is found in the .snapshot directory.
  if (!snapshotEnabled) {
    LOG.info("Snapshot feature is not enabled, missing log and hfile cleaners.");
    Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(mfs.getRootDir());
    FileSystem fs = mfs.getFileSystem();
    if (fs.exists(snapshotDir)) {
      FileStatus[] snapshots = FSUtils.listStatus(fs, snapshotDir,
        new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
      if (snapshots != null) {
        // checkSnapshotSupport() throws UnsupportedOperationException here, since
        // isSnapshotSupported is necessarily false in this branch.
        LOG.error("Snapshots are present, but cleaners are not enabled.");
        checkSnapshotSupport();
      }
    }
  }
}
|
||||
}
|
||||
|
|
|
@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
|
|||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||
import org.apache.hadoop.hbase.LargeTests;
|
||||
import org.apache.hadoop.hbase.master.HMaster;
|
||||
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.regionserver.HRegion;
|
||||
import org.apache.hadoop.hbase.regionserver.HRegionServer;
|
||||
|
@ -71,9 +72,7 @@ public class TestRestoreSnapshotFromClient {
|
|||
|
||||
@BeforeClass
|
||||
public static void setUpBeforeClass() throws Exception {
|
||||
TEST_UTIL.getConfiguration().set("hbase.master.hfilecleaner.plugins",
|
||||
"org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner," +
|
||||
"org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner");
|
||||
TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
|
||||
TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
|
||||
TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
|
||||
TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
|
||||
|
|
|
@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
|
|||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.LargeTests;
|
||||
import org.apache.hadoop.hbase.TableNotFoundException;
|
||||
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
|
||||
import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
|
||||
|
@ -83,6 +84,8 @@ public class TestSnapshotFromClient {
|
|||
conf.setInt("hbase.hstore.blockingStoreFiles", 12);
|
||||
// drop the number of attempts for the hbase admin
|
||||
conf.setInt("hbase.client.retries.number", 1);
|
||||
// Enable snapshot
|
||||
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
|
||||
}
|
||||
|
||||
@Before
|
||||
|
|
|
@ -34,12 +34,14 @@ import org.apache.hadoop.fs.FileStatus;
|
|||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hbase.HBaseTestingUtility;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.MediumTests;
|
||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.client.HTable;
|
||||
import org.apache.hadoop.hbase.master.HMaster;
|
||||
import org.apache.hadoop.hbase.master.snapshot.DisabledTableSnapshotHandler;
|
||||
import org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner;
|
||||
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.DeleteSnapshotRequest;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.IsSnapshotDoneRequest;
|
||||
|
@ -94,10 +96,10 @@ public class TestSnapshotFromMaster {
|
|||
setupConf(UTIL.getConfiguration());
|
||||
UTIL.startMiniCluster(NUM_RS);
|
||||
fs = UTIL.getDFSCluster().getFileSystem();
|
||||
rootDir = FSUtils.getRootDir(UTIL.getConfiguration());
|
||||
snapshots = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
|
||||
archiveDir = new Path(rootDir, ".archive");
|
||||
master = UTIL.getMiniHBaseCluster().getMaster();
|
||||
rootDir = master.getMasterFileSystem().getRootDir();
|
||||
snapshots = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
|
||||
archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
|
||||
}
|
||||
|
||||
private static void setupConf(Configuration conf) {
|
||||
|
@ -113,9 +115,11 @@ public class TestSnapshotFromMaster {
|
|||
conf.setInt("hbase.hstore.blockingStoreFiles", 12);
|
||||
// drop the number of attempts for the hbase admin
|
||||
conf.setInt("hbase.client.retries.number", 1);
|
||||
// set the only HFile cleaner as the snapshot cleaner
|
||||
conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
|
||||
SnapshotHFileCleaner.class.getCanonicalName());
|
||||
// Ensure no extra cleaners on by default (e.g. TimeToLiveHFileCleaner)
|
||||
conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
|
||||
conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, "");
|
||||
// Enable snapshot
|
||||
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
|
||||
conf.setLong(SnapshotHFileCleaner.HFILE_CACHE_REFRESH_PERIOD_CONF_KEY, cacheRefreshPeriod);
|
||||
}
|
||||
|
||||
|
@ -205,9 +209,7 @@ public class TestSnapshotFromMaster {
|
|||
|
||||
// then create a snapshot to the fs and make sure that we can find it when checking done
|
||||
snapshotName = "completed";
|
||||
FileSystem fs = master.getMasterFileSystem().getFileSystem();
|
||||
Path root = master.getMasterFileSystem().getRootDir();
|
||||
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, root);
|
||||
Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
|
||||
desc = desc.toBuilder().setName(snapshotName).build();
|
||||
SnapshotDescriptionUtils.writeSnapshotInfo(desc, snapshotDir, fs);
|
||||
|
||||
|
@ -294,10 +296,8 @@ public class TestSnapshotFromMaster {
|
|||
byte[] snapshotNameBytes = Bytes.toBytes(snapshotName);
|
||||
admin.snapshot(snapshotNameBytes, TABLE_NAME);
|
||||
|
||||
Configuration conf = UTIL.getConfiguration();
|
||||
Path rootDir = FSUtils.getRootDir(conf);
|
||||
FileSystem fs = FSUtils.getCurrentFileSystem(conf);
|
||||
|
||||
Configuration conf = master.getConfiguration();
|
||||
LOG.info("After snapshot File-System state");
|
||||
FSUtils.logFileSystemState(fs, rootDir, LOG);
|
||||
|
||||
// ensure we only have one snapshot
|
||||
|
@ -309,12 +309,17 @@ public class TestSnapshotFromMaster {
|
|||
// compact the files so we get some archived files for the table we just snapshotted
|
||||
List<HRegion> regions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
|
||||
for (HRegion region : regions) {
|
||||
region.waitForFlushesAndCompactions(); // enable can trigger a compaction, wait for it.
|
||||
region.compactStores(); // min is 3 so will compact and archive
|
||||
}
|
||||
LOG.info("After compaction File-System state");
|
||||
FSUtils.logFileSystemState(fs, rootDir, LOG);
|
||||
|
||||
// make sure the cleaner has run
|
||||
LOG.debug("Running hfile cleaners");
|
||||
ensureHFileCleanersRun();
|
||||
LOG.info("After cleaners File-System state: " + rootDir);
|
||||
FSUtils.logFileSystemState(fs, rootDir, LOG);
|
||||
|
||||
// get the snapshot files for the table
|
||||
Path snapshotTable = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
|
||||
|
@ -325,7 +330,7 @@ public class TestSnapshotFromMaster {
|
|||
LOG.debug(file.getPath());
|
||||
}
|
||||
// get the archived files for the table
|
||||
Collection<String> files = getArchivedHFiles(conf, rootDir, fs, STRING_TABLE_NAME);
|
||||
Collection<String> files = getArchivedHFiles(archiveDir, rootDir, fs, STRING_TABLE_NAME);
|
||||
|
||||
// and make sure that there is a proper subset
|
||||
for (FileStatus file : snapshotHFiles) {
|
||||
|
@ -341,13 +346,18 @@ public class TestSnapshotFromMaster {
|
|||
// make sure we wait long enough to refresh the snapshot hfile
|
||||
List<BaseHFileCleanerDelegate> delegates = UTIL.getMiniHBaseCluster().getMaster()
|
||||
.getHFileCleaner().cleanersChain;
|
||||
((SnapshotHFileCleaner) delegates.get(0)).getFileCacheForTesting()
|
||||
.triggerCacheRefreshForTesting();
|
||||
for (BaseHFileCleanerDelegate delegate: delegates) {
|
||||
if (delegate instanceof SnapshotHFileCleaner) {
|
||||
((SnapshotHFileCleaner)delegate).getFileCacheForTesting().triggerCacheRefreshForTesting();
|
||||
}
|
||||
}
|
||||
// run the cleaner again
|
||||
LOG.debug("Running hfile cleaners");
|
||||
ensureHFileCleanersRun();
|
||||
LOG.info("After delete snapshot cleaners run File-System state");
|
||||
FSUtils.logFileSystemState(fs, rootDir, LOG);
|
||||
|
||||
files = getArchivedHFiles(conf, rootDir, fs, STRING_TABLE_NAME);
|
||||
files = getArchivedHFiles(archiveDir, rootDir, fs, STRING_TABLE_NAME);
|
||||
assertEquals("Still have some hfiles in the archive, when their snapshot has been deleted.", 0,
|
||||
files.size());
|
||||
}
|
||||
|
@ -356,12 +366,12 @@ public class TestSnapshotFromMaster {
|
|||
* @return all the HFiles for a given table that have been archived
|
||||
* @throws IOException on expected failure
|
||||
*/
|
||||
private final Collection<String> getArchivedHFiles(Configuration conf, Path rootDir,
|
||||
private final Collection<String> getArchivedHFiles(Path archiveDir, Path rootDir,
|
||||
FileSystem fs, String tableName) throws IOException {
|
||||
Path tableArchive = HFileArchiveUtil.getTableArchivePath(new Path(rootDir, tableName));
|
||||
Path tableArchive = new Path(archiveDir, tableName);
|
||||
FileStatus[] archivedHFiles = SnapshotTestingUtils.listHFiles(fs, tableArchive);
|
||||
List<String> files = new ArrayList<String>(archivedHFiles.length);
|
||||
LOG.debug("Have archived hfiles:");
|
||||
LOG.debug("Have archived hfiles: " + tableArchive);
|
||||
for (FileStatus file : archivedHFiles) {
|
||||
LOG.debug(file.getPath());
|
||||
files.add(file.getPath().getName());
|
||||
|
|
|
@ -19,15 +19,24 @@ package org.apache.hadoop.hbase.master.snapshot;
|
|||
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hbase.HBaseTestingUtility;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.SmallTests;
|
||||
import org.apache.hadoop.hbase.executor.ExecutorService;
|
||||
import org.apache.hadoop.hbase.master.MasterFileSystem;
|
||||
import org.apache.hadoop.hbase.master.MasterServices;
|
||||
import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
|
||||
import org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner;
|
||||
import org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner;
|
||||
import org.apache.hadoop.hbase.master.snapshot.SnapshotLogCleaner;
|
||||
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
|
||||
import org.apache.zookeeper.KeeperException;
|
||||
import org.junit.Test;
|
||||
import org.junit.experimental.categories.Category;
|
||||
|
@ -52,9 +61,13 @@ public class TestSnapshotManager {
|
|||
}
|
||||
}
|
||||
|
||||
private SnapshotManager getNewManager() throws KeeperException, IOException {
|
||||
private SnapshotManager getNewManager() throws IOException, KeeperException {
|
||||
return getNewManager(UTIL.getConfiguration());
|
||||
}
|
||||
|
||||
private SnapshotManager getNewManager(final Configuration conf) throws IOException, KeeperException {
|
||||
Mockito.reset(services);
|
||||
Mockito.when(services.getConfiguration()).thenReturn(UTIL.getConfiguration());
|
||||
Mockito.when(services.getConfiguration()).thenReturn(conf);
|
||||
Mockito.when(services.getMasterFileSystem()).thenReturn(mfs);
|
||||
Mockito.when(mfs.getFileSystem()).thenReturn(fs);
|
||||
Mockito.when(mfs.getRootDir()).thenReturn(UTIL.getDataTestDir());
|
||||
|
@ -72,4 +85,71 @@ public class TestSnapshotManager {
|
|||
Mockito.when(handler.isFinished()).thenReturn(true);
|
||||
assertFalse("Manager is process when handler isn't running", manager.isTakingSnapshot());
|
||||
}
|
||||
|
||||
/**
|
||||
* Verify the snapshot support based on the configuration.
|
||||
*/
|
||||
@Test
|
||||
public void testSnapshotSupportConfiguration() throws Exception {
|
||||
// No configuration (no cleaners, not enabled): snapshot feature disabled
|
||||
Configuration conf = new Configuration();
|
||||
SnapshotManager manager = getNewManager(conf);
|
||||
assertFalse("Snapshot should be disabled with no configuration", isSnapshotSupported(manager));
|
||||
|
||||
// force snapshot feature to be enabled
|
||||
conf = new Configuration();
|
||||
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
|
||||
manager = getNewManager(conf);
|
||||
assertTrue("Snapshot should be enabled", isSnapshotSupported(manager));
|
||||
|
||||
// force snapshot feature to be disabled
|
||||
conf = new Configuration();
|
||||
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, false);
|
||||
manager = getNewManager(conf);
|
||||
assertFalse("Snapshot should be disabled", isSnapshotSupported(manager));
|
||||
|
||||
// force snapshot feature to be disabled, even if cleaners are present
|
||||
conf = new Configuration();
|
||||
conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
|
||||
SnapshotHFileCleaner.class.getName(), HFileLinkCleaner.class.getName());
|
||||
conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, SnapshotLogCleaner.class.getName());
|
||||
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, false);
|
||||
manager = getNewManager(conf);
|
||||
assertFalse("Snapshot should be disabled", isSnapshotSupported(manager));
|
||||
|
||||
// cleaners are present, but missing snapshot enabled property
|
||||
conf = new Configuration();
|
||||
conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
|
||||
SnapshotHFileCleaner.class.getName(), HFileLinkCleaner.class.getName());
|
||||
conf.set(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS, SnapshotLogCleaner.class.getName());
|
||||
manager = getNewManager(conf);
|
||||
assertTrue("Snapshot should be enabled, because cleaners are present",
|
||||
isSnapshotSupported(manager));
|
||||
|
||||
// Create a "test snapshot"
|
||||
Path rootDir = UTIL.getDataTestDir();
|
||||
Path testSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(
|
||||
"testSnapshotSupportConfiguration", rootDir);
|
||||
fs.mkdirs(testSnapshotDir);
|
||||
try {
|
||||
// force snapshot feature to be disabled, but snapshots are present
|
||||
conf = new Configuration();
|
||||
conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, false);
|
||||
manager = getNewManager(conf);
|
||||
fail("Master should not start when snapshot is disabled, but snapshots are present");
|
||||
} catch (UnsupportedOperationException e) {
|
||||
// expected
|
||||
} finally {
|
||||
fs.delete(testSnapshotDir, true);
|
||||
}
|
||||
}
|
||||
|
||||
private boolean isSnapshotSupported(final SnapshotManager manager) {
|
||||
try {
|
||||
manager.checkSnapshotSupport();
|
||||
return true;
|
||||
} catch (UnsupportedOperationException e) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.HTableDescriptor;
|
|||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.MediumTests;
|
||||
import org.apache.hadoop.hbase.MiniHBaseCluster;
|
||||
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
|
||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.client.HTable;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
|
@ -75,6 +76,7 @@ public class TestExportSnapshot {
|
|||
|
||||
@BeforeClass
|
||||
public static void setUpBeforeClass() throws Exception {
|
||||
TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
|
||||
TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
|
||||
TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
|
||||
TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6);
|
||||
|
|
Loading…
Reference in New Issue