HBASE-14714 some cleanup to snapshot code
parent 066f3f7545
commit 569fe82acf
@ -371,8 +371,21 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
   * @param desc The descriptor.
   */
  public HTableDescriptor(final HTableDescriptor desc) {
    this(desc.name, desc);
  }

  /**
   * Construct a table descriptor by cloning the descriptor passed as a parameter
   * but using a different table name.
   * <p>
   * Makes a deep copy of the supplied descriptor.
   * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
   * @param name Table name.
   * @param desc The descriptor.
   */
  public HTableDescriptor(final TableName name, final HTableDescriptor desc) {
    super();
    setName(desc.name);
    setName(name);
    setMetaFlags(this.name);
    for (HColumnDescriptor c: desc.families.values()) {
      this.families.put(c.getName(), new HColumnDescriptor(c));
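
The new copy-with-rename constructor gives callers a one-line way to deep-copy a schema under a different table name (previously done with RestoreSnapshotHelper.cloneTableSchema, which this patch removes). A minimal illustrative sketch, not part of the commit; the wrapper class, method, and table name below are made up:

    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;

    public class CloneDescriptorExample {
      // Deep-copies families, values and configuration from 'source'; the returned
      // descriptor reports 'newName' as its table name and is modifiable even if
      // 'source' is an UnmodifyableHTableDescriptor.
      static HTableDescriptor cloneWithNewName(HTableDescriptor source, TableName newName) {
        return new HTableDescriptor(newName, source);
      }
    }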
@ -34,11 +34,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.util.FSUtils;

/**
@ -49,10 +45,10 @@ import org.apache.hadoop.hbase.util.FSUtils;
 * <p>
 * This also allows one to run the scan from an
 * online or offline hbase cluster. The snapshot files can be exported by using the
 * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs cluster,
 * and this scanner can be used to run the scan directly over the snapshot files.
 * The snapshot should not be deleted while there are open scanners reading from snapshot
 * files.
 * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool,
 * to a pure-hdfs cluster, and this scanner can be used to
 * run the scan directly over the snapshot files. The snapshot should not be deleted while there
 * are open scanners reading from snapshot files.
 *
 * <p>
 * An internal RegionScanner is used to execute the {@link Scan} obtained
@ -125,23 +121,14 @@ public class TableSnapshotScanner extends AbstractClientScanner {
  }

  private void init() throws IOException {
    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
    SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);

    // load table descriptor
    htd = manifest.getTableDescriptor();

    List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
    if (regionManifests == null) {
      throw new IllegalArgumentException("Snapshot seems empty");
    }

    regions = new ArrayList<HRegionInfo>(regionManifests.size());
    for (SnapshotRegionManifest regionManifest : regionManifests) {
      // load region descriptor
      HRegionInfo hri = HRegionInfo.convert(regionManifest.getRegionInfo());
    final RestoreSnapshotHelper.RestoreMetaChanges meta =
        RestoreSnapshotHelper.copySnapshotForScanner(
          conf, fs, rootDir, restoreDir, snapshotName);
    final List<HRegionInfo> restoredRegions = meta.getRegionsToAdd();

    htd = meta.getTableDescriptor();
    regions = new ArrayList<HRegionInfo>(restoredRegions.size());
    for (HRegionInfo hri: restoredRegions) {
      if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
          hri.getStartKey(), hri.getEndKey())) {
        regions.add(hri);
@ -150,11 +137,7 @@ public class TableSnapshotScanner extends AbstractClientScanner {

    // sort for regions according to startKey.
    Collections.sort(regions);

    initScanMetrics(scan);

    RestoreSnapshotHelper.copySnapshotForScanner(conf, fs,
      rootDir, restoreDir, snapshotName);
  }

  @Override
@ -184,7 +167,7 @@ public class TableSnapshotScanner extends AbstractClientScanner {
        if (result == null) {
          currentRegionScanner.close();
          currentRegionScanner = null;
        }
      }
    }
  }
}
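
With init() now delegating to RestoreSnapshotHelper.copySnapshotForScanner, client-side usage of the scanner is unchanged. A hedged sketch of that usage, assuming the (conf, restoreDir, snapshotName, scan) constructor that TableSnapshotScanner exposes; the restore path and snapshot name are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.TableSnapshotScanner;

    public class SnapshotScanExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // The restore directory must not live under the HBase root dir; the snapshot
        // is restored into it (as links/references) before the scan runs.
        Path restoreDir = new Path("/tmp/snapshot-restore");
        Scan scan = new Scan();
        try (TableSnapshotScanner scanner =
            new TableSnapshotScanner(conf, restoreDir, "mySnapshot", scan)) {
          for (Result r = scanner.next(); r != null; r = scanner.next()) {
            // process rows read directly from the snapshot files
          }
        }
      }
    }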
@ -596,4 +596,8 @@ public class MasterFileSystem {
  public RecoveryMode getLogRecoveryMode() {
    return this.splitLogManager.getRecoveryMode();
  }

  public void logFileSystemState(Log log) throws IOException {
    FSUtils.logFileSystemState(fs, rootdir, log);
  }
}
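
The new helper saves tests from fetching the FileSystem and root dir themselves before dumping filesystem state. A small sketch of how a test might call it (the wrapper class is illustrative, the call itself is the one the patch uses throughout the tests below):

    import java.io.IOException;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class LogFsStateExample {
      private static final Log LOG = LogFactory.getLog(LogFsStateExample.class);

      // Replaces the old FSUtils.logFileSystemState(fs, rootDir, LOG) boilerplate.
      static void dumpClusterFs(HBaseTestingUtility util) throws IOException {
        util.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
      }
    }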
@ -70,7 +70,6 @@ import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
@ -741,7 +740,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
        cpHost.postRestoreSnapshot(reqSnapshot, snapshotTableDesc);
      }
    } else {
      HTableDescriptor htd = RestoreSnapshotHelper.cloneTableSchema(snapshotTableDesc, tableName);
      HTableDescriptor htd = new HTableDescriptor(tableName, snapshotTableDesc);
      if (cpHost != null) {
        cpHost.preCloneSnapshot(reqSnapshot, htd);
      }
@ -761,7 +760,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
      }
    }
  }


  private void checkAndUpdateNamespaceQuota(SnapshotManifest manifest, TableName tableName)
      throws IOException {
    if (this.master.getMasterQuotaManager().isQuotaEnabled()) {
@ -23,6 +23,7 @@ import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
@ -40,7 +41,6 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.backup.HFileArchiver;
@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
@ -183,7 +182,7 @@ public class RestoreSnapshotHelper {
      return null;
    }

    RestoreMetaChanges metaChanges = new RestoreMetaChanges(parentsMap);
    RestoreMetaChanges metaChanges = new RestoreMetaChanges(tableDesc, parentsMap);

    // Take a copy of the manifest.keySet() since we are going to modify
    // this instance, by removing the regions already present in the restore dir.
@ -245,13 +244,19 @@ public class RestoreSnapshotHelper {
   */
  public static class RestoreMetaChanges {
    private final Map<String, Pair<String, String> > parentsMap;
    private final HTableDescriptor htd;

    private List<HRegionInfo> regionsToRestore = null;
    private List<HRegionInfo> regionsToRemove = null;
    private List<HRegionInfo> regionsToAdd = null;

    RestoreMetaChanges(final Map<String, Pair<String, String> > parentsMap) {
    RestoreMetaChanges(HTableDescriptor htd, Map<String, Pair<String, String> > parentsMap) {
      this.parentsMap = parentsMap;
      this.htd = htd;
    }

    public HTableDescriptor getTableDescriptor() {
      return htd;
    }

    /**
@ -477,13 +482,12 @@ public class RestoreSnapshotHelper {
   * @return The set of files in the specified family directory.
   */
  private Set<String> getTableRegionFamilyFiles(final Path familyDir) throws IOException {
    Set<String> familyFiles = new HashSet<String>();

    FileStatus[] hfiles = FSUtils.listStatus(fs, familyDir);
    if (hfiles == null) return familyFiles;
    if (hfiles == null) return Collections.emptySet();

    for (FileStatus hfileRef: hfiles) {
      String hfileName = hfileRef.getPath().getName();
    Set<String> familyFiles = new HashSet<String>(hfiles.length);
    for (int i = 0; i < hfiles.length; ++i) {
      String hfileName = hfiles[i].getPath().getName();
      familyFiles.add(hfileName);
    }
@ -667,7 +671,11 @@ public class RestoreSnapshotHelper {
   * @return the new HRegion instance
   */
  public HRegionInfo cloneRegionInfo(final HRegionInfo snapshotRegionInfo) {
    HRegionInfo regionInfo = new HRegionInfo(tableDesc.getTableName(),
    return cloneRegionInfo(tableDesc.getTableName(), snapshotRegionInfo);
  }

  public static HRegionInfo cloneRegionInfo(TableName tableName, HRegionInfo snapshotRegionInfo) {
    HRegionInfo regionInfo = new HRegionInfo(tableName,
      snapshotRegionInfo.getStartKey(), snapshotRegionInfo.getEndKey(),
      snapshotRegionInfo.isSplit(), snapshotRegionInfo.getRegionId());
    regionInfo.setOffline(snapshotRegionInfo.isOffline());
@ -682,9 +690,9 @@ public class RestoreSnapshotHelper {
    FileStatus[] regionDirs = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
    if (regionDirs == null) return null;

    List<HRegionInfo> regions = new LinkedList<HRegionInfo>();
    for (FileStatus regionDir: regionDirs) {
      HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir.getPath());
    List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionDirs.length);
    for (int i = 0; i < regionDirs.length; ++i) {
      HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDirs[i].getPath());
      regions.add(hri);
    }
    LOG.debug("found " + regions.size() + " regions for table=" +
@ -692,30 +700,6 @@ public class RestoreSnapshotHelper {
    return regions;
  }

  /**
   * Create a new table descriptor cloning the snapshot table schema.
   *
   * @param snapshotTableDescriptor
   * @param tableName
   * @return cloned table descriptor
   * @throws IOException
   */
  public static HTableDescriptor cloneTableSchema(final HTableDescriptor snapshotTableDescriptor,
      final TableName tableName) throws IOException {
    HTableDescriptor htd = new HTableDescriptor(tableName);
    for (HColumnDescriptor hcd: snapshotTableDescriptor.getColumnFamilies()) {
      htd.addFamily(hcd);
    }
    for (Map.Entry<ImmutableBytesWritable, ImmutableBytesWritable> e:
        snapshotTableDescriptor.getValues().entrySet()) {
      htd.setValue(e.getKey(), e.getValue());
    }
    for (Map.Entry<String, String> e: snapshotTableDescriptor.getConfiguration().entrySet()) {
      htd.setConfiguration(e.getKey(), e.getValue());
    }
    return htd;
  }

  /**
   * Copy the snapshot files for a snapshot scanner, discards meta changes.
   * @param conf
@ -725,8 +709,8 @@ public class RestoreSnapshotHelper {
   * @param snapshotName
   * @throws IOException
   */
  public static void copySnapshotForScanner(Configuration conf, FileSystem fs, Path rootDir,
      Path restoreDir, String snapshotName) throws IOException {
  public static RestoreMetaChanges copySnapshotForScanner(Configuration conf, FileSystem fs,
      Path rootDir, Path restoreDir, String snapshotName) throws IOException {
    // ensure that restore dir is not under root dir
    if (!restoreDir.getFileSystem(conf).getUri().equals(rootDir.getFileSystem(conf).getUri())) {
      throw new IllegalArgumentException("Filesystems for restore directory and HBase root directory " +
@ -749,11 +733,12 @@ public class RestoreSnapshotHelper {
    // in the base hbase root dir.
    RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs,
      manifest, manifest.getTableDescriptor(), restoreDir, monitor, status, false);
    helper.restoreHdfsRegions(); // TODO: parallelize.
    RestoreMetaChanges metaChanges = helper.restoreHdfsRegions(); // TODO: parallelize.

    if (LOG.isDebugEnabled()) {
      LOG.debug("Restored table dir:" + restoreDir);
      FSUtils.logFileSystemState(fs, restoreDir, LOG);
    }
    return metaChanges;
  }
}
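
Since copySnapshotForScanner() now returns the RestoreMetaChanges, which carries the table descriptor, callers such as TableSnapshotScanner no longer need to reopen the snapshot manifest. A hedged sketch of the calling pattern; the wrapper class, the System.out reporting, and the "restoredTable" name are made up for illustration:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;

    public class RestoreForScanExample {
      static void restoreAndInspect(Configuration conf, FileSystem fs, Path rootDir,
          Path restoreDir, String snapshotName) throws IOException {
        // Restore the snapshot layout under restoreDir and keep the returned meta changes.
        RestoreSnapshotHelper.RestoreMetaChanges meta =
            RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);
        HTableDescriptor htd = meta.getTableDescriptor();
        List<HRegionInfo> restoredRegions = meta.getRegionsToAdd();
        System.out.println(htd.getTableName() + " restored with " + restoredRegions.size() + " regions");

        // The new static overload rewrites a snapshot region's info under another table name.
        for (HRegionInfo snapshotRegion : restoredRegions) {
          HRegionInfo cloned = RestoreSnapshotHelper.cloneRegionInfo(
              TableName.valueOf("restoredTable"), snapshotRegion);
          System.out.println("cloned region: " + cloned.getRegionNameAsString());
        }
      }
    }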
@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;

/**
 * Utility methods for interacting with the snapshot referenced files.
@ -25,7 +25,6 @@ import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CategoryBasedTimeout;
import org.apache.hadoop.hbase.HBaseTestingUtility;
@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
@ -22,9 +22,7 @@ import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;
@ -39,7 +37,6 @@ import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.snapshot.DisabledTableSnapshotHandler;
import org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner;
@ -381,17 +378,7 @@ public class TestSnapshotFromMaster {
  private final Collection<String> getArchivedHFiles(Path archiveDir, Path rootDir,
      FileSystem fs, TableName tableName) throws IOException {
    Path tableArchive = FSUtils.getTableDir(archiveDir, tableName);
    Path[] archivedHFiles = SnapshotTestingUtils.listHFiles(fs, tableArchive);
    List<String> files = new ArrayList<String>(archivedHFiles.length);
    LOG.debug("Have archived hfiles: " + tableArchive);
    for (Path file : archivedHFiles) {
      LOG.debug(file);
      files.add(file.getName());
    }
    // sort the archived files

    Collections.sort(files);
    return files;
    return SnapshotTestingUtils.listHFileNames(fs, tableArchive);
  }

  /**
@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.master.MasterFileSystem;
@ -22,7 +22,7 @@ import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
@ -43,11 +43,9 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotEnabledException;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
@ -150,6 +148,14 @@ public class SnapshotTestingUtils {
        tableName);
  }

  public static void confirmSnapshotValid(HBaseTestingUtility testUtil,
      SnapshotDescription snapshotDescriptor, TableName tableName, byte[] family)
      throws IOException {
    MasterFileSystem mfs = testUtil.getHBaseCluster().getMaster().getMasterFileSystem();
    confirmSnapshotValid(snapshotDescriptor, tableName, family,
      mfs.getRootDir(), testUtil.getHBaseAdmin(), mfs.getFileSystem());
  }

  /**
   * Confirm that the snapshot contains references to all the files that should
   * be in the snapshot.
@ -330,28 +336,18 @@ public class SnapshotTestingUtils {
   * @return array of the current HFiles in the table (could be a zero-length array)
   * @throws IOException on unexecpted error reading the FS
   */
  public static Path[] listHFiles(final FileSystem fs, final Path tableDir)
  public static ArrayList<String> listHFileNames(final FileSystem fs, final Path tableDir)
      throws IOException {
    final ArrayList<Path> hfiles = new ArrayList<Path>();
    final ArrayList<String> hfiles = new ArrayList<String>();
    FSVisitor.visitTableStoreFiles(fs, tableDir, new FSVisitor.StoreFileVisitor() {
      @Override
      public void storeFile(final String region, final String family, final String hfileName)
          throws IOException {
        hfiles.add(new Path(tableDir, new Path(region, new Path(family, hfileName))));
        hfiles.add(hfileName);
      }
    });
    return hfiles.toArray(new Path[hfiles.size()]);
  }

  public static String[] listHFileNames(final FileSystem fs, final Path tableDir)
      throws IOException {
    Path[] files = listHFiles(fs, tableDir);
    String[] names = new String[files.length];
    for (int i = 0; i < files.length; ++i) {
      names[i] = files[i].getName();
    }
    Arrays.sort(names);
    return names;
    Collections.sort(hfiles);
    return hfiles;
  }

  /**
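
Tests can now lean on the two reworked helpers above. A short illustrative sketch, not taken from the patch; the wrapper class, the "fam" family name, and the System.out reporting are hypothetical:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
    import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.FSUtils;

    public class SnapshotTestHelpersExample {
      static void checkSnapshot(HBaseTestingUtility util, SnapshotDescription snapshot,
          TableName table, FileSystem fs, Path rootDir) throws IOException {
        // New overload: root dir, admin and filesystem are derived from the testing
        // utility instead of being threaded through by every caller.
        SnapshotTestingUtils.confirmSnapshotValid(util, snapshot, table, Bytes.toBytes("fam"));

        // listHFileNames() now returns a sorted List<String> of file names instead of
        // a Path[] that each caller had to map and sort itself.
        List<String> hfiles = SnapshotTestingUtils.listHFileNames(fs,
            FSUtils.getTableDir(rootDir, table));
        System.out.println("hfiles: " + hfiles);
      }
    }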
@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescriptio
import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
@ -140,8 +139,7 @@ public class TestFlushSnapshotFromClient {
    SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);

    LOG.debug("FS state before snapshot:");
    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);

    // take a snapshot of the enabled table
    String snapshotString = "offlineTableSnapshot";
@ -154,14 +152,10 @@ public class TestFlushSnapshotFromClient {
        snapshot, TABLE_NAME);

    // make sure its a valid snapshot
    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
    LOG.debug("FS state after snapshot:");
    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);

    SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
      admin, fs);
    SnapshotTestingUtils.confirmSnapshotValid(UTIL, snapshots.get(0), TABLE_NAME, TEST_FAM);
  }

  /**
@ -180,8 +174,7 @@ public class TestFlushSnapshotFromClient {
    }

    LOG.debug("FS state before snapshot:");
    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);

    // take a snapshot of the enabled table
    String snapshotString = "skipFlushTableSnapshot";
@ -194,14 +187,10 @@ public class TestFlushSnapshotFromClient {
        snapshot, TABLE_NAME);

    // make sure its a valid snapshot
    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
    LOG.debug("FS state after snapshot:");
    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);

    SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
      admin, fs);
    SnapshotTestingUtils.confirmSnapshotValid(UTIL, snapshots.get(0), TABLE_NAME, TEST_FAM);

    admin.deleteSnapshot(snapshot);
    snapshots = admin.listSnapshots();
@ -223,8 +212,7 @@ public class TestFlushSnapshotFromClient {
    SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);

    LOG.debug("FS state before snapshot:");
    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);

    // take a snapshot of the enabled table
    String snapshotString = "offlineTableSnapshot";
@ -242,14 +230,10 @@ public class TestFlushSnapshotFromClient {
        snapshot, TABLE_NAME);

    // make sure its a valid snapshot
    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
    LOG.debug("FS state after snapshot:");
    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);

    SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
      admin, fs);
    SnapshotTestingUtils.confirmSnapshotValid(UTIL, snapshots.get(0), TABLE_NAME, TEST_FAM);
  }

  @Test (timeout=300000)
@ -296,8 +280,8 @@ public class TestFlushSnapshotFromClient {
    HMaster master = UTIL.getMiniHBaseCluster().getMaster();
    SnapshotTestingUtils.waitForSnapshotToComplete(master, snapshot, 200);
    LOG.info(" === Async Snapshot Completed ===");
    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);

    // make sure we get the snapshot
    SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot);
  }
@ -497,7 +481,7 @@ public class TestFlushSnapshotFromClient {
    }

    // dump for debugging
    logFSTree(FSUtils.getRootDir(UTIL.getConfiguration()));
    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);

    List<SnapshotDescription> taken = admin.listSnapshots();
    int takenSize = taken.size();
@ -521,10 +505,6 @@ public class TestFlushSnapshotFromClient {
    UTIL.deleteTable(TABLE2_NAME);
  }

  private void logFSTree(Path root) throws IOException {
    FSUtils.logFileSystemState(UTIL.getDFSCluster().getFileSystem(), root, LOG);
  }

  private void waitRegionsAfterMerge(final long numRegionsAfterMerge)
      throws IOException, InterruptedException {
    Admin admin = UTIL.getHBaseAdmin();
@ -27,12 +27,10 @@ import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
@ -199,7 +197,6 @@ public class TestRestoreFlushSnapshotFromClient {
  // Helpers
  // ==========================================================================
  private void logFSTree() throws IOException {
    MasterFileSystem mfs = UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
    FSUtils.logFileSystemState(mfs.getFileSystem(), mfs.getRootDir(), LOG);
    UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
  }
}
@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -116,12 +117,12 @@ public class TestRestoreSnapshotHelper {

  private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd,
      final HTableDescriptor htdClone) throws IOException {
    String[] files = SnapshotTestingUtils.listHFileNames(fs,
    List<String> files = SnapshotTestingUtils.listHFileNames(fs,
      FSUtils.getTableDir(rootDir, htdClone.getTableName()));
    assertEquals(12, files.length);
    for (int i = 0; i < files.length; i += 2) {
      String linkFile = files[i];
      String refFile = files[i+1];
    assertEquals(12, files.size());
    for (int i = 0; i < files.size(); i += 2) {
      String linkFile = files.get(i);
      String refFile = files.get(i+1);
      assertTrue(linkFile + " should be a HFileLink", HFileLink.isHFileLink(linkFile));
      assertTrue(refFile + " should be a Referene", StoreFileInfo.isReference(refFile));
      assertEquals(sourceHtd.getTableName(), HFileLink.getReferencedTableName(linkFile));