HBASE-14714 some cleanup to snapshot code

Matteo Bertozzi 2015-10-29 13:48:59 -07:00
parent 094d65e6f5
commit 4b91a6425e
14 changed files with 133 additions and 212 deletions

HTableDescriptor.java

@@ -355,8 +355,21 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
    * @param desc The descriptor.
    */
   public HTableDescriptor(final HTableDescriptor desc) {
+    this(desc.name, desc);
+  }
+
+  /**
+   * Construct a table descriptor by cloning the descriptor passed as a parameter
+   * but using a different table name.
+   * <p>
+   * Makes a deep copy of the supplied descriptor.
+   * Can make a modifiable descriptor from an UnmodifyableHTableDescriptor.
+   * @param name Table name.
+   * @param desc The descriptor.
+   */
+  public HTableDescriptor(final TableName name, final HTableDescriptor desc) {
     super();
-    setName(desc.name);
+    setName(name);
     setMetaFlags(this.name);
     for (HColumnDescriptor c: desc.families.values()) {
       this.families.put(c.getName(), new HColumnDescriptor(c));
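
The new two-argument constructor supersedes RestoreSnapshotHelper.cloneTableSchema(), which is deleted later in this commit. A minimal sketch of the kind of call site it enables; the helper class and table name here are hypothetical:

    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;

    public class CloneSchemaExample {
      // Deep-copy 'source' under a new table name, as the clone-snapshot
      // path in SnapshotManager now does in a single expression.
      static HTableDescriptor renamedCopy(HTableDescriptor source, String newName) {
        return new HTableDescriptor(TableName.valueOf(newName), source);
      }
    }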

TableSnapshotScanner.java

@@ -34,11 +34,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
@@ -49,8 +45,8 @@ import org.apache.hadoop.hbase.util.FSUtils;
  * <p>
  * This also allows one to run the scan from an
  * online or offline hbase cluster. The snapshot files can be exported by using the
  * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool,
  * to a pure-hdfs cluster, and this scanner can be used to
  * run the scan directly over the snapshot files. The snapshot should not be deleted while there
  * are open scanners reading from snapshot files.
  *
@@ -125,23 +121,14 @@ public class TableSnapshotScanner extends AbstractClientScanner {
   }
 
   private void init() throws IOException {
-    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
-    SnapshotDescription snapshotDesc = SnapshotDescriptionUtils.readSnapshotInfo(fs, snapshotDir);
-    SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshotDesc);
-
-    // load table descriptor
-    htd = manifest.getTableDescriptor();
-
-    List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
-    if (regionManifests == null) {
-      throw new IllegalArgumentException("Snapshot seems empty");
-    }
-
-    regions = new ArrayList<HRegionInfo>(regionManifests.size());
-    for (SnapshotRegionManifest regionManifest : regionManifests) {
-      // load region descriptor
-      HRegionInfo hri = HRegionInfo.convert(regionManifest.getRegionInfo());
+    final RestoreSnapshotHelper.RestoreMetaChanges meta =
+      RestoreSnapshotHelper.copySnapshotForScanner(
+        conf, fs, rootDir, restoreDir, snapshotName);
+    final List<HRegionInfo> restoredRegions = meta.getRegionsToAdd();
+
+    htd = meta.getTableDescriptor();
+    regions = new ArrayList<HRegionInfo>(restoredRegions.size());
+    for (HRegionInfo hri: restoredRegions) {
       if (CellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
           hri.getStartKey(), hri.getEndKey())) {
         regions.add(hri);
@@ -150,11 +137,7 @@ public class TableSnapshotScanner extends AbstractClientScanner {
     // sort for regions according to startKey.
     Collections.sort(regions);
 
     initScanMetrics(scan);
-
-    RestoreSnapshotHelper.copySnapshotForScanner(conf, fs,
-      rootDir, restoreDir, snapshotName);
   }
 
   @Override
@@ -184,7 +167,7 @@ public class TableSnapshotScanner extends AbstractClientScanner {
         if (result == null) {
           currentRegionScanner.close();
           currentRegionScanner = null;
         }
       }
     }
   }
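
With init() now delegating to copySnapshotForScanner(), the scanner's external behavior is unchanged. A rough usage sketch, assuming a snapshot named "my_snapshot" and a scratch restore directory outside the HBase root dir (both names made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.TableSnapshotScanner;

    public class SnapshotScanExample {
      // Count rows by scanning snapshot files directly, bypassing the region servers.
      static long countRows(Configuration conf) throws Exception {
        Path restoreDir = new Path("/tmp/snapshot-restore"); // must not be under the root dir
        TableSnapshotScanner scanner =
            new TableSnapshotScanner(conf, restoreDir, "my_snapshot", new Scan());
        long rows = 0;
        try {
          for (Result r = scanner.next(); r != null; r = scanner.next()) {
            rows++;
          }
        } finally {
          scanner.close();
        }
        return rows;
      }
    }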

MasterFileSystem.java

@@ -612,4 +612,8 @@ public class MasterFileSystem {
   public RecoveryMode getLogRecoveryMode() {
     return this.splitLogManager.getRecoveryMode();
   }
+
+  public void logFileSystemState(Log log) throws IOException {
+    FSUtils.logFileSystemState(fs, rootdir, log);
+  }
 }
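
This is a small convenience wrapper so callers (mostly the tests further down) can dump the cluster's filesystem layout without fetching the FileSystem and root dir themselves. A hedged before/after sketch of a call site; the example class is hypothetical:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hbase.master.MasterFileSystem;

    public class FsStateDumpExample {
      private static final Log LOG = LogFactory.getLog(FsStateDumpExample.class);

      static void dump(MasterFileSystem mfs) throws java.io.IOException {
        // before: FSUtils.logFileSystemState(mfs.getFileSystem(), mfs.getRootDir(), LOG);
        mfs.logFileSystemState(LOG); // after: one call
      }
    }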

SnapshotManager.java

@@ -70,7 +70,6 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
-import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
@@ -741,7 +740,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
         cpHost.postRestoreSnapshot(reqSnapshot, snapshotTableDesc);
       }
     } else {
-      HTableDescriptor htd = RestoreSnapshotHelper.cloneTableSchema(snapshotTableDesc, tableName);
+      HTableDescriptor htd = new HTableDescriptor(tableName, snapshotTableDesc);
       if (cpHost != null) {
         cpHost.preCloneSnapshot(reqSnapshot, htd);
       }
@@ -761,7 +760,7 @@
       }
     }
   }
 
   private void checkAndUpdateNamespaceQuota(SnapshotManifest manifest, TableName tableName)
       throws IOException {
     if (this.master.getMasterQuotaManager().isQuotaEnabled()) {

RestoreSnapshotHelper.java

@@ -23,6 +23,7 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedList;
@@ -40,7 +41,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
@@ -183,7 +183,7 @@ public class RestoreSnapshotHelper {
       return null;
     }
 
-    RestoreMetaChanges metaChanges = new RestoreMetaChanges(parentsMap);
+    RestoreMetaChanges metaChanges = new RestoreMetaChanges(tableDesc, parentsMap);
 
     // Take a copy of the manifest.keySet() since we are going to modify
     // this instance, by removing the regions already present in the restore dir.
@@ -259,13 +259,19 @@
    */
   public static class RestoreMetaChanges {
     private final Map<String, Pair<String, String> > parentsMap;
+    private final HTableDescriptor htd;
 
     private List<HRegionInfo> regionsToRestore = null;
     private List<HRegionInfo> regionsToRemove = null;
     private List<HRegionInfo> regionsToAdd = null;
 
-    RestoreMetaChanges(final Map<String, Pair<String, String> > parentsMap) {
+    RestoreMetaChanges(HTableDescriptor htd, Map<String, Pair<String, String> > parentsMap) {
       this.parentsMap = parentsMap;
+      this.htd = htd;
+    }
+
+    public HTableDescriptor getTableDescriptor() {
+      return htd;
     }
 
     /**
@@ -527,13 +533,12 @@
    * @return The set of files in the specified family directory.
    */
   private Set<String> getTableRegionFamilyFiles(final Path familyDir) throws IOException {
-    Set<String> familyFiles = new HashSet<String>();
-
     FileStatus[] hfiles = FSUtils.listStatus(fs, familyDir);
-    if (hfiles == null) return familyFiles;
+    if (hfiles == null) return Collections.emptySet();
 
-    for (FileStatus hfileRef: hfiles) {
-      String hfileName = hfileRef.getPath().getName();
+    Set<String> familyFiles = new HashSet<String>(hfiles.length);
+    for (int i = 0; i < hfiles.length; ++i) {
+      String hfileName = hfiles[i].getPath().getName();
       familyFiles.add(hfileName);
     }
 
@@ -685,7 +690,7 @@
       Path refPath =
           StoreFileInfo.getReferredToFile(new Path(new Path(new Path(new Path(snapshotTable
               .getNamespaceAsString(), snapshotTable.getQualifierAsString()), regionInfo
               .getEncodedName()), familyDir.getName()), hfileName));
       String snapshotRegionName = refPath.getParent().getParent().getName();
       String fileName = refPath.getName();
@@ -744,7 +749,11 @@
    * @return the new HRegion instance
    */
   public HRegionInfo cloneRegionInfo(final HRegionInfo snapshotRegionInfo) {
-    HRegionInfo regionInfo = new HRegionInfo(tableDesc.getTableName(),
+    return cloneRegionInfo(tableDesc.getTableName(), snapshotRegionInfo);
+  }
+
+  public static HRegionInfo cloneRegionInfo(TableName tableName, HRegionInfo snapshotRegionInfo) {
+    HRegionInfo regionInfo = new HRegionInfo(tableName,
       snapshotRegionInfo.getStartKey(), snapshotRegionInfo.getEndKey(),
       snapshotRegionInfo.isSplit(), snapshotRegionInfo.getRegionId());
     regionInfo.setOffline(snapshotRegionInfo.isOffline());
@@ -759,9 +768,9 @@
     FileStatus[] regionDirs = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs));
     if (regionDirs == null) return null;
 
-    List<HRegionInfo> regions = new LinkedList<HRegionInfo>();
-    for (FileStatus regionDir: regionDirs) {
-      HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir.getPath());
+    List<HRegionInfo> regions = new ArrayList<HRegionInfo>(regionDirs.length);
+    for (int i = 0; i < regionDirs.length; ++i) {
+      HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDirs[i].getPath());
       regions.add(hri);
     }
     LOG.debug("found " + regions.size() + " regions for table=" +
@@ -769,30 +778,6 @@
     return regions;
   }
 
-  /**
-   * Create a new table descriptor cloning the snapshot table schema.
-   *
-   * @param snapshotTableDescriptor
-   * @param tableName
-   * @return cloned table descriptor
-   * @throws IOException
-   */
-  public static HTableDescriptor cloneTableSchema(final HTableDescriptor snapshotTableDescriptor,
-      final TableName tableName) throws IOException {
-    HTableDescriptor htd = new HTableDescriptor(tableName);
-    for (HColumnDescriptor hcd: snapshotTableDescriptor.getColumnFamilies()) {
-      htd.addFamily(hcd);
-    }
-    for (Map.Entry<Bytes, Bytes> e:
-        snapshotTableDescriptor.getValues().entrySet()) {
-      htd.setValue(e.getKey(), e.getValue());
-    }
-    for (Map.Entry<String, String> e: snapshotTableDescriptor.getConfiguration().entrySet()) {
-      htd.setConfiguration(e.getKey(), e.getValue());
-    }
-    return htd;
-  }
-
   /**
    * Copy the snapshot files for a snapshot scanner, discards meta changes.
    * @param conf
@@ -802,8 +787,8 @@
    * @param snapshotName
    * @throws IOException
    */
-  public static void copySnapshotForScanner(Configuration conf, FileSystem fs, Path rootDir,
-      Path restoreDir, String snapshotName) throws IOException {
+  public static RestoreMetaChanges copySnapshotForScanner(Configuration conf, FileSystem fs,
+      Path rootDir, Path restoreDir, String snapshotName) throws IOException {
     // ensure that restore dir is not under root dir
     if (!restoreDir.getFileSystem(conf).getUri().equals(rootDir.getFileSystem(conf).getUri())) {
       throw new IllegalArgumentException("Filesystems for restore directory and HBase root " +
@@ -826,11 +811,12 @@
     // in the base hbase root dir.
     RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs,
         manifest, manifest.getTableDescriptor(), restoreDir, monitor, status, false);
-    helper.restoreHdfsRegions(); // TODO: parallelize.
+    RestoreMetaChanges metaChanges = helper.restoreHdfsRegions(); // TODO: parallelize.
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("Restored table dir:" + restoreDir);
       FSUtils.logFileSystemState(fs, restoreDir, LOG);
     }
+    return metaChanges;
   }
 }
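
Returning the RestoreMetaChanges gives callers such as TableSnapshotScanner the restored table descriptor and region list from one call, instead of re-opening the snapshot manifest afterwards. A rough sketch of consuming the result; the paths and snapshot name are made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;

    public class CopyForScannerExample {
      static void restoreAndList(Configuration conf, FileSystem fs, Path rootDir)
          throws Exception {
        Path restoreDir = new Path("/tmp/snapshot-restore");
        RestoreSnapshotHelper.RestoreMetaChanges meta =
            RestoreSnapshotHelper.copySnapshotForScanner(
                conf, fs, rootDir, restoreDir, "my_snapshot");
        // Both the schema and the restored regions come back together.
        HTableDescriptor htd = meta.getTableDescriptor();
        for (HRegionInfo hri : meta.getRegionsToAdd()) {
          System.out.println(htd.getTableName() + " restored region " + hri.getEncodedName());
        }
      }
    }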

SnapshotManifest.java

@@ -176,33 +176,16 @@ public final class SnapshotManifest {
         monitor.rethrowException();
 
         Path storePath = MobUtils.getMobFamilyPath(mobRegionPath, hcd.getNameAsString());
-        if (!fs.exists(storePath)) {
-          continue;
-        }
-
-        FileStatus[] stats = fs.listStatus(storePath);
-        if (stats == null) {
-          continue;
-        }
-
-        List<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>();
-        for (FileStatus stat : stats) {
-          storeFiles.add(new StoreFileInfo(conf, fs, stat));
-        }
-
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Adding snapshot references for " + storeFiles + " mob files");
-        }
-
-        // 2.2. iterate through all the mob files and create "references".
-        for (int i = 0, sz = storeFiles.size(); i < sz; i++) {
-          StoreFileInfo storeFile = storeFiles.get(i);
-          monitor.rethrowException();
-
-          // create "reference" to this store file.
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Adding reference for mob file (" + (i + 1) + "/" + sz + "): "
-              + storeFile.getPath());
-          }
-          visitor.storeFile(regionData, familyData, storeFile);
-        }
+        List<StoreFileInfo> storeFiles = getStoreFiles(storePath);
+        if (storeFiles == null) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("No mob files under family: " + hcd.getNameAsString());
+          }
+          continue;
+        }
+
+        addReferenceFiles(visitor, regionData, familyData, storeFiles, true);
 
         visitor.familyClose(regionData, familyData);
       }
       visitor.regionClose(regionData);
@@ -286,20 +269,11 @@
       if (isMobRegion) {
         Path regionPath = MobUtils.getMobRegionPath(conf, regionInfo.getTable());
         Path storePath = MobUtils.getMobFamilyPath(regionPath, familyName);
-        if (!fs.exists(storePath)) {
-          continue;
-        }
-
-        FileStatus[] stats = fs.listStatus(storePath);
-        if (stats == null) {
-          continue;
-        }
-
-        storeFiles = new ArrayList<StoreFileInfo>();
-        for (FileStatus stat : stats) {
-          storeFiles.add(new StoreFileInfo(conf, fs, stat));
-        }
+        storeFiles = getStoreFiles(storePath);
       } else {
         storeFiles = regionFs.getStoreFiles(familyName);
       }
+
       if (storeFiles == null) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("No files under family: " + familyName);
@@ -308,21 +282,9 @@
       }
 
       // 2.1. build the snapshot reference for the store
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");
-      }
-
-      // 2.2. iterate through all the store's files and create "references".
-      int i = 0;
-      int sz = storeFiles.size();
-      for (StoreFileInfo storeFile: storeFiles) {
-        monitor.rethrowException();
-
-        // create "reference" to this store file.
-        LOG.debug("Adding reference for file (" + (++i) + "/" + sz + "): "
-          + storeFile.getPath());
-        visitor.storeFile(regionData, familyData, storeFile);
-      }
+      // iterate through all the store's files and create "references".
+      addReferenceFiles(visitor, regionData, familyData, storeFiles, false);
 
       visitor.familyClose(regionData, familyData);
     }
   }
@@ -335,6 +297,38 @@
     }
   }
 
+  private List<StoreFileInfo> getStoreFiles(Path storeDir) throws IOException {
+    FileStatus[] stats = FSUtils.listStatus(fs, storeDir);
+    if (stats == null) return null;
+
+    ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(stats.length);
+    for (int i = 0; i < stats.length; ++i) {
+      storeFiles.add(new StoreFileInfo(conf, fs, stats[i]));
+    }
+    return storeFiles;
+  }
+
+  private void addReferenceFiles(RegionVisitor visitor, Object regionData, Object familyData,
+      Collection<StoreFileInfo> storeFiles, boolean isMob) throws IOException {
+    final String fileType = isMob ? "mob file" : "hfile";
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug(String.format("Adding snapshot references for %s %ss", storeFiles, fileType));
+    }
+
+    int i = 0;
+    int sz = storeFiles.size();
+    for (StoreFileInfo storeFile: storeFiles) {
+      monitor.rethrowException();
+
+      LOG.debug(String.format("Adding reference for %s (%d/%d): %s",
+        fileType, ++i, sz, storeFile.getPath()));
+
+      // create "reference" to this store file.
+      visitor.storeFile(regionData, familyData, storeFile);
+    }
+  }
+
   /**
    * Load the information in the SnapshotManifest. Called by SnapshotManifest.open()
    *

TestSnapshotFromMaster.java

@@ -22,9 +22,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 
@@ -380,17 +378,7 @@ public class TestSnapshotFromMaster {
   private final Collection<String> getArchivedHFiles(Path archiveDir, Path rootDir,
       FileSystem fs, TableName tableName) throws IOException {
     Path tableArchive = FSUtils.getTableDir(archiveDir, tableName);
-    Path[] archivedHFiles = SnapshotTestingUtils.listHFiles(fs, tableArchive);
-    List<String> files = new ArrayList<String>(archivedHFiles.length);
-    LOG.debug("Have archived hfiles: " + tableArchive);
-    for (Path file : archivedHFiles) {
-      LOG.debug(file);
-      files.add(file.getName());
-    }
-    // sort the archived files
-    Collections.sort(files);
-    return files;
+    return SnapshotTestingUtils.listHFileNames(fs, tableArchive);
   }
 
   /**

View File

@@ -26,7 +26,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
@@ -36,7 +35,6 @@ import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

View File

@@ -37,7 +37,6 @@ import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

MobSnapshotTestingUtils.java

@@ -85,25 +85,6 @@ public class MobSnapshotTestingUtils {
     return ConnectionFactory.createConnection(util.getConfiguration()).getTable(htd.getTableName());
   }
 
-  /**
-   * Return the number of rows in the given table.
-   */
-  public static int countMobRows(final Table table) throws IOException {
-    Scan scan = new Scan();
-    ResultScanner results = table.getScanner(scan);
-    int count = 0;
-    for (Result res : results) {
-      count++;
-      List<Cell> cells = res.listCells();
-      for (Cell cell : cells) {
-        // Verify the value
-        Assert.assertTrue(CellUtil.cloneValue(cell).length > 0);
-      }
-    }
-    results.close();
-    return count;
-  }
-
   /**
    * Return the number of rows in the given table.
    */

SnapshotTestingUtils.java

@@ -22,7 +22,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
-import java.util.Arrays;
+import java.util.Collections;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
@@ -149,6 +149,14 @@
         tableName);
   }
 
+  public static void confirmSnapshotValid(HBaseTestingUtility testUtil,
+      SnapshotDescription snapshotDescriptor, TableName tableName, byte[] family)
+      throws IOException {
+    MasterFileSystem mfs = testUtil.getHBaseCluster().getMaster().getMasterFileSystem();
+    confirmSnapshotValid(snapshotDescriptor, tableName, family,
+      mfs.getRootDir(), testUtil.getHBaseAdmin(), mfs.getFileSystem());
+  }
+
   /**
    * Confirm that the snapshot contains references to all the files that should
    * be in the snapshot.
@@ -335,28 +343,18 @@
    * @return array of the current HFiles in the table (could be a zero-length array)
    * @throws IOException on unexecpted error reading the FS
    */
-  public static Path[] listHFiles(final FileSystem fs, final Path tableDir)
+  public static ArrayList<String> listHFileNames(final FileSystem fs, final Path tableDir)
       throws IOException {
-    final ArrayList<Path> hfiles = new ArrayList<Path>();
+    final ArrayList<String> hfiles = new ArrayList<String>();
     FSVisitor.visitTableStoreFiles(fs, tableDir, new FSVisitor.StoreFileVisitor() {
       @Override
       public void storeFile(final String region, final String family, final String hfileName)
           throws IOException {
-        hfiles.add(new Path(tableDir, new Path(region, new Path(family, hfileName))));
+        hfiles.add(hfileName);
       }
     });
-    return hfiles.toArray(new Path[hfiles.size()]);
-  }
-
-  public static String[] listHFileNames(final FileSystem fs, final Path tableDir)
-      throws IOException {
-    Path[] files = listHFiles(fs, tableDir);
-    String[] names = new String[files.length];
-    for (int i = 0; i < files.length; ++i) {
-      names[i] = files[i].getName();
-    }
-    Arrays.sort(names);
-    return names;
+    Collections.sort(hfiles);
+    return hfiles;
   }
 
   /**
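
listHFiles() and the old listHFileNames() are collapsed into a single method returning a sorted List<String>, which is what both callers (TestSnapshotFromMaster above, TestRestoreSnapshotHelper below) actually need. A small sketch of a test consuming it; the wrapper class and assertion are hypothetical:

    import java.util.List;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
    import org.junit.Assert;

    public class ListHFileNamesExample {
      // The returned list is sorted, so pair-wise checks like the one in
      // TestRestoreSnapshotHelper.verifyRestore() can index it directly.
      static void assertHasFiles(FileSystem fs, Path tableDir) throws Exception {
        List<String> names = SnapshotTestingUtils.listHFileNames(fs, tableDir);
        Assert.assertFalse("expected some hfiles", names.isEmpty());
      }
    }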

TestFlushSnapshotFromClient.java

@@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -138,8 +137,7 @@ public class TestFlushSnapshotFromClient {
     SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
 
     LOG.debug("FS state before snapshot:");
-    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
-      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
 
     // take a snapshot of the enabled table
     String snapshotString = "offlineTableSnapshot";
@@ -152,14 +150,10 @@
         snapshot, TABLE_NAME);
 
     // make sure its a valid snapshot
-    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
-    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
     LOG.debug("FS state after snapshot:");
-    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
-      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
 
-    SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
-      admin, fs);
+    SnapshotTestingUtils.confirmSnapshotValid(UTIL, snapshots.get(0), TABLE_NAME, TEST_FAM);
   }
 
   /**
@@ -177,8 +171,7 @@
     UTIL.loadTable(table, TEST_FAM);
 
     LOG.debug("FS state before snapshot:");
-    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
-      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
 
     // take a snapshot of the enabled table
     String snapshotString = "skipFlushTableSnapshot";
@@ -191,14 +184,10 @@
         snapshot, TABLE_NAME);
 
     // make sure its a valid snapshot
-    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
-    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
     LOG.debug("FS state after snapshot:");
-    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
-      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
 
-    SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
-      admin, fs);
+    SnapshotTestingUtils.confirmSnapshotValid(UTIL, snapshots.get(0), TABLE_NAME, TEST_FAM);
 
     admin.deleteSnapshot(snapshot);
     snapshots = admin.listSnapshots();
@@ -220,8 +209,7 @@
     SnapshotTestingUtils.loadData(UTIL, TABLE_NAME, DEFAULT_NUM_ROWS, TEST_FAM);
 
     LOG.debug("FS state before snapshot:");
-    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
-      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
 
     // take a snapshot of the enabled table
     String snapshotString = "offlineTableSnapshot";
@@ -239,14 +227,10 @@
         snapshot, TABLE_NAME);
 
     // make sure its a valid snapshot
-    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
-    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
     LOG.debug("FS state after snapshot:");
-    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
-      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
 
-    SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
-      admin, fs);
+    SnapshotTestingUtils.confirmSnapshotValid(UTIL, snapshots.get(0), TABLE_NAME, TEST_FAM);
   }
 
   @Test (timeout=300000)
@@ -293,8 +277,8 @@
     HMaster master = UTIL.getMiniHBaseCluster().getMaster();
     SnapshotTestingUtils.waitForSnapshotToComplete(master, snapshot, 200);
     LOG.info(" === Async Snapshot Completed ===");
-    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
-      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
     // make sure we get the snapshot
     SnapshotTestingUtils.assertOneSnapshotThatMatches(admin, snapshot);
   }
@@ -494,7 +478,7 @@
     }
 
     // dump for debugging
-    logFSTree(FSUtils.getRootDir(UTIL.getConfiguration()));
+    UTIL.getHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
 
     List<SnapshotDescription> taken = admin.listSnapshots();
     int takenSize = taken.size();
@@ -518,10 +502,6 @@
     UTIL.deleteTable(TABLE2_NAME);
   }
 
-  private void logFSTree(Path root) throws IOException {
-    FSUtils.logFileSystemState(UTIL.getDFSCluster().getFileSystem(), root, LOG);
-  }
-
   private void waitRegionsAfterMerge(final long numRegionsAfterMerge)
       throws IOException, InterruptedException {
     Admin admin = UTIL.getHBaseAdmin();

TestRestoreFlushSnapshotFromClient.java

@@ -27,14 +27,12 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.snapshot.RegionServerSnapshotManager;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -208,8 +206,7 @@
   // Helpers
   // ==========================================================================
   private void logFSTree() throws IOException {
-    MasterFileSystem mfs = UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
-    FSUtils.logFileSystemState(mfs.getFileSystem(), mfs.getRootDir(), LOG);
+    UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem().logFileSystemState(LOG);
   }
 
   protected void verifyRowCount(final HBaseTestingUtility util, final TableName tableName,

TestRestoreSnapshotHelper.java

@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -124,12 +125,12 @@
   private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd,
       final HTableDescriptor htdClone) throws IOException {
-    String[] files = SnapshotTestingUtils.listHFileNames(fs,
+    List<String> files = SnapshotTestingUtils.listHFileNames(fs,
       FSUtils.getTableDir(rootDir, htdClone.getTableName()));
-    assertEquals(12, files.length);
+    assertEquals(12, files.size());
 
-    for (int i = 0; i < files.length; i += 2) {
-      String linkFile = files[i];
-      String refFile = files[i+1];
+    for (int i = 0; i < files.size(); i += 2) {
+      String linkFile = files.get(i);
+      String refFile = files.get(i+1);
       assertTrue(linkFile + " should be a HFileLink", HFileLink.isHFileLink(linkFile));
       assertTrue(refFile + " should be a Referene", StoreFileInfo.isReference(refFile));
       assertEquals(sourceHtd.getTableName(), HFileLink.getReferencedTableName(linkFile));