HBASE-7808 Refactor Store to use HRegionFileSystem
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1452936 13f79535-47bb-0310-9956-ffa450edef68
parent fc6f7a031b
commit 55955c16c2

@@ -186,19 +186,19 @@ public class HFileArchiver {
   * Remove the store files, either by archiving them or outright deletion
   * @param conf {@link Configuration} to examine to determine the archive directory
   * @param fs the filesystem where the store files live
   * @param parent Parent region hosting the store files
   * @param regionInfo {@link HRegionInfo} of the region hosting the store files
   * @param family the family hosting the store files
   * @param compactedFiles files to be disposed of. No further reading of these files should be
   *          attempted; otherwise likely to cause an {@link IOException}
   * @throws IOException if the files could not be correctly disposed.
   */
  public static void archiveStoreFiles(Configuration conf, FileSystem fs, HRegion parent,
      byte[] family, Collection<StoreFile> compactedFiles) throws IOException {
  public static void archiveStoreFiles(Configuration conf, FileSystem fs, HRegionInfo regionInfo,
      Path tableDir, byte[] family, Collection<StoreFile> compactedFiles) throws IOException {

    // sometimes in testing, we don't have rss, so we need to check for that
    if (fs == null) {
      LOG.warn("Passed filesystem is null, so just deleting the files without archiving for region:"
          + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family));
          + Bytes.toString(regionInfo.getRegionName()) + ", family:" + Bytes.toString(family));
      deleteStoreFilesWithoutArchiving(compactedFiles);
      return;
    }

@@ -210,10 +210,10 @@ public class HFileArchiver {
    }

    // build the archive path
    if (parent == null || family == null) throw new IOException(
        "Need to have a parent region and a family to archive from.");
    if (regionInfo == null || family == null) throw new IOException(
        "Need to have a region and a family to archive from.");

    Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, family);
    Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);

    // make sure we don't archive if we can't and that the archive dir exists
    if (!fs.mkdirs(storeArchiveDir)) {

@@ -231,7 +231,36 @@ public class HFileArchiver {
    // do the actual archive
    if (!resolveAndArchive(fs, storeArchiveDir, storeFiles)) {
      throw new IOException("Failed to archive/delete all the files for region:"
          + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family)
          + Bytes.toString(regionInfo.getRegionName()) + ", family:" + Bytes.toString(family)
          + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
    }
  }

  /**
   * Archive the store file
   * @param fs the filesystem where the store files live
   * @param regionInfo region hosting the store files
   * @param conf {@link Configuration} to examine to determine the archive directory
   * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
   * @param family the family hosting the store files
   * @param storeFile file to be archived
   * @throws IOException if the files could not be correctly disposed.
   */
  public static void archiveStoreFile(Configuration conf, FileSystem fs, HRegionInfo regionInfo,
      Path tableDir, byte[] family, Path storeFile) throws IOException {
    Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
    // make sure we don't archive if we can't and that the archive dir exists
    if (!fs.mkdirs(storeArchiveDir)) {
      throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
          + Bytes.toString(family) + ", deleting compacted files instead.");
    }

    // do the actual archive
    long start = EnvironmentEdgeManager.currentTimeMillis();
    File file = new FileablePath(fs, storeFile);
    if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) {
      throw new IOException("Failed to archive/delete the file for region:"
          + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family)
          + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
    }
  }
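
Illustrative only: a minimal caller sketch for the reworked archiveStoreFile() above, which now takes an HRegionInfo plus the table directory instead of a live HRegion. The family name and file path below are made-up placeholders, not values from this commit.

  // Sketch: archive a single compacted hfile with the new HRegionInfo/tableDir-based API.
  static void archiveOneCompactedFile(Configuration conf, FileSystem fs, HRegionInfo hri,
      Path tableDir, Path compactedFile) throws IOException {
    // "f1" is a placeholder family name; compactedFile is assumed to live under that family dir.
    HFileArchiver.archiveStoreFile(conf, fs, hri, tableDir, Bytes.toBytes("f1"), compactedFile);
  }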

@@ -32,7 +32,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;

@@ -67,12 +67,12 @@ public class HFileLink extends FileLink {
   */
  public static final String LINK_NAME_REGEX =
      String.format("%s=%s-%s", HTableDescriptor.VALID_USER_TABLE_REGEX,
          HRegionInfo.ENCODED_REGION_NAME_REGEX, StoreFile.HFILE_NAME_REGEX);
          HRegionInfo.ENCODED_REGION_NAME_REGEX, StoreFileInfo.HFILE_NAME_REGEX);

  /** Define the HFile Link name parser in the form of: table=region-hfile */
  private static final Pattern LINK_NAME_PATTERN =
      Pattern.compile(String.format("^(%s)=(%s)-(%s)$", HTableDescriptor.VALID_USER_TABLE_REGEX,
          HRegionInfo.ENCODED_REGION_NAME_REGEX, StoreFile.HFILE_NAME_REGEX));
          HRegionInfo.ENCODED_REGION_NAME_REGEX, StoreFileInfo.HFILE_NAME_REGEX));

  /**
   * The pattern should be used for hfile and reference links
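
A rough sketch of how the link-name pattern above is typically exercised: HFileLink.isHFileLink(Path), used later in this commit from HStore.loadStoreFiles(), applies the table=region-hfile format to the last path component. The path below is an invented example; table, encoded-region and hfile names are placeholders.

  // Sketch: check whether a file name follows the table=region-hfile link format.
  Path maybeLink = new Path("/hbase/testtable/0123456789abcdef0123456789abcdef/f1/"
      + "othertable=fedcba9876543210fedcba9876543210-abcdef0123456789abcdef01234567");
  boolean isLink = HFileLink.isHFileLink(maybeLink);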

@@ -24,6 +24,7 @@ import java.nio.ByteBuffer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.KeyValue;

@@ -85,16 +86,18 @@ public class HalfStoreFileReader extends StoreFile.Reader {
   * Creates a half file reader for a hfile referred to by an hfilelink.
   * @param fs filesystem to read from
   * @param p path to hfile
   * @param link
   * @param in {@link FSDataInputStream}
   * @param inNoChecksum {@link FSDataInputStream} opened on a filesystem without checksum
   * @param size Full size of the hfile file
   * @param cacheConf
   * @param r original reference file (contains top or bottom)
   * @param preferredEncodingInCache
   * @throws IOException
   */
  public HalfStoreFileReader(final FileSystem fs, final Path p, final HFileLink link,
      final CacheConfig cacheConf, final Reference r,
      DataBlockEncoding preferredEncodingInCache) throws IOException {
    super(fs, p, link, link.getFileStatus(fs).getLen(), cacheConf, preferredEncodingInCache, true);
  public HalfStoreFileReader(final FileSystem fs, final Path p, final FSDataInputStream in,
      final FSDataInputStream inNoChecksum, long size, final CacheConfig cacheConf,
      final Reference r, final DataBlockEncoding preferredEncodingInCache) throws IOException {
    super(fs, p, in, inNoChecksum, size, cacheConf, preferredEncodingInCache, true);
    // This is not actual midkey for this half-file; its just border
    // around which we split top and bottom. Have to look in files to find
    // actual last and first keys for bottom and top halves. Half-files don't

@@ -44,7 +44,7 @@ import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;

@@ -301,14 +301,13 @@ class CatalogJanitor extends Chore {
    HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableName());

    for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
      Path p = HStore.getStoreHomedir(tabledir, daughter.getEncodedName(),
          family.getName());
      Path p = HStore.getStoreHomedir(tabledir, daughter, family.getName());
      if (!fs.exists(p)) continue;
      // Look for reference files.  Call listStatus with anonymous instance of PathFilter.
      FileStatus [] ps = FSUtils.listStatus(fs, p,
          new PathFilter () {
            public boolean accept(Path path) {
              return StoreFile.isReference(path);
              return StoreFileInfo.isReference(path);
            }
          }
      );

@@ -125,8 +125,7 @@ class RegionLocationFinder {
    HTableDescriptor tableDescriptor = getTableDescriptor(region.getTableName());
    if (tableDescriptor != null) {
      HDFSBlocksDistribution blocksDistribution =
          HRegion.computeHDFSBlocksDistribution(getConf(), tableDescriptor,
              region.getEncodedName());
          HRegion.computeHDFSBlocksDistribution(getConf(), tableDescriptor, region);
      List<String> topHosts = blocksDistribution.getTopHosts();
      topServerNames = mapHostNameToServerName(topHosts);
    }

@@ -25,7 +25,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
/**
 * This Chore, every time it runs, will clear the HFiles in the hfile archive
 * folder that are deletable for each HFile cleaner in the chain.

@@ -52,7 +52,7 @@ public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate> {
    if (HFileLink.isBackReferencesDir(file) || HFileLink.isBackReferencesDir(file.getParent())) {
      return true;
    }
    return StoreFile.validateStoreFileName(file.getName());
    return StoreFileInfo.validateStoreFileName(file.getName());
  }

  /**

@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.exceptions.CorruptedSnapshotException;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.TakeSnapshotUtils;

@@ -207,7 +207,7 @@ public final class MasterSnapshotVerifier {
    Path archivedCfDir = new Path(archivedRegion, cf.getPath().getName());
    for (FileStatus hfile : hfiles) {
      // make sure the name is correct
      if (!StoreFile.validateStoreFileName(hfile.getPath().getName())) {
      if (!StoreFileInfo.validateStoreFileName(hfile.getPath().getName())) {
        throw new CorruptedSnapshotException("HFile: " + hfile.getPath()
            + " is not a valid hfile name.", snapshot);
      }

@@ -112,12 +112,12 @@ public class CompactionTool extends Configured implements Tool {
      Path regionDir = path.getParent();
      Path tableDir = regionDir.getParent();
      HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(fs, tableDir);
      HRegion region = loadRegion(fs, conf, htd, regionDir);
      compactStoreFiles(region, path, compactOnce);
      HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
      compactStoreFiles(tableDir, htd, hri, path.getName(), compactOnce);
    } else if (isRegionDir(fs, path)) {
      Path tableDir = path.getParent();
      HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(fs, tableDir);
      compactRegion(htd, path, compactOnce);
      compactRegion(tableDir, htd, path, compactOnce);
    } else if (isTableDir(fs, path)) {
      compactTable(path, compactOnce);
    } else {

@@ -129,19 +129,16 @@ public class CompactionTool extends Configured implements Tool {
    private void compactTable(final Path tableDir, final boolean compactOnce)
        throws IOException {
      HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(fs, tableDir);
      LOG.info("Compact table=" + htd.getNameAsString());
      for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
        compactRegion(htd, regionDir, compactOnce);
        compactRegion(tableDir, htd, regionDir, compactOnce);
      }
    }

    private void compactRegion(final HTableDescriptor htd, final Path regionDir,
        final boolean compactOnce) throws IOException {
      HRegion region = loadRegion(fs, conf, htd, regionDir);
      LOG.info("Compact table=" + htd.getNameAsString() +
          " region=" + region.getRegionNameAsString());
    private void compactRegion(final Path tableDir, final HTableDescriptor htd,
        final Path regionDir, final boolean compactOnce) throws IOException {
      HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
      for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) {
        compactStoreFiles(region, familyDir, compactOnce);
        compactStoreFiles(tableDir, htd, hri, familyDir.getName(), compactOnce);
      }
    }

@@ -150,12 +147,13 @@ public class CompactionTool extends Configured implements Tool {
     * If the compact once flag is not specified, execute the compaction until
     * no more compactions are needed. Uses the Configuration settings provided.
     */
    private void compactStoreFiles(final HRegion region, final Path familyDir,
        final boolean compactOnce) throws IOException {
      LOG.info("Compact table=" + region.getTableDesc().getNameAsString() +
          " region=" + region.getRegionNameAsString() +
          " family=" + familyDir.getName());
      HStore store = getStore(region, familyDir);
    private void compactStoreFiles(final Path tableDir, final HTableDescriptor htd,
        final HRegionInfo hri, final String familyName, final boolean compactOnce)
        throws IOException {
      HStore store = getStore(conf, fs, tableDir, htd, hri, familyName, tmpDir);
      LOG.info("Compact table=" + htd.getNameAsString() +
          " region=" + hri.getRegionNameAsString() +
          " family=" + familyName);
      do {
        CompactionContext compaction = store.requestCompaction();
        if (compaction == null) break;

@@ -174,29 +172,17 @@ public class CompactionTool extends Configured implements Tool {
     * Create a "mock" HStore that uses the tmpDir specified by the user and
     * the store dir to compact as source.
     */
    private HStore getStore(final HRegion region, final Path storeDir) throws IOException {
      byte[] familyName = Bytes.toBytes(storeDir.getName());
      HColumnDescriptor hcd = region.getTableDesc().getFamily(familyName);
      // Create a Store w/ check of hbase.rootdir blanked out and return our
      // list of files instead of have Store search its home dir.
      return new HStore(tmpDir, region, hcd, fs, conf) {
    private static HStore getStore(final Configuration conf, final FileSystem fs,
        final Path tableDir, final HTableDescriptor htd, final HRegionInfo hri,
        final String familyName, final Path tempDir) throws IOException {
      HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, hri) {
        @Override
        public FileStatus[] getStoreFiles() throws IOException {
          return this.fs.listStatus(getHomedir());
        }

        @Override
        Path createStoreHomeDir(FileSystem fs, Path homedir) throws IOException {
          return storeDir;
        public Path getTempDir() {
          return tempDir;
        }
      };
    }

    private static HRegion loadRegion(final FileSystem fs, final Configuration conf,
        final HTableDescriptor htd, final Path regionDir) throws IOException {
      Path rootDir = regionDir.getParent().getParent();
      HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
      return HRegion.createHRegion(hri, rootDir, conf, htd, null, false, true);
      HRegion region = new HRegion(regionFs, null, conf, htd, null);
      return new HStore(region, htd.getFamily(Bytes.toBytes(familyName)), conf);
    }
  }
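
Not part of the patch, just a sketch of the lookup pattern the tool now leans on: recover the HRegionInfo from the on-disk .regioninfo file via HRegionFileSystem.loadRegionInfoFileContent() instead of instantiating a full HRegion. The paths below are placeholders.

  // Sketch: resolve a region directory to its region and table descriptors.
  Path regionDir = new Path("/hbase/testtable/0123456789abcdef0123456789abcdef");
  Path tableDir = regionDir.getParent();
  HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(fs, tableDir);
  HRegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);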

@@ -633,7 +633,7 @@ public class HRegion implements HeapSize { // , Writable{
      status.setStatus("Instantiating store for column family " + family);
      completionService.submit(new Callable<HStore>() {
        public HStore call() throws IOException {
          return instantiateHStore(getTableDir(), family);
          return instantiateHStore(family);
        }
      });
    }

@@ -706,32 +706,23 @@ public class HRegion implements HeapSize { // , Writable{
   * This is a helper function to compute HDFS block distribution on demand
   * @param conf configuration
   * @param tableDescriptor HTableDescriptor of the table
   * @param regionEncodedName encoded name of the region
   * @param regionInfo encoded name of the region
   * @return The HDFS blocks distribution for the given region.
   * @throws IOException
   * @throws IOException
   */
  static public HDFSBlocksDistribution computeHDFSBlocksDistribution(
      Configuration conf, HTableDescriptor tableDescriptor,
      String regionEncodedName) throws IOException {
    HDFSBlocksDistribution hdfsBlocksDistribution =
        new HDFSBlocksDistribution();
    Path tablePath = FSUtils.getTablePath(FSUtils.getRootDir(conf),
        tableDescriptor.getName());
  public static HDFSBlocksDistribution computeHDFSBlocksDistribution(final Configuration conf,
      final HTableDescriptor tableDescriptor, final HRegionInfo regionInfo) throws IOException {
    HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
    Path tablePath = FSUtils.getTablePath(FSUtils.getRootDir(conf), tableDescriptor.getName());
    FileSystem fs = tablePath.getFileSystem(conf);

    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);
    for (HColumnDescriptor family: tableDescriptor.getFamilies()) {
      Path storeHomeDir = HStore.getStoreHomedir(tablePath, regionEncodedName,
          family.getName());
      if (!fs.exists(storeHomeDir))continue;
      Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family.getNameAsString());
      if (storeFiles == null) continue;

      FileStatus[] hfilesStatus = null;
      hfilesStatus = fs.listStatus(storeHomeDir);

      for (FileStatus hfileStatus : hfilesStatus) {
        HDFSBlocksDistribution storeFileBlocksDistribution =
            FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0,
                hfileStatus.getLen());
        hdfsBlocksDistribution.add(storeFileBlocksDistribution);
      for (StoreFileInfo storeFileInfo : storeFiles) {
        hdfsBlocksDistribution.add(storeFileInfo.computeHDFSBlocksDistribution(fs));
      }
    }
    return hdfsBlocksDistribution;

@@ -1121,14 +1112,6 @@ public class HRegion implements HeapSize { // , Writable{
  void doRegionCompactionPrep() throws IOException {
  }

  /**
   * Get the temporary directory for this region. This directory
   * will have its contents removed when the region is reopened.
   */
  Path getTmpDir() {
    return fs.getTempDir();
  }

  void triggerMajorCompaction() {
    for (Store h : stores.values()) {
      h.triggerMajorCompaction();

@@ -2377,8 +2360,7 @@ public class HRegion implements HeapSize { // , Writable{
      // files/batch, far more than the number of store files under a single column family.
      for (Store store : stores.values()) {
        // 2.1. build the snapshot reference directory for the store
        Path dstStoreDir = TakeSnapshotUtils.getStoreSnapshotDirectory(
            snapshotRegionFs.getRegionDir(), Bytes.toString(store.getFamily().getName()));
        Path dstStoreDir = snapshotRegionFs.getStoreDir(store.getFamily().getNameAsString());
        List<StoreFile> storeFiles = new ArrayList<StoreFile>(store.getStorefiles());
        if (LOG.isDebugEnabled()) {
          LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");

@@ -2965,9 +2947,8 @@ public class HRegion implements HeapSize { // , Writable{
    return true;
  }

  protected HStore instantiateHStore(Path tableDir, HColumnDescriptor c)
      throws IOException {
    return new HStore(tableDir, this, c, this.getFilesystem(), this.conf);
  protected HStore instantiateHStore(final HColumnDescriptor family) throws IOException {
    return new HStore(this, family, this.conf);
  }

  /**

@@ -4171,13 +4152,13 @@ public class HRegion implements HeapSize { // , Writable{
   * @param colFamily the column family
   * @throws IOException
   */
  public static void makeColumnFamilyDirs(FileSystem fs, Path tabledir,
      final HRegionInfo hri, byte [] colFamily)
      throws IOException {
    Path dir = HStore.getStoreHomedir(tabledir, hri.getEncodedName(), colFamily);
  private static Path makeColumnFamilyDirs(FileSystem fs, Path tabledir,
      final HRegionInfo hri, byte [] colFamily) throws IOException {
    Path dir = HStore.getStoreHomedir(tabledir, hri, colFamily);
    if (!fs.mkdirs(dir)) {
      LOG.warn("Failed to create " + dir);
    }
    return dir;
  }

  /**

@@ -4299,14 +4280,12 @@ public class HRegion implements HeapSize { // , Writable{
    byFamily = filesByFamily(byFamily, b.close());
    for (Map.Entry<byte [], List<StoreFile>> es : byFamily.entrySet()) {
      byte [] colFamily = es.getKey();
      makeColumnFamilyDirs(fs, tableDir, newRegionInfo, colFamily);
      Path storeDir = makeColumnFamilyDirs(fs, tableDir, newRegionInfo, colFamily);
      // Because we compacted the source regions we should have no more than two
      // HStoreFiles per family and there will be no reference store
      List<StoreFile> srcFiles = es.getValue();
      for (StoreFile hsf: srcFiles) {
        StoreFile.rename(fs, hsf.getPath(),
            StoreFile.getUniqueFile(fs, HStore.getStoreHomedir(tableDir,
                newRegionInfo.getEncodedName(), colFamily)));
        StoreFile.rename(fs, hsf.getPath(), StoreFile.getUniqueFile(fs, storeDir));
      }
    }
    if (LOG.isDebugEnabled()) {
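
For orientation only: the HDFS block-distribution helper above now takes the HRegionInfo itself rather than the region's encoded name, which is what the RegionLocationFinder hunk earlier in this commit calls into. A minimal caller sketch, with conf, tableDescriptor and regionInfo assumed to be already in hand:

  // Sketch: compute the block distribution for one region and read off its top hosts.
  HDFSBlocksDistribution dist =
      HRegion.computeHDFSBlocksDistribution(conf, tableDescriptor, regionInfo);
  List<String> topHosts = dist.getTopHosts();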

@@ -22,6 +22,9 @@ package org.apache.hadoop.hbase.regionserver;
import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.UUID;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

@@ -38,7 +41,6 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Bytes;

@@ -117,6 +119,214 @@ public class HRegionFileSystem {
    FSUtils.deleteDirectory(fs, getTempDir());
  }

  // ===========================================================================
  // Store/StoreFile Helpers
  // ===========================================================================
  /**
   * Returns the directory path of the specified family
   * @param familyName Column Family Name
   * @return {@link Path} to the directory of the specified family
   */
  Path getStoreDir(final String familyName) {
    return new Path(this.getRegionDir(), familyName);
  }

  /**
   * Create the store directory for the specified family name
   * @param familyName Column Family Name
   * @return {@link Path} to the directory of the specified family
   * @throws IOException if the directory creation fails.
   */
  public Path createStoreDir(final String familyName) throws IOException {
    Path storeDir = getStoreDir(familyName);
    if (!fs.exists(storeDir) && !fs.mkdirs(storeDir)) {
      throw new IOException("Failed create of: " + storeDir);
    }
    return storeDir;
  }

  /**
   * Returns the store files available for the family.
   * This method performs the filtering based on the valid store files.
   * @param familyName Column Family Name
   * @return a set of {@link StoreFileInfo} for the specified family.
   */
  public Collection<StoreFileInfo> getStoreFiles(final byte[] familyName) throws IOException {
    return getStoreFiles(Bytes.toString(familyName));
  }

  /**
   * Returns the store files available for the family.
   * This method performs the filtering based on the valid store files.
   * @param familyName Column Family Name
   * @return a set of {@link StoreFileInfo} for the specified family.
   */
  public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException {
    Path familyDir = getStoreDir(familyName);
    FileStatus[] files = FSUtils.listStatus(this.fs, familyDir);
    if (files == null) return null;

    ArrayList<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(files.length);
    for (FileStatus status: files) {
      if (!StoreFileInfo.isValid(status)) continue;

      storeFiles.add(new StoreFileInfo(this.conf, this.fs, status));
    }
    return storeFiles;
  }

  /**
   * @return the set of families present on disk
   */
  public Collection<String> getFamilies() throws IOException {
    FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
    if (fds == null) return null;

    ArrayList<String> families = new ArrayList<String>(fds.length);
    for (FileStatus status: fds) {
      families.add(status.getPath().getName());
    }

    return families;
  }

  /**
   * Generate a unique file name, used by createTempName() and commitStoreFile()
   * @param suffix extra information to append to the generated name
   * @return Unique file name
   */
  private static String generateUniqueName(final String suffix) {
    String name = UUID.randomUUID().toString().replaceAll("-", "");
    if (suffix != null) name += suffix;
    return name;
  }

  /**
   * Generate a unique temporary Path. Used in conjunction with commitStoreFile()
   * to get a safer file creation.
   * <code>
   * Path file = fs.createTempName();
   * ...StoreFile.Writer(file)...
   * fs.commitStoreFile("family", file);
   * </code>
   *
   * @return Unique {@link Path} of the temporary file
   */
  public Path createTempName() {
    return createTempName(null);
  }

  /**
   * Generate a unique temporary Path. Used in conjunction with commitStoreFile()
   * to get a safer file creation.
   * <code>
   * Path file = fs.createTempName();
   * ...StoreFile.Writer(file)...
   * fs.commitStoreFile("family", file);
   * </code>
   *
   * @param suffix extra information to append to the generated name
   * @return Unique {@link Path} of the temporary file
   */
  public Path createTempName(final String suffix) {
    return new Path(getTempDir(), generateUniqueName(suffix));
  }

  /**
   * Move the file from a build/temp location to the main family store directory.
   * @param familyName Family that will gain the file
   * @param buildPath {@link Path} to the file to commit.
   * @return The new {@link Path} of the committed file
   * @throws IOException
   */
  public Path commitStoreFile(final String familyName, final Path buildPath) throws IOException {
    return commitStoreFile(familyName, buildPath, -1, false);
  }

  /**
   * Move the file from a build/temp location to the main family store directory.
   * @param familyName Family that will gain the file
   * @param buildPath {@link Path} to the file to commit.
   * @param seqNum Sequence Number to append to the file name (less than 0 if no sequence number)
   * @param generateNewName False if you want to keep the buildPath name
   * @return The new {@link Path} of the committed file
   * @throws IOException
   */
  public Path commitStoreFile(final String familyName, final Path buildPath,
      final long seqNum, final boolean generateNewName) throws IOException {
    Path storeDir = getStoreDir(familyName);
    fs.mkdirs(storeDir);
    String name = buildPath.getName();
    if (generateNewName) {
      name = generateUniqueName((seqNum < 0) ? null : "_SeqId_" + seqNum + "_");
    }
    Path dstPath = new Path(storeDir, name);
    if (!fs.exists(buildPath)) {
      throw new FileNotFoundException(buildPath.toString());
    }
    LOG.debug("Committing store file " + buildPath + " as " + dstPath);
    if (!fs.rename(buildPath, dstPath)) {
      throw new IOException("Failed rename of " + buildPath + " to " + dstPath);
    }
    return dstPath;
  }

  /**
   * Archives the specified store file from the specified family.
   * @param familyName Family that contains the store files
   * @param filePath {@link Path} to the store file to remove
   * @throws IOException if the archiving fails
   */
  public void removeStoreFile(final String familyName, final Path filePath)
      throws IOException {
    HFileArchiver.archiveStoreFile(this.conf, this.fs, this.regionInfo,
        this.tableDir, Bytes.toBytes(familyName), filePath);
  }

  /**
   * Closes and archives the specified store files from the specified family.
   * @param familyName Family that contains the store files
   * @param storeFiles set of store files to remove
   * @throws IOException if the archiving fails
   */
  public void removeStoreFiles(final String familyName, final Collection<StoreFile> storeFiles)
      throws IOException {
    HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.regionInfo,
        this.tableDir, Bytes.toBytes(familyName), storeFiles);
  }

  /**
   * Bulk load: Add a specified store file to the specified family.
   * If the source file is on the same filesystem, it is moved from the
   * source location to the destination location; otherwise it is copied over.
   *
   * @param familyName Family that will gain the file
   * @param srcPath {@link Path} to the file to import
   * @param seqNum Bulk Load sequence number
   * @return The destination {@link Path} of the bulk loaded file
   * @throws IOException
   */
  public Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
      throws IOException {
    // Copy the file if it's on another filesystem
    FileSystem srcFs = srcPath.getFileSystem(conf);
    FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;

    // We can't compare FileSystem instances as equals() includes UGI instance
    // as part of the comparison and won't work when doing SecureBulkLoad
    // TODO deal with viewFS
    if (!srcFs.getUri().equals(desFs.getUri())) {
      LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " +
          "the destination store. Copying file over to destination filesystem.");
      Path tmpPath = createTempName();
      FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
      LOG.info("Copied " + srcPath + " to temporary path on destination filesystem: " + tmpPath);
      srcPath = tmpPath;
    }

    return commitStoreFile(familyName, srcPath, seqNum, true);
  }

  // ===========================================================================
  // Splits Helpers
  // ===========================================================================
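
The javadoc on createTempName()/commitStoreFile() above already hints at the intended flow; spelled out as a sketch (the family name and writer wiring are illustrative, not taken from this commit):

  // Sketch: write a new hfile under the region's temp dir, then move it into the
  // family directory once the write has finished.
  HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
  Path tmp = regionFs.createTempName();
  // ... build the file at 'tmp' with a StoreFile.Writer, then close the writer ...
  Path committed = regionFs.commitStoreFile("f1", tmp);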

@@ -41,9 +41,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CompoundConfiguration;

@@ -52,11 +50,9 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.exceptions.WrongRegionException;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;

@@ -77,7 +73,6 @@ import org.apache.hadoop.hbase.util.ChecksumType;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.CollectionBackedScanner;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.util.StringUtils;

import com.google.common.base.Preconditions;

@@ -116,15 +111,12 @@ public class HStore implements Store {

  protected final MemStore memstore;
  // This stores directory in the filesystem.
  private final Path homedir;
  private final HRegion region;
  private final HColumnDescriptor family;
  CompactionPolicy compactionPolicy;
  final FileSystem fs;
  final Configuration conf;
  final CacheConfig cacheConf;
  // ttl in milliseconds. TODO: can this be removed? Already stored in scanInfo.
  private long ttl;
  final CompactionPolicy compactionPolicy;
  private final HRegionFileSystem fs;
  private final Configuration conf;
  private final CacheConfig cacheConf;
  private long lastCompactSize = 0;
  volatile boolean forceMajor = false;
  /* how many bytes to write between status checks */

@@ -137,7 +129,7 @@ public class HStore implements Store {

  private ScanInfo scanInfo;

  private StoreFileManager storeFileManager;
  private final StoreFileManager storeFileManager;
  final List<StoreFile> filesCompacting = Lists.newArrayList();

  // All access must be synchronized.

@@ -164,25 +156,20 @@ public class HStore implements Store {

  /**
   * Constructor
   * @param basedir qualified path under which the region directory lives;
   * generally the table subdirectory
   * @param region
   * @param family HColumnDescriptor for this column
   * @param fs file system object
   * @param confParam configuration object
   * failed. Can be null.
   * @throws IOException
   */
  protected HStore(Path basedir, HRegion region, HColumnDescriptor family,
      FileSystem fs, Configuration confParam)
      throws IOException {
  protected HStore(final HRegion region, final HColumnDescriptor family,
      final Configuration confParam) throws IOException {

    HRegionInfo info = region.getRegionInfo();
    this.fs = fs;
    // Assemble the store's home directory.
    Path p = getStoreHomedir(basedir, info.getEncodedName(), family.getName());
    // Ensure it exists.
    this.homedir = createStoreHomeDir(this.fs, p);
    this.fs = region.getRegionFileSystem();

    // Assemble the store's home directory and Ensure it exists.
    fs.createStoreDir(family.getNameAsString());
    this.region = region;
    this.family = family;
    // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor

@@ -200,13 +187,13 @@ public class HStore implements Store {
        family.getDataBlockEncoding());

    this.comparator = info.getComparator();
    // Get TTL
    this.ttl = determineTTLFromFamily(family);
    // used by ScanQueryMatcher
    long timeToPurgeDeletes =
        Math.max(conf.getLong("hbase.hstore.time.to.purge.deletes", 0), 0);
    LOG.trace("Time to purge deletes set to " + timeToPurgeDeletes +
        "ms in store " + this);
    // Get TTL
    long ttl = determineTTLFromFamily(family);
    // Why not just pass a HColumnDescriptor in here altogether? Even if have
    // to clone it?
    scanInfo = new ScanInfo(family, ttl, timeToPurgeDeletes, this.comparator);

@@ -250,7 +237,7 @@ public class HStore implements Store {

  /**
   * @param family
   * @return
   * @return TTL in seconds of the specified family
   */
  private static long determineTTLFromFamily(final HColumnDescriptor family) {
    // HCD.getTimeToLive returns ttl in seconds. Convert to milliseconds.

@@ -273,27 +260,15 @@ public class HStore implements Store {

  @Override
  public String getTableName() {
    return this.region.getTableDesc().getNameAsString();
  }

  /**
   * Create this store's homedir
   * @param fs
   * @param homedir
   * @return Return <code>homedir</code>
   * @throws IOException
   */
  Path createStoreHomeDir(final FileSystem fs,
      final Path homedir) throws IOException {
    if (!fs.exists(homedir)) {
      if (!fs.mkdirs(homedir))
        throw new IOException("Failed create of: " + homedir.toString());
    }
    return homedir;
    return this.getRegionInfo().getTableNameAsString();
  }

  @Override
  public FileSystem getFileSystem() {
    return this.fs.getFileSystem();
  }

  public HRegionFileSystem getRegionFileSystem() {
    return this.fs;
  }

@@ -301,7 +276,7 @@ public class HStore implements Store {
  @Override
  public long getStoreFileTtl() {
    // TTL only applies if there's no MIN_VERSIONs setting on the column.
    return (this.scanInfo.getMinVersions() == 0) ? this.ttl : Long.MAX_VALUE;
    return (this.scanInfo.getMinVersions() == 0) ? this.scanInfo.getTtl() : Long.MAX_VALUE;
  }

  @Override

@@ -358,42 +333,27 @@ public class HStore implements Store {
  }

  /**
   * @param tabledir
   * @param encodedName Encoded region name.
   * @param family
   * @param tabledir {@link Path} to where the table is being stored
   * @param hri {@link HRegionInfo} for the region.
   * @param family {@link HColumnDescriptor} describing the column family
   * @return Path to family/Store home directory.
   */
  @Deprecated
  public static Path getStoreHomedir(final Path tabledir,
      final String encodedName, final byte [] family) {
    return getStoreHomedir(tabledir, encodedName, Bytes.toString(family));
      final HRegionInfo hri, final byte[] family) {
    return getStoreHomedir(tabledir, hri.getEncodedName(), family);
  }

  /**
   * @param tabledir
   * @param tabledir {@link Path} to where the table is being stored
   * @param encodedName Encoded region name.
   * @param family
   * @param family {@link HColumnDescriptor} describing the column family
   * @return Path to family/Store home directory.
   */
  @Deprecated
  public static Path getStoreHomedir(final Path tabledir,
      final String encodedName, final String family) {
    return new Path(tabledir, new Path(encodedName, new Path(family)));
  }

  /**
   * @param parentRegionDirectory directory for the parent region
   * @param family family name of this store
   * @return Path to the family/Store home directory
   */
  public static Path getStoreHomedir(final Path parentRegionDirectory,
      final byte[] family) {
    return new Path(parentRegionDirectory, new Path(Bytes.toString(family)));
  }
  /**
   * Return the directory in which this store stores its
   * StoreFiles
   */
  Path getHomedir() {
    return homedir;
      final String encodedName, final byte[] family) {
    return new Path(tabledir, new Path(encodedName, Bytes.toString(family)));
  }

  @Override

@@ -409,48 +369,31 @@ public class HStore implements Store {
    this.dataBlockEncoder = blockEncoder;
  }

  FileStatus[] getStoreFiles() throws IOException {
    return FSUtils.listStatus(this.fs, this.homedir, null);
  }

  /**
   * Creates an unsorted list of StoreFile loaded in parallel
   * from the given directory.
   * @throws IOException
   */
  private List<StoreFile> loadStoreFiles() throws IOException {
    ArrayList<StoreFile> results = new ArrayList<StoreFile>();
    FileStatus files[] = getStoreFiles();

    if (files == null || files.length == 0) {
      return results;
    Collection<StoreFileInfo> files = fs.getStoreFiles(getColumnFamilyName());
    if (files == null || files.size() == 0) {
      return new ArrayList<StoreFile>();
    }

    // initialize the thread pool for opening store files in parallel..
    ThreadPoolExecutor storeFileOpenerThreadPool =
      this.region.getStoreFileOpenAndCloseThreadPool("StoreFileOpenerThread-" +
          this.family.getNameAsString());
          this.getColumnFamilyName());
    CompletionService<StoreFile> completionService =
      new ExecutorCompletionService<StoreFile>(storeFileOpenerThreadPool);

    int totalValidStoreFile = 0;
    for (int i = 0; i < files.length; i++) {
      // Skip directories.
      if (files[i].isDir()) {
        continue;
      }
      final Path p = files[i].getPath();
      // Check for empty hfile. Should never be the case but can happen
      // after data loss in hdfs for whatever reason (upgrade, etc.): HBASE-646
      // NOTE: that the HFileLink is just a name, so it's an empty file.
      if (!HFileLink.isHFileLink(p) && this.fs.getFileStatus(p).getLen() <= 0) {
        LOG.warn("Skipping " + p + " because its empty. HBASE-646 DATA LOSS?");
        continue;
      }

    final FileSystem fs = this.getFileSystem();
    for (final StoreFileInfo storeFileInfo: files) {
      // open each store file in parallel
      completionService.submit(new Callable<StoreFile>() {
        public StoreFile call() throws IOException {
          StoreFile storeFile = new StoreFile(fs, p, conf, cacheConf,
          StoreFile storeFile = new StoreFile(fs, storeFileInfo.getPath(), conf, cacheConf,
              family.getBloomFilterType(), dataBlockEncoder);
          storeFile.createReader();
          return storeFile;

@@ -459,6 +402,7 @@ public class HStore implements Store {
      totalValidStoreFile++;
    }

    ArrayList<StoreFile> results = new ArrayList<StoreFile>(files.size());
    IOException ioe = null;
    try {
      for (int i = 0; i < totalValidStoreFile; i++) {

@@ -543,7 +487,7 @@ public class HStore implements Store {
    HFile.Reader reader = null;
    try {
      LOG.info("Validating hfile at " + srcPath + " for inclusion in "
          + "store " + this + " region " + this.region);
          + "store " + this + " region " + this.getRegionInfo().getRegionNameAsString());
      reader = HFile.createReader(srcPath.getFileSystem(conf),
          srcPath, cacheConf);
      reader.loadFileInfo();

@@ -557,14 +501,13 @@ public class HStore implements Store {
      LOG.debug("HFile bounds: first=" + Bytes.toStringBinary(firstKey) +
          " last=" + Bytes.toStringBinary(lastKey));
      LOG.debug("Region bounds: first=" +
          Bytes.toStringBinary(region.getStartKey()) +
          " last=" + Bytes.toStringBinary(region.getEndKey()));
          Bytes.toStringBinary(getRegionInfo().getStartKey()) +
          " last=" + Bytes.toStringBinary(getRegionInfo().getEndKey()));

      HRegionInfo hri = region.getRegionInfo();
      if (!hri.containsRange(firstKey, lastKey)) {
      if (!this.getRegionInfo().containsRange(firstKey, lastKey)) {
        throw new WrongRegionException(
            "Bulk load file " + srcPath.toString() + " does not fit inside region "
            + this.region);
            + this.getRegionInfo().getRegionNameAsString());
      }

      if (verifyBulkLoads) {

@@ -602,38 +545,17 @@ public class HStore implements Store {
  @Override
  public void bulkLoadHFile(String srcPathStr, long seqNum) throws IOException {
    Path srcPath = new Path(srcPathStr);
    Path dstPath = fs.bulkLoadStoreFile(getColumnFamilyName(), srcPath, seqNum);

    // Copy the file if it's on another filesystem
    FileSystem srcFs = srcPath.getFileSystem(conf);
    FileSystem desFs = fs instanceof HFileSystem ? ((HFileSystem)fs).getBackingFs() : fs;
    //We can't compare FileSystem instances as
    //equals() includes UGI instance as part of the comparison
    //and won't work when doing SecureBulkLoad
    //TODO deal with viewFS
    if (!srcFs.getUri().equals(desFs.getUri())) {
      LOG.info("Bulk-load file " + srcPath + " is on different filesystem than " +
          "the destination store. Copying file over to destination filesystem.");
      Path tmpPath = getTmpPath();
      FileUtil.copy(srcFs, srcPath, fs, tmpPath, false, conf);
      LOG.info("Copied " + srcPath
          + " to temporary path on destination filesystem: " + tmpPath);
      srcPath = tmpPath;
    }

    Path dstPath = StoreFile.getRandomFilename(fs, homedir,
        (seqNum == -1) ? null : "_SeqId_" + seqNum + "_");
    LOG.debug("Renaming bulk load file " + srcPath + " to " + dstPath);
    StoreFile.rename(fs, srcPath, dstPath);

    StoreFile sf = new StoreFile(fs, dstPath, this.conf, this.cacheConf,
    StoreFile sf = new StoreFile(this.getFileSystem(), dstPath, this.conf, this.cacheConf,
        this.family.getBloomFilterType(), this.dataBlockEncoder);

    StoreFile.Reader r = sf.createReader();
    this.storeSize += r.length();
    this.totalUncompressedBytes += r.getTotalUncompressedBytes();

    LOG.info("Moved HFile " + srcPath + " into store directory " +
        homedir + " - updating store file list.");
    LOG.info("Loaded HFile " + srcPath + " into store '" + getColumnFamilyName() +
        "' as " + dstPath + " - updating store file list.");

    // Append the new storefile into the list
    this.lock.writeLock().lock();

@@ -652,16 +574,6 @@ public class HStore implements Store {
        + " into store " + this + " (new location: " + dstPath + ")");
  }

  /**
   * Get a temporary path in this region. These temporary files
   * will get cleaned up when the region is re-opened if they are
   * still around.
   */
  private Path getTmpPath() throws IOException {
    return StoreFile.getRandomFilename(
        fs, region.getTmpDir());
  }

  @Override
  public ImmutableCollection<StoreFile> close() throws IOException {
    this.lock.writeLock().lock();

@@ -673,7 +585,7 @@ public class HStore implements Store {
    // initialize the thread pool for closing store files in parallel.
    ThreadPoolExecutor storeFileCloserThreadPool = this.region
        .getStoreFileOpenAndCloseThreadPool("StoreFileCloserThread-"
            + this.family.getNameAsString());
            + this.getColumnFamilyName());

    // close each store file in parallel
    CompletionService<Void> completionService =

@@ -809,20 +721,19 @@ public class HStore implements Store {
      // treat this as a minor compaction.
      InternalScanner scanner = null;
      KeyValueScanner memstoreScanner = new CollectionBackedScanner(set, this.comparator);
      if (this.region.getCoprocessorHost() != null) {
        scanner = this.region.getCoprocessorHost()
            .preFlushScannerOpen(this, memstoreScanner);
      if (this.getCoprocessorHost() != null) {
        scanner = this.getCoprocessorHost().preFlushScannerOpen(this, memstoreScanner);
      }
      if (scanner == null) {
        Scan scan = new Scan();
        scan.setMaxVersions(scanInfo.getMaxVersions());
        scanner = new StoreScanner(this, scanInfo, scan,
            Collections.singletonList(memstoreScanner), ScanType.COMPACT_RETAIN_DELETES,
            this.region.getSmallestReadPoint(), HConstants.OLDEST_TIMESTAMP);
            smallestReadPoint, HConstants.OLDEST_TIMESTAMP);
      }
      if (this.region.getCoprocessorHost() != null) {
      if (this.getCoprocessorHost() != null) {
        InternalScanner cpScanner =
            this.region.getCoprocessorHost().preFlush(this, scanner);
            this.getCoprocessorHost().preFlush(this, scanner);
        // NULL scanner returned from coprocessor hooks means skip normal processing
        if (cpScanner == null) {
          return null;

@@ -898,17 +809,10 @@ public class HStore implements Store {
      MonitoredTask status)
      throws IOException {
    // Write-out finished successfully, move into the right spot
    String fileName = path.getName();
    Path dstPath = new Path(homedir, fileName);
    String msg = "Renaming flushed file at " + path + " to " + dstPath;
    LOG.debug(msg);
    status.setStatus("Flushing " + this + ": " + msg);
    if (!fs.rename(path, dstPath)) {
      LOG.warn("Unable to rename " + path + " to " + dstPath);
    }
    Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path);

    status.setStatus("Flushing " + this + ": reopening flushed file");
    StoreFile sf = new StoreFile(this.fs, dstPath, this.conf, this.cacheConf,
    StoreFile sf = new StoreFile(this.getFileSystem(), dstPath, this.conf, this.cacheConf,
        this.family.getBloomFilterType(), this.dataBlockEncoder);

    StoreFile.Reader r = sf.createReader();

@@ -927,7 +831,7 @@ public class HStore implements Store {
   * @param maxKeyCount
   * @return Writer for a new StoreFile in the tmp dir.
   */
  private StoreFile.Writer createWriterInTmp(int maxKeyCount)
  private StoreFile.Writer createWriterInTmp(long maxKeyCount)
      throws IOException {
    return createWriterInTmp(maxKeyCount, this.family.getCompression(), false);
  }

@@ -938,7 +842,7 @@ public class HStore implements Store {
   * @param isCompaction whether we are creating a new file in a compaction
   * @return Writer for a new StoreFile in the tmp dir.
   */
  public StoreFile.Writer createWriterInTmp(int maxKeyCount,
  public StoreFile.Writer createWriterInTmp(long maxKeyCount,
      Compression.Algorithm compression, boolean isCompaction)
      throws IOException {
    final CacheConfig writerCacheConf;

@@ -950,8 +854,8 @@ public class HStore implements Store {
      writerCacheConf = cacheConf;
    }
    StoreFile.Writer w = new StoreFile.WriterBuilder(conf, writerCacheConf,
        fs, blocksize)
            .withOutputDir(region.getTmpDir())
        this.getFileSystem(), blocksize)
            .withFilePath(fs.createTempName())
            .withDataBlockEncoder(dataBlockEncoder)
            .withComparator(comparator)
            .withBloomType(family.getBloomFilterType())

@@ -1084,8 +988,8 @@ public class HStore implements Store {

    // Ready to go. Have list of files to compact.
    LOG.info("Starting compaction of " + filesToCompact.size() + " file(s) in "
        + this + " of " + this.region.getRegionInfo().getRegionNameAsString()
        + " into tmpdir=" + region.getTmpDir() + ", totalSize="
        + this + " of " + this.getRegionInfo().getRegionNameAsString()
        + " into tmpdir=" + fs.getTempDir() + ", totalSize="
        + StringUtils.humanReadableInt(cr.getSize()));

    List<StoreFile> sfs = new ArrayList<StoreFile>();

@@ -1098,8 +1002,8 @@ public class HStore implements Store {
        for (Path newFile: newFiles) {
          assert newFile != null;
          StoreFile sf = moveFileIntoPlace(newFile);
          if (region.getCoprocessorHost() != null) {
            region.getCoprocessorHost().postCompact(this, sf, cr);
          if (this.getCoprocessorHost() != null) {
            this.getCoprocessorHost().postCompact(this, sf, cr);
          }
          assert sf != null;
          sfs.add(sf);

@@ -1108,7 +1012,7 @@ public class HStore implements Store {
      } else {
        for (Path newFile: newFiles) {
          // Create storefile around what we wrote with a reader on it.
          StoreFile sf = new StoreFile(this.fs, newFile, this.conf, this.cacheConf,
          StoreFile sf = new StoreFile(this.getFileSystem(), newFile, this.conf, this.cacheConf,
              this.family.getBloomFilterType(), this.dataBlockEncoder);
          sf.createReader();
          sfs.add(sf);

@@ -1133,7 +1037,7 @@ public class HStore implements Store {
      StringBuilder message = new StringBuilder(
          "Completed" + (cr.isMajor() ? " major " : " ") + "compaction of "
          + cr.getFiles().size() + " file(s) in " + this + " of "
          + this.region.getRegionInfo().getRegionNameAsString()
          + this.getRegionInfo().getRegionNameAsString()
          + " into ");
      if (sfs.isEmpty()) {
        message.append("none, ");

@@ -1155,17 +1059,11 @@ public class HStore implements Store {
  }

  // Package-visible for tests
  StoreFile moveFileIntoPlace(Path newFile) throws IOException {
  StoreFile moveFileIntoPlace(final Path newFile) throws IOException {
    validateStoreFile(newFile);
    // Move the file into the right spot
    Path destPath = new Path(homedir, newFile.getName());
    LOG.info("Renaming compacted file at " + newFile + " to " + destPath);
    if (!fs.rename(newFile, destPath)) {
      String err = "Failed move of compacted file " + newFile + " to " + destPath;
      LOG.error(err);
      throw new IOException(err);
    }
    StoreFile result = new StoreFile(this.fs, destPath, this.conf, this.cacheConf,
    Path destPath = fs.commitStoreFile(getColumnFamilyName(), newFile);
    StoreFile result = new StoreFile(this.getFileSystem(), destPath, this.conf, this.cacheConf,
        this.family.getBloomFilterType(), this.dataBlockEncoder);
    result.createReader();
    return result;

@@ -1215,8 +1113,8 @@ public class HStore implements Store {
      for (Path newFile: newFiles) {
        // Move the compaction into place.
        StoreFile sf = moveFileIntoPlace(newFile);
        if (region.getCoprocessorHost() != null) {
          region.getCoprocessorHost().postCompact(this, sf, null);
        if (this.getCoprocessorHost() != null) {
          this.getCoprocessorHost().postCompact(this, sf, null);
        }
        ArrayList<StoreFile> tmp = new ArrayList<StoreFile>();
        tmp.add(sf);

@@ -1260,7 +1158,7 @@ public class HStore implements Store {
  public CompactionContext requestCompaction(int priority, CompactionRequest baseRequest)
      throws IOException {
    // don't even select for compaction if writes are disabled
    if (!this.region.areWritesEnabled()) {
    if (!this.areWritesEnabled()) {
      return null;
    }

@@ -1269,9 +1167,9 @@ public class HStore implements Store {
    try {
      synchronized (filesCompacting) {
        // First, see if coprocessor would want to override selection.
        if (region.getCoprocessorHost() != null) {
        if (this.getCoprocessorHost() != null) {
          List<StoreFile> candidatesForCoproc = compaction.preSelect(this.filesCompacting);
          boolean override = region.getCoprocessorHost().preCompactSelection(
          boolean override = this.getCoprocessorHost().preCompactSelection(
              this, candidatesForCoproc, baseRequest);
          if (override) {
            // Coprocessor is overriding normal file selection.

@@ -1291,8 +1189,8 @@ public class HStore implements Store {
            this.offPeakCompactions.endOffPeakRequest();
          }
        }
        if (region.getCoprocessorHost() != null) {
          region.getCoprocessorHost().postCompactSelection(
        if (this.getCoprocessorHost() != null) {
          this.getCoprocessorHost().postCompactSelection(
              this, ImmutableList.copyOf(compaction.getRequest().getFiles()), baseRequest);
        }

@@ -1329,7 +1227,7 @@ public class HStore implements Store {
            (priority != Store.NO_PRIORITY) ? priority : getCompactPriority());
        compaction.getRequest().setIsMajor(isMajor);
        compaction.getRequest().setDescription(
            region.getRegionNameAsString(), getColumnFamilyName());
            getRegionInfo().getRegionNameAsString(), getColumnFamilyName());
      }
    } finally {
      this.lock.readLock().unlock();

@@ -1366,7 +1264,7 @@ public class HStore implements Store {
      throws IOException {
    StoreFile storeFile = null;
    try {
      storeFile = new StoreFile(this.fs, path, this.conf,
      storeFile = new StoreFile(this.getFileSystem(), path, this.conf,
          this.cacheConf, this.family.getBloomFilterType(),
          NoOpDataBlockEncoder.INSTANCE);
      storeFile.createReader();

@@ -1425,8 +1323,7 @@ public class HStore implements Store {

      // let the archive util decide if we should archive or delete the files
      LOG.debug("Removing store files after compaction...");
      HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.region,
          this.family.getName(), compactedFiles);
      this.fs.removeStoreFiles(this.getColumnFamilyName(), compactedFiles);

    } catch (IOException e) {
      e = RemoteExceptionHandler.checkIOException(e);

@@ -1475,12 +1372,12 @@ public class HStore implements Store {
    // at all (expired or not) has at least one version that will not expire.
    // Note that this method used to take a KeyValue as arguments. KeyValue
    // can be back-dated, a row key cannot.
    long ttlToUse = scanInfo.getMinVersions() > 0 ? Long.MAX_VALUE : this.ttl;
    long ttlToUse = scanInfo.getMinVersions() > 0 ? Long.MAX_VALUE : this.scanInfo.getTtl();

    KeyValue kv = new KeyValue(row, HConstants.LATEST_TIMESTAMP);

    GetClosestRowBeforeTracker state = new GetClosestRowBeforeTracker(
        this.comparator, kv, ttlToUse, this.region.getRegionInfo().isMetaRegion());
        this.comparator, kv, ttlToUse, this.getRegionInfo().isMetaRegion());
    this.lock.readLock().lock();
    try {
      // First go to the memstore. Pick up deletes and candidates.

@@ -1634,7 +1531,7 @@ public class HStore implements Store {
    this.lock.readLock().lock();
|
||||
try {
|
||||
// Should already be enforced by the split policy!
|
||||
assert !this.region.getRegionInfo().isMetaRegion();
|
||||
assert !this.getRegionInfo().isMetaRegion();
|
||||
// Not split-able if we find a reference store file present in the store.
|
||||
if (hasReferences()) {
|
||||
assert false : "getSplitPoint() called on a region that can't split!";
|
||||
|
@ -1677,8 +1574,8 @@ public class HStore implements Store {
|
|||
lock.readLock().lock();
|
||||
try {
|
||||
KeyValueScanner scanner = null;
|
||||
if (this.region.getCoprocessorHost() != null) {
|
||||
scanner = this.region.getCoprocessorHost().preStoreScannerOpen(this, scan, targetCols);
|
||||
if (this.getCoprocessorHost() != null) {
|
||||
scanner = this.getCoprocessorHost().preStoreScannerOpen(this, scan, targetCols);
|
||||
}
|
||||
if (scanner == null) {
|
||||
scanner = new StoreScanner(this, getScanInfo(), scan, targetCols);
|
||||
|
@ -1778,7 +1675,7 @@ public class HStore implements Store {
|
|||
|
||||
@Override
|
||||
public HRegionInfo getRegionInfo() {
|
||||
return this.region.getRegionInfo();
|
||||
return this.fs.getRegionInfo();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -1871,10 +1768,8 @@ public class HStore implements Store {
|
|||
}
|
||||
storeFile = HStore.this.commitFile(storeFilePath, cacheFlushId,
|
||||
snapshotTimeRangeTracker, flushedSize, status);
|
||||
if (HStore.this.region.getCoprocessorHost() != null) {
|
||||
HStore.this.getHRegion()
|
||||
.getCoprocessorHost()
|
||||
.postFlush(HStore.this, storeFile);
|
||||
if (HStore.this.getCoprocessorHost() != null) {
|
||||
HStore.this.getCoprocessorHost().postFlush(HStore.this, storeFile);
|
||||
}
|
||||
|
||||
// Add new file to store files. Clear snapshot too while we have
|
||||
|
@ -1894,7 +1789,7 @@ public class HStore implements Store {
|
|||
}
|
||||
|
||||
public static final long FIXED_OVERHEAD =
|
||||
ClassSize.align((21 * ClassSize.REFERENCE) + (4 * Bytes.SIZEOF_LONG)
|
||||
ClassSize.align((19 * ClassSize.REFERENCE) + (4 * Bytes.SIZEOF_LONG)
|
||||
+ (2 * Bytes.SIZEOF_INT) + Bytes.SIZEOF_BOOLEAN);
|
||||
|
||||
public static final long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD
|
||||
|
|
|
@ -655,11 +655,9 @@ public class SplitTransaction {
|
|||
throws IOException {
|
||||
FileSystem fs = this.parent.getFilesystem();
|
||||
byte [] family = sf.getFamily();
|
||||
String encoded = this.hri_a.getEncodedName();
|
||||
Path storedir = HStore.getStoreHomedir(splitdir, encoded, family);
|
||||
Path storedir = HStore.getStoreHomedir(splitdir, this.hri_a, family);
|
||||
StoreFile.split(fs, storedir, sf, this.splitrow, false);
|
||||
encoded = this.hri_b.getEncodedName();
|
||||
storedir = HStore.getStoreHomedir(splitdir, encoded, family);
|
||||
storedir = HStore.getStoreHomedir(splitdir, this.hri_b, family);
|
||||
StoreFile.split(fs, storedir, sf, this.splitrow, true);
|
||||
}
|
||||
|
||||
|
|
|
@ -145,7 +145,7 @@ public interface Store extends HeapSize, StoreConfigInformation {
|
|||
* @param isCompaction whether we are creating a new file in a compaction
|
||||
* @return Writer for a new StoreFile in the tmp dir.
|
||||
*/
|
||||
public StoreFile.Writer createWriterInTmp(int maxKeyCount,
|
||||
public StoreFile.Writer createWriterInTmp(long maxKeyCount,
|
||||
Compression.Algorithm compression, boolean isCompaction) throws IOException;
|
||||
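// Usage sketch (illustration only; the 'store' handle and the key-count estimate
// are assumptions, not part of this patch):
//   StoreFile.Writer w = store.createWriterInTmp(10000L, Compression.Algorithm.NONE, false);
//   // ... append KeyValues, then close the writer ...
//   w.close();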
|
||||
// Compaction oriented methods
|
||||
|
|
|
@ -30,8 +30,6 @@ import java.util.Map;
|
|||
import java.util.SortedSet;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
@ -46,9 +44,6 @@ import org.apache.hadoop.hbase.HDFSBlocksDistribution;
|
|||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.KeyValue.KVComparator;
|
||||
import org.apache.hadoop.hbase.client.Scan;
|
||||
import org.apache.hadoop.hbase.fs.HFileSystem;
|
||||
import org.apache.hadoop.hbase.io.HFileLink;
|
||||
import org.apache.hadoop.hbase.io.HalfStoreFileReader;
|
||||
import org.apache.hadoop.hbase.io.Reference;
|
||||
import org.apache.hadoop.hbase.io.compress.Compression;
|
||||
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
|
||||
|
@ -64,7 +59,6 @@ import org.apache.hadoop.hbase.util.BloomFilterFactory;
|
|||
import org.apache.hadoop.hbase.util.BloomFilterWriter;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.hbase.util.ChecksumType;
|
||||
import org.apache.hadoop.hbase.util.FSUtils;
|
||||
import org.apache.hadoop.hbase.util.Writables;
|
||||
import org.apache.hadoop.io.RawComparator;
|
||||
import org.apache.hadoop.io.WritableUtils;
|
||||
|
@ -125,29 +119,15 @@ public class StoreFile {
|
|||
// Need to make it 8k for testing.
|
||||
public static final int DEFAULT_BLOCKSIZE_SMALL = 8 * 1024;
|
||||
|
||||
private final StoreFileInfo fileInfo;
|
||||
private final FileSystem fs;
|
||||
|
||||
// This file's path.
|
||||
private final Path path;
|
||||
|
||||
// If this storefile references another, this is the reference instance.
|
||||
private Reference reference;
|
||||
|
||||
// If this StoreFile references another, this is the other files path.
|
||||
private Path referencePath;
|
||||
|
||||
// If this storefile is a link to another, this is the link instance.
|
||||
private HFileLink link;
|
||||
|
||||
// Block cache configuration and reference.
|
||||
private final CacheConfig cacheConf;
|
||||
|
||||
// What kind of data block encoding will be used
|
||||
private final HFileDataBlockEncoder dataBlockEncoder;
|
||||
|
||||
// HDFS blocks distribution information
|
||||
private HDFSBlocksDistribution hdfsBlocksDistribution;
|
||||
|
||||
// Keys for metadata stored in backing HFile.
|
||||
// Set when we obtain a Reader.
|
||||
private long sequenceid = -1;
|
||||
|
@ -183,27 +163,6 @@ public class StoreFile {
|
|||
*/
|
||||
private Map<byte[], byte[]> metadataMap;
|
||||
|
||||
/**
|
||||
* A non-capture group, for hfiles, so that this can be embedded.
|
||||
* HFiles are uuid ([0-9a-z]+). Bulk loaded hfiles has (_SeqId_[0-9]+_) has suffix.
|
||||
*/
|
||||
public static final String HFILE_NAME_REGEX = "[0-9a-f]+(?:_SeqId_[0-9]+_)?";
|
||||
|
||||
/** Regex that will work for hfiles */
|
||||
private static final Pattern HFILE_NAME_PATTERN =
|
||||
Pattern.compile("^(" + HFILE_NAME_REGEX + ")");
|
||||
|
||||
/**
|
||||
* Regex that will work for straight reference names (<hfile>.<parentEncRegion>)
|
||||
* and hfilelink reference names (<table>=<region>-<hfile>.<parentEncRegion>)
|
||||
* If reference, then the regex has more than just one group.
|
||||
* Group 1, hfile/hfilelink pattern, is this file's id.
|
||||
* Group 2 '(.+)' is the reference's parent region name.
|
||||
*/
|
||||
private static final Pattern REF_NAME_PATTERN =
|
||||
Pattern.compile(String.format("^(%s|%s)\\.(.+)$",
|
||||
HFILE_NAME_REGEX, HFileLink.LINK_NAME_REGEX));
|
||||
|
||||
// StoreFile.Reader
|
||||
private volatile Reader reader;
|
||||
|
||||
|
@ -232,64 +191,63 @@ public class StoreFile {
|
|||
* @param dataBlockEncoder data block encoding algorithm.
|
||||
* @throws IOException When opening the reader fails.
|
||||
*/
|
||||
public StoreFile(final FileSystem fs,
|
||||
final Path p,
|
||||
final Configuration conf,
|
||||
final CacheConfig cacheConf,
|
||||
final BloomType cfBloomType,
|
||||
final HFileDataBlockEncoder dataBlockEncoder)
|
||||
throws IOException {
|
||||
public StoreFile(final FileSystem fs, final Path p, final Configuration conf,
|
||||
final CacheConfig cacheConf, final BloomType cfBloomType,
|
||||
final HFileDataBlockEncoder dataBlockEncoder) throws IOException {
|
||||
this(fs, new StoreFileInfo(conf, fs, p), conf, cacheConf, cfBloomType, dataBlockEncoder);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Constructor, loads a reader and it's indices, etc. May allocate a
|
||||
* substantial amount of ram depending on the underlying files (10-20MB?).
|
||||
*
|
||||
* @param fs The current file system to use.
|
||||
* @param fileInfo The store file information.
|
||||
* @param conf The current configuration.
|
||||
* @param cacheConf The cache configuration and block cache reference.
|
||||
* @param cfBloomType The bloom type to use for this store file as specified
|
||||
* by column family configuration. This may or may not be the same
|
||||
* as the Bloom filter type actually present in the HFile, because
|
||||
* column family configuration might change. If this is
|
||||
* {@link BloomType#NONE}, the existing Bloom filter is ignored.
|
||||
* @param dataBlockEncoder data block encoding algorithm.
|
||||
* @throws IOException When opening the reader fails.
|
||||
*/
|
||||
public StoreFile(final FileSystem fs, final StoreFileInfo fileInfo, final Configuration conf,
|
||||
final CacheConfig cacheConf, final BloomType cfBloomType,
|
||||
final HFileDataBlockEncoder dataBlockEncoder) throws IOException {
|
||||
this.fs = fs;
|
||||
this.path = p;
|
||||
this.fileInfo = fileInfo;
|
||||
this.cacheConf = cacheConf;
|
||||
this.dataBlockEncoder =
|
||||
dataBlockEncoder == null ? NoOpDataBlockEncoder.INSTANCE
|
||||
: dataBlockEncoder;
|
||||
|
||||
if (HFileLink.isHFileLink(p)) {
|
||||
this.link = new HFileLink(conf, p);
|
||||
LOG.debug("Store file " + p + " is a link");
|
||||
} else if (isReference(p)) {
|
||||
this.reference = Reference.read(fs, p);
|
||||
this.referencePath = getReferredToFile(this.path);
|
||||
if (HFileLink.isHFileLink(this.referencePath)) {
|
||||
this.link = new HFileLink(conf, this.referencePath);
|
||||
}
|
||||
LOG.debug("Store file " + p + " is a " + reference.getFileRegion() +
|
||||
" reference to " + this.referencePath);
|
||||
} else if (!isHFile(p)) {
|
||||
throw new IOException("path=" + path + " doesn't look like a valid StoreFile");
|
||||
}
|
||||
|
||||
if (BloomFilterFactory.isGeneralBloomEnabled(conf)) {
|
||||
this.cfBloomType = cfBloomType;
|
||||
} else {
|
||||
LOG.info("Ignoring bloom filter check for file " + path + ": " +
|
||||
LOG.info("Ignoring bloom filter check for file " + this.getPath() + ": " +
|
||||
"cfBloomType=" + cfBloomType + " (disabled in config)");
|
||||
this.cfBloomType = BloomType.NONE;
|
||||
}
|
||||
|
||||
// cache the modification time stamp of this store file
|
||||
FileStatus[] stats = FSUtils.listStatus(fs, p, null);
|
||||
if (stats != null && stats.length == 1) {
|
||||
this.modificationTimeStamp = stats[0].getModificationTime();
|
||||
} else {
|
||||
this.modificationTimeStamp = 0;
|
||||
}
|
||||
this.modificationTimeStamp = fileInfo.getModificationTime();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return Path or null if this StoreFile was made with a Stream.
|
||||
*/
|
||||
public Path getPath() {
|
||||
return this.path;
|
||||
return this.fileInfo.getPath();
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The Store/ColumnFamily this file belongs to.
|
||||
*/
|
||||
byte [] getFamily() {
|
||||
return Bytes.toBytes(this.path.getParent().getName());
|
||||
return Bytes.toBytes(this.getPath().getParent().getName());
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -297,64 +255,7 @@ public class StoreFile {
|
|||
* else may get wrong answer.
|
||||
*/
|
||||
public boolean isReference() {
|
||||
return this.reference != null;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return <tt>true</tt> if this StoreFile is an HFileLink
|
||||
*/
|
||||
boolean isLink() {
|
||||
return this.link != null && this.reference == null;
|
||||
}
|
||||
|
||||
private static boolean isHFile(final Path path) {
|
||||
Matcher m = HFILE_NAME_PATTERN.matcher(path.getName());
|
||||
return m.matches() && m.groupCount() > 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param p Path to check.
|
||||
* @return True if the path has format of a HStoreFile reference.
|
||||
*/
|
||||
public static boolean isReference(final Path p) {
|
||||
return isReference(p.getName());
|
||||
}
|
||||
|
||||
/**
|
||||
* @param name file name to check.
|
||||
* @return True if the path has format of a HStoreFile reference.
|
||||
*/
|
||||
public static boolean isReference(final String name) {
|
||||
Matcher m = REF_NAME_PATTERN.matcher(name);
|
||||
return m.matches() && m.groupCount() > 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return path to the file referred to by a Reference. Presumes a directory
|
||||
* hierarchy of <code>${hbase.rootdir}/tablename/regionname/familyname</code>.
|
||||
* @param p Path to a Reference file.
|
||||
* @return Calculated path to parent region file.
|
||||
* @throws IllegalArgumentException when path regex fails to match.
|
||||
*/
|
||||
public static Path getReferredToFile(final Path p) {
|
||||
Matcher m = REF_NAME_PATTERN.matcher(p.getName());
|
||||
if (m == null || !m.matches()) {
|
||||
LOG.warn("Failed match of store file name " + p.toString());
|
||||
throw new IllegalArgumentException("Failed match of store file name " +
|
||||
p.toString());
|
||||
}
|
||||
|
||||
// Other region name is suffix on the passed Reference file name
|
||||
String otherRegion = m.group(2);
|
||||
// Tabledir is up two directories from where Reference was written.
|
||||
Path tableDir = p.getParent().getParent().getParent();
|
||||
String nameStrippedOfSuffix = m.group(1);
|
||||
LOG.debug("reference '" + p + "' to region=" + otherRegion + " hfile=" + nameStrippedOfSuffix);
|
||||
|
||||
// Build up new path with the referenced region in place of our current
|
||||
// region in the reference path. Also strip regionname suffix from name.
|
||||
return new Path(new Path(new Path(tableDir, otherRegion),
|
||||
p.getParent().getName()), nameStrippedOfSuffix);
|
||||
return this.fileInfo.isReference();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -444,65 +345,7 @@ public class StoreFile {
|
|||
* calculated when store file is opened.
|
||||
*/
|
||||
public HDFSBlocksDistribution getHDFSBlockDistribution() {
|
||||
return this.hdfsBlocksDistribution;
|
||||
}
|
||||
|
||||
/**
|
||||
* helper function to compute HDFS blocks distribution of a given reference
|
||||
* file.For reference file, we don't compute the exact value. We use some
|
||||
* estimate instead given it might be good enough. we assume bottom part
|
||||
* takes the first half of reference file, top part takes the second half
|
||||
* of the reference file. This is just estimate, given
|
||||
* midkey ofregion != midkey of HFile, also the number and size of keys vary.
|
||||
* If this estimate isn't good enough, we can improve it later.
|
||||
* @param fs The FileSystem
|
||||
* @param reference The reference
|
||||
* @param status The reference FileStatus
|
||||
* @return HDFS blocks distribution
|
||||
*/
|
||||
static private HDFSBlocksDistribution computeRefFileHDFSBlockDistribution(
|
||||
FileSystem fs, Reference reference, FileStatus status) throws IOException {
|
||||
if (status == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
long start = 0;
|
||||
long length = 0;
|
||||
|
||||
if (Reference.isTopFileRegion(reference.getFileRegion())) {
|
||||
start = status.getLen()/2;
|
||||
length = status.getLen() - status.getLen()/2;
|
||||
} else {
|
||||
start = 0;
|
||||
length = status.getLen()/2;
|
||||
}
|
||||
return FSUtils.computeHDFSBlocksDistribution(fs, status, start, length);
|
||||
}
|
||||
|
||||
/**
|
||||
* compute HDFS block distribution, for reference file, it is an estimate
|
||||
*/
|
||||
private void computeHDFSBlockDistribution() throws IOException {
|
||||
if (isReference()) {
|
||||
FileStatus status;
|
||||
if (this.link != null) {
|
||||
status = this.link.getFileStatus(fs);
|
||||
} else {
|
||||
status = fs.getFileStatus(this.referencePath);
|
||||
}
|
||||
this.hdfsBlocksDistribution = computeRefFileHDFSBlockDistribution(
|
||||
this.fs, this.reference, status);
|
||||
} else {
|
||||
FileStatus status;
|
||||
if (isLink()) {
|
||||
status = link.getFileStatus(fs);
|
||||
} else {
|
||||
status = this.fs.getFileStatus(path);
|
||||
}
|
||||
long length = status.getLen();
|
||||
this.hdfsBlocksDistribution = FSUtils.computeHDFSBlocksDistribution(
|
||||
this.fs, status, 0, length);
|
||||
}
|
||||
return this.fileInfo.getHDFSBlockDistribution();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -515,24 +358,9 @@ public class StoreFile {
|
|||
if (this.reader != null) {
|
||||
throw new IllegalAccessError("Already open");
|
||||
}
|
||||
if (isReference()) {
|
||||
if (this.link != null) {
|
||||
this.reader = new HalfStoreFileReader(this.fs, this.referencePath, this.link,
|
||||
this.cacheConf, this.reference, dataBlockEncoder.getEncodingInCache());
|
||||
} else {
|
||||
this.reader = new HalfStoreFileReader(this.fs, this.referencePath,
|
||||
this.cacheConf, this.reference, dataBlockEncoder.getEncodingInCache());
|
||||
}
|
||||
} else if (isLink()) {
|
||||
long size = link.getFileStatus(fs).getLen();
|
||||
this.reader = new Reader(this.fs, this.path, link, size, this.cacheConf,
|
||||
dataBlockEncoder.getEncodingInCache(), true);
|
||||
} else {
|
||||
this.reader = new Reader(this.fs, this.path, this.cacheConf,
|
||||
dataBlockEncoder.getEncodingInCache());
|
||||
}
|
||||
|
||||
computeHDFSBlockDistribution();
|
||||
// Open the StoreFile.Reader
|
||||
this.reader = fileInfo.open(this.fs, this.cacheConf, dataBlockEncoder.getEncodingInCache());
|
||||
|
||||
// Load up indices and fileinfo. This also loads Bloom filter type.
|
||||
metadataMap = Collections.unmodifiableMap(this.reader.loadFileInfo());
|
||||
|
@ -546,26 +374,22 @@ public class StoreFile {
|
|||
// since store files are distinguished by sequence id, the one half would
|
||||
// subsume the other.
|
||||
this.sequenceid = Bytes.toLong(b);
|
||||
if (isReference()) {
|
||||
if (Reference.isTopFileRegion(this.reference.getFileRegion())) {
|
||||
this.sequenceid += 1;
|
||||
}
|
||||
if (fileInfo.isTopReference()) {
|
||||
this.sequenceid += 1;
|
||||
}
|
||||
}
|
||||
|
||||
if (isBulkLoadResult()){
|
||||
// generate the sequenceId from the fileName
|
||||
// fileName is of the form <randomName>_SeqId_<id-when-loaded>_
|
||||
String fileName = this.path.getName();
|
||||
String fileName = this.getPath().getName();
|
||||
int startPos = fileName.indexOf("SeqId_");
|
||||
if (startPos != -1) {
|
||||
this.sequenceid = Long.parseLong(fileName.substring(startPos + 6,
|
||||
fileName.indexOf('_', startPos + 6)));
|
||||
// Handle reference files as done above.
|
||||
if (isReference()) {
|
||||
if (Reference.isTopFileRegion(this.reference.getFileRegion())) {
|
||||
this.sequenceid += 1;
|
||||
}
|
||||
if (fileInfo.isTopReference()) {
|
||||
this.sequenceid += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
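// Worked example (illustration only; the file name is made up): for a bulk-loaded
// file named "7a3f09c1e4b24d58_SeqId_42_", the text between "SeqId_" and the next
// '_' is "42", so sequenceid becomes 42 (43 if the file is also a top reference).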
|
@ -675,8 +499,7 @@ public class StoreFile {
|
|||
|
||||
@Override
|
||||
public String toString() {
|
||||
return this.path.toString() +
|
||||
(isReference()? "-" + this.referencePath + "-" + reference.toString(): "");
|
||||
return this.fileInfo.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -684,7 +507,7 @@ public class StoreFile {
|
|||
*/
|
||||
public String toStringDetailed() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append(this.path.toString());
|
||||
sb.append(this.getPath().toString());
|
||||
sb.append(", isReference=").append(isReference());
|
||||
sb.append(", isBulkLoadResult=").append(isBulkLoadResult());
|
||||
if (isBulkLoadResult()) {
|
||||
|
@ -869,48 +692,7 @@ public class StoreFile {
|
|||
throw new IOException("Expecting " + dir.toString() +
|
||||
" to be a directory");
|
||||
}
|
||||
return getRandomFilename(fs, dir);
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @param fs
|
||||
* @param dir
|
||||
* @return Path to a file that doesn't exist at time of this invocation.
|
||||
* @throws IOException
|
||||
*/
|
||||
static Path getRandomFilename(final FileSystem fs, final Path dir)
|
||||
throws IOException {
|
||||
return getRandomFilename(fs, dir, null);
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @param fs
|
||||
* @param dir
|
||||
* @param suffix
|
||||
* @return Path to a file that doesn't exist at time of this invocation.
|
||||
* @throws IOException
|
||||
*/
|
||||
static Path getRandomFilename(final FileSystem fs,
|
||||
final Path dir,
|
||||
final String suffix)
|
||||
throws IOException {
|
||||
return new Path(dir, UUID.randomUUID().toString().replaceAll("-", "")
|
||||
+ (suffix == null ? "" : suffix));
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate the store file name.
|
||||
* @param fileName name of the file to validate
|
||||
* @return <tt>true</tt> if the file could be a valid store file, <tt>false</tt> otherwise
|
||||
*/
|
||||
public static boolean validateStoreFileName(String fileName) {
|
||||
if (HFileLink.isHFileLink(fileName))
|
||||
return true;
|
||||
if (isReference(fileName))
|
||||
return true;
|
||||
return !fileName.contains("-");
|
||||
return new Path(dir, UUID.randomUUID().toString().replaceAll("-", ""));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1336,17 +1118,10 @@ public class StoreFile {
|
|||
bloomFilterType = BloomType.NONE;
|
||||
}
|
||||
|
||||
public Reader(FileSystem fs, Path path, HFileLink hfileLink, long size,
|
||||
public Reader(FileSystem fs, Path path, FSDataInputStream in,
|
||||
final FSDataInputStream inNoChecksum, long size,
|
||||
CacheConfig cacheConf, DataBlockEncoding preferredEncodingInCache,
|
||||
boolean closeIStream) throws IOException {
|
||||
|
||||
FSDataInputStream in = hfileLink.open(fs);
|
||||
FSDataInputStream inNoChecksum = in;
|
||||
if (fs instanceof HFileSystem) {
|
||||
FileSystem noChecksumFs = ((HFileSystem)fs).getNoChecksumFs();
|
||||
inNoChecksum = hfileLink.open(noChecksumFs);
|
||||
}
|
||||
|
||||
reader = HFile.createReaderWithEncoding(fs, path, in, inNoChecksum,
|
||||
size, cacheConf, preferredEncodingInCache, closeIStream);
|
||||
bloomFilterType = BloomType.NONE;
|
||||
|
|
|
@ -0,0 +1,378 @@
|
|||
/**
|
||||
*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hbase.regionserver;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
|
||||
import org.apache.hadoop.hbase.fs.HFileSystem;
|
||||
import org.apache.hadoop.hbase.io.HFileLink;
|
||||
import org.apache.hadoop.hbase.io.Reference;
|
||||
import org.apache.hadoop.hbase.io.HalfStoreFileReader;
|
||||
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
|
||||
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
|
||||
import org.apache.hadoop.hbase.util.FSUtils;
|
||||
|
||||
/**
|
||||
* Describe a StoreFile (hfile, reference, link)
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class StoreFileInfo {
|
||||
public static final Log LOG = LogFactory.getLog(StoreFileInfo.class);
|
||||
|
||||
/**
|
||||
* A non-capture group, for hfiles, so that this can be embedded.
|
||||
* HFiles are uuid ([0-9a-z]+). Bulk loaded hfiles have a (_SeqId_[0-9]+_) suffix.
|
||||
*/
|
||||
public static final String HFILE_NAME_REGEX = "[0-9a-f]+(?:_SeqId_[0-9]+_)?";
|
||||
|
||||
/** Regex that will work for hfiles */
|
||||
private static final Pattern HFILE_NAME_PATTERN =
|
||||
Pattern.compile("^(" + HFILE_NAME_REGEX + ")");
|
||||
|
||||
/**
|
||||
* Regex that will work for straight reference names (<hfile>.<parentEncRegion>)
|
||||
* and hfilelink reference names (<table>=<region>-<hfile>.<parentEncRegion>)
|
||||
* If reference, then the regex has more than just one group.
|
||||
* Group 1, hfile/hfilelink pattern, is this file's id.
|
||||
* Group 2 '(.+)' is the reference's parent region name.
|
||||
*/
|
||||
private static final Pattern REF_NAME_PATTERN =
|
||||
Pattern.compile(String.format("^(%s|%s)\\.(.+)$",
|
||||
HFILE_NAME_REGEX, HFileLink.LINK_NAME_REGEX));
|
||||
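// Illustration only (the names below are made up): how these patterns classify file names.
//   isHFile("0123456789abcdef0123456789abcdef")              -> true (plain hfile)
//   isHFile("0123456789abcdef0123456789abcdef_SeqId_42_")    -> true (bulk loaded hfile)
//   isReference("0123456789abcdef0123456789abcdef.30e19b8a") -> true (group 2, "30e19b8a",
//       is the encoded name of the parent region hosting the referred-to hfile)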
|
||||
// HDFS blocks distribution information
|
||||
private HDFSBlocksDistribution hdfsBlocksDistribution = null;
|
||||
|
||||
// If this storefile references another, this is the reference instance.
|
||||
private final Reference reference;
|
||||
|
||||
// If this storefile is a link to another, this is the link instance.
|
||||
private final HFileLink link;
|
||||
|
||||
// FileSystem information for the file.
|
||||
private final FileStatus fileStatus;
|
||||
|
||||
/**
|
||||
* Create a Store File Info
|
||||
* @param conf the {@link Configuration} to use
|
||||
* @param fs The current file system to use.
|
||||
* @param path The {@link Path} of the file
|
||||
*/
|
||||
public StoreFileInfo(final Configuration conf, final FileSystem fs, final Path path)
|
||||
throws IOException {
|
||||
this(conf, fs, fs.getFileStatus(path));
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a Store File Info
|
||||
* @param conf the {@link Configuration} to use
|
||||
* @param fs The current file system to use.
|
||||
* @param fileStatus The {@link FileStatus} of the file
|
||||
*/
|
||||
public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus)
|
||||
throws IOException {
|
||||
this.fileStatus = fileStatus;
|
||||
|
||||
Path p = fileStatus.getPath();
|
||||
if (HFileLink.isHFileLink(p)) {
|
||||
// HFileLink
|
||||
this.reference = null;
|
||||
this.link = new HFileLink(conf, p);
|
||||
LOG.debug("Store file " + p + " is a link");
|
||||
} else if (isReference(p)) {
|
||||
this.reference = Reference.read(fs, p);
|
||||
Path referencePath = getReferredToFile(p);
|
||||
if (HFileLink.isHFileLink(referencePath)) {
|
||||
// HFileLink Reference
|
||||
this.link = new HFileLink(conf, referencePath);
|
||||
} else {
|
||||
// Reference
|
||||
this.link = null;
|
||||
}
|
||||
LOG.debug("Store file " + p + " is a " + reference.getFileRegion() +
|
||||
" reference to " + referencePath);
|
||||
} else if (isHFile(p)) {
|
||||
// HFile
|
||||
this.reference = null;
|
||||
this.link = null;
|
||||
} else {
|
||||
throw new IOException("path=" + p + " doesn't look like a valid StoreFile");
|
||||
}
|
||||
}
|
||||
|
||||
/** @return True if the store file is a Reference */
|
||||
public boolean isReference() {
|
||||
return this.reference != null;
|
||||
}
|
||||
|
||||
/** @return True if the store file is a top Reference */
|
||||
public boolean isTopReference() {
|
||||
return this.reference != null && Reference.isTopFileRegion(this.reference.getFileRegion());
|
||||
}
|
||||
|
||||
/** @return True if the store file is a link */
|
||||
public boolean isLink() {
|
||||
return this.link != null && this.reference == null;
|
||||
}
|
||||
|
||||
/** @return the HDFS block distribution */
|
||||
public HDFSBlocksDistribution getHDFSBlockDistribution() {
|
||||
return this.hdfsBlocksDistribution;
|
||||
}
|
||||
|
||||
/**
|
||||
* Open a Reader for the StoreFile
|
||||
* @param fs The current file system to use.
|
||||
* @param cacheConf The cache configuration and block cache reference.
|
||||
* @param dataBlockEncoding data block encoding algorithm.
|
||||
* @return The StoreFile.Reader for the file
|
||||
*/
|
||||
public StoreFile.Reader open(final FileSystem fs, final CacheConfig cacheConf,
|
||||
final DataBlockEncoding dataBlockEncoding) throws IOException {
|
||||
FSDataInputStream inNoChecksum = null;
|
||||
FileSystem noChecksumFs = null;
|
||||
FSDataInputStream in;
|
||||
FileStatus status;
|
||||
|
||||
if (fs instanceof HFileSystem) {
|
||||
noChecksumFs = ((HFileSystem)fs).getNoChecksumFs();
|
||||
}
|
||||
|
||||
if (this.reference != null) {
|
||||
if (this.link != null) {
|
||||
// HFileLink Reference
|
||||
in = this.link.open(fs);
|
||||
inNoChecksum = (noChecksumFs != null) ? this.link.open(noChecksumFs) : in;
|
||||
status = this.link.getFileStatus(fs);
|
||||
} else {
|
||||
// HFile Reference
|
||||
Path referencePath = getReferredToFile(this.getPath());
|
||||
in = fs.open(referencePath);
|
||||
inNoChecksum = (noChecksumFs != null) ? noChecksumFs.open(referencePath) : in;
|
||||
status = fs.getFileStatus(referencePath);
|
||||
}
|
||||
|
||||
hdfsBlocksDistribution = computeRefFileHDFSBlockDistribution(fs, reference, status);
|
||||
return new HalfStoreFileReader(fs, this.getPath(), in, inNoChecksum, status.getLen(),
|
||||
cacheConf, reference, dataBlockEncoding);
|
||||
} else {
|
||||
if (this.link != null) {
|
||||
// HFileLink
|
||||
in = this.link.open(fs);
|
||||
inNoChecksum = (noChecksumFs != null) ? link.open(noChecksumFs) : in;
|
||||
status = this.link.getFileStatus(fs);
|
||||
} else {
|
||||
// HFile
|
||||
status = fileStatus;
|
||||
in = fs.open(this.getPath());
|
||||
inNoChecksum = (noChecksumFs != null) ? noChecksumFs.open(this.getPath()) : in;
|
||||
}
|
||||
|
||||
long length = status.getLen();
|
||||
hdfsBlocksDistribution = FSUtils.computeHDFSBlocksDistribution(fs, status, 0, length);
|
||||
return new StoreFile.Reader(fs, this.getPath(), in, inNoChecksum, length,
|
||||
cacheConf, dataBlockEncoding, true);
|
||||
}
|
||||
}
|
||||
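// Usage sketch (illustration only; the variable names are assumptions):
//   StoreFileInfo info = new StoreFileInfo(conf, fs, path);
//   StoreFile.Reader reader = info.open(fs, cacheConf, DataBlockEncoding.NONE);
//   reader.loadFileInfo();
//   // ... scan or get ...
//   reader.close(false); // false: don't evict cached blocks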
|
||||
/**
|
||||
* Compute the HDFS Block Distribution for this StoreFile
|
||||
*/
|
||||
public HDFSBlocksDistribution computeHDFSBlocksDistribution(final FileSystem fs)
|
||||
throws IOException {
|
||||
FileStatus status;
|
||||
if (this.reference != null) {
|
||||
if (this.link != null) {
|
||||
// HFileLink Reference
|
||||
status = link.getFileStatus(fs);
|
||||
} else {
|
||||
// HFile Reference
|
||||
Path referencePath = getReferredToFile(this.getPath());
|
||||
status = fs.getFileStatus(referencePath);
|
||||
}
|
||||
return computeRefFileHDFSBlockDistribution(fs, reference, status);
|
||||
} else {
|
||||
if (this.link != null) {
|
||||
// HFileLink
|
||||
status = link.getFileStatus(fs);
|
||||
} else {
|
||||
status = this.fileStatus;
|
||||
}
|
||||
return FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
|
||||
}
|
||||
}
|
||||
|
||||
/** @return The {@link Path} of the file */
|
||||
public Path getPath() {
|
||||
return this.fileStatus.getPath();
|
||||
}
|
||||
|
||||
/** @return The {@link FileStatus} of the file */
|
||||
public FileStatus getFileStatus() {
|
||||
return this.fileStatus;
|
||||
}
|
||||
|
||||
/** @return Get the modification time of the file. */
|
||||
public long getModificationTime() {
|
||||
return this.fileStatus.getModificationTime();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return this.getPath() +
|
||||
(isReference() ? "-" + getReferredToFile(this.getPath()) + "-" + reference : "");
|
||||
}
|
||||
|
||||
/**
|
||||
* @param path Path to check.
|
||||
* @return True if the path has the format of an HFile.
|
||||
*/
|
||||
public static boolean isHFile(final Path path) {
|
||||
return isHFile(path.getName());
|
||||
}
|
||||
|
||||
public static boolean isHFile(final String fileName) {
|
||||
Matcher m = HFILE_NAME_PATTERN.matcher(fileName);
|
||||
return m.matches() && m.groupCount() > 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param path Path to check.
|
||||
* @return True if the path has the format of an HStoreFile reference.
|
||||
*/
|
||||
public static boolean isReference(final Path path) {
|
||||
return isReference(path.getName());
|
||||
}
|
||||
|
||||
/**
|
||||
* @param name file name to check.
|
||||
* @return True if the name has the format of an HStoreFile reference.
|
||||
*/
|
||||
public static boolean isReference(final String name) {
|
||||
Matcher m = REF_NAME_PATTERN.matcher(name);
|
||||
return m.matches() && m.groupCount() > 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Return path to the file referred to by a Reference. Presumes a directory
|
||||
* hierarchy of <code>${hbase.rootdir}/tablename/regionname/familyname</code>.
|
||||
* @param p Path to a Reference file.
|
||||
* @return Calculated path to parent region file.
|
||||
* @throws IllegalArgumentException when path regex fails to match.
|
||||
*/
|
||||
public static Path getReferredToFile(final Path p) {
|
||||
Matcher m = REF_NAME_PATTERN.matcher(p.getName());
|
||||
if (m == null || !m.matches()) {
|
||||
LOG.warn("Failed match of store file name " + p.toString());
|
||||
throw new IllegalArgumentException("Failed match of store file name " +
|
||||
p.toString());
|
||||
}
|
||||
|
||||
// Other region name is suffix on the passed Reference file name
|
||||
String otherRegion = m.group(2);
|
||||
// Tabledir is up two directories from where Reference was written.
|
||||
Path tableDir = p.getParent().getParent().getParent();
|
||||
String nameStrippedOfSuffix = m.group(1);
|
||||
LOG.debug("reference '" + p + "' to region=" + otherRegion + " hfile=" + nameStrippedOfSuffix);
|
||||
|
||||
// Build up new path with the referenced region in place of our current
|
||||
// region in the reference path. Also strip regionname suffix from name.
|
||||
return new Path(new Path(new Path(tableDir, otherRegion),
|
||||
p.getParent().getName()), nameStrippedOfSuffix);
|
||||
}
|
||||
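// Worked example (illustration only; the paths are made up):
//   reference file:  /hbase/t1/d6a1f2e3/cf/0123456789abcdef0123456789abcdef.30e19b8a
//   group(1) = "0123456789abcdef0123456789abcdef", group(2) = "30e19b8a"
//   returned path:   /hbase/t1/30e19b8a/cf/0123456789abcdef0123456789abcdef
// i.e. the same hfile name under the referred-to (parent) region, in the same family.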
|
||||
/**
|
||||
* Validate the store file name.
|
||||
* @param fileName name of the file to validate
|
||||
* @return <tt>true</tt> if the file could be a valid store file, <tt>false</tt> otherwise
|
||||
*/
|
||||
public static boolean validateStoreFileName(final String fileName) {
|
||||
if (HFileLink.isHFileLink(fileName) || isReference(fileName))
|
||||
return(true);
|
||||
return !fileName.contains("-");
|
||||
}
|
||||
|
||||
/**
|
||||
* Return if the specified file is a valid store file or not.
|
||||
* @param fileStatus The {@link FileStatus} of the file
|
||||
* @return <tt>true</tt> if the file is valid
|
||||
*/
|
||||
public static boolean isValid(final FileStatus fileStatus)
|
||||
throws IOException {
|
||||
final Path p = fileStatus.getPath();
|
||||
|
||||
if (fileStatus.isDir())
|
||||
return false;
|
||||
|
||||
// Check for empty hfile. Should never be the case but can happen
|
||||
// after data loss in hdfs for whatever reason (upgrade, etc.): HBASE-646
|
||||
// NOTE: that the HFileLink is just a name, so it's an empty file.
|
||||
if (!HFileLink.isHFileLink(p) && fileStatus.getLen() <= 0) {
|
||||
LOG.warn("Skipping " + p + " beccreateStoreDirause its empty. HBASE-646 DATA LOSS?");
|
||||
return false;
|
||||
}
|
||||
|
||||
return validateStoreFileName(p.getName());
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper function to compute the HDFS blocks distribution of a given reference
* file. For a reference file we don't compute the exact value; we use an
* estimate instead, given that it might be good enough. We assume the bottom
* part takes the first half of the reference file and the top part takes the
* second half. This is just an estimate, since the midkey of the region is not
* the midkey of the HFile, and the number and size of keys vary.
* If this estimate isn't good enough, we can improve it later.
|
||||
* @param fs The FileSystem
|
||||
* @param reference The reference
|
||||
* @param status The reference FileStatus
|
||||
* @return HDFS blocks distribution
|
||||
*/
|
||||
private static HDFSBlocksDistribution computeRefFileHDFSBlockDistribution(
|
||||
final FileSystem fs, final Reference reference, final FileStatus status)
|
||||
throws IOException {
|
||||
if (status == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
long start = 0;
|
||||
long length = 0;
|
||||
|
||||
if (Reference.isTopFileRegion(reference.getFileRegion())) {
|
||||
start = status.getLen()/2;
|
||||
length = status.getLen() - status.getLen()/2;
|
||||
} else {
|
||||
start = 0;
|
||||
length = status.getLen()/2;
|
||||
}
|
||||
return FSUtils.computeHDFSBlocksDistribution(fs, status, start, length);
|
||||
}
|
||||
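// Sketch of the estimate above (illustration only): for a 128 MB referred-to file,
// a bottom reference is charged the blocks in [0, 64 MB) and a top reference the
// blocks in [64 MB, 128 MB), regardless of where the actual split key falls.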
}
|
|
@ -46,7 +46,7 @@ import org.apache.hadoop.hbase.io.HFileLink;
|
|||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
|
||||
import org.apache.hadoop.hbase.regionserver.HRegion;
|
||||
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
|
||||
import org.apache.hadoop.hbase.regionserver.StoreFile;
|
||||
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.hbase.util.FSUtils;
|
||||
import org.apache.hadoop.hbase.util.FSVisitor;
|
||||
|
@ -453,7 +453,7 @@ public class RestoreSnapshotHelper {
|
|||
final String hfileName) throws IOException {
|
||||
if (HFileLink.isHFileLink(hfileName)) {
|
||||
HFileLink.createFromHFileLink(conf, fs, familyDir, hfileName);
|
||||
} else if (StoreFile.isReference(hfileName)) {
|
||||
} else if (StoreFileInfo.isReference(hfileName)) {
|
||||
restoreReferenceFile(familyDir, regionInfo, hfileName);
|
||||
} else {
|
||||
HFileLink.create(conf, fs, familyDir, regionInfo, hfileName);
|
||||
|
@ -482,7 +482,7 @@ public class RestoreSnapshotHelper {
|
|||
final String hfileName) throws IOException {
|
||||
// Extract the referred information (hfile name and parent region)
|
||||
String tableName = snapshotDesc.getTable();
|
||||
Path refPath = StoreFile.getReferredToFile(new Path(new Path(new Path(tableName,
|
||||
Path refPath = StoreFileInfo.getReferredToFile(new Path(new Path(new Path(tableName,
|
||||
regionInfo.getEncodedName()), familyDir.getName()), hfileName));
|
||||
String snapshotRegionName = refPath.getParent().getParent().getName();
|
||||
String fileName = refPath.getName();
|
||||
|
|
|
@ -206,7 +206,7 @@ public class SnapshotDescriptionUtils {
|
|||
* Get the directory to store the snapshot instance
|
||||
* @param snapshotsDir hbase-global directory for storing all snapshots
|
||||
* @param snapshotName name of the snapshot to take
|
||||
* @return
|
||||
* @return the final directory for the completed snapshot
|
||||
*/
|
||||
private static final Path getCompletedSnapshotDir(final Path snapshotsDir, String snapshotName) {
|
||||
return new Path(snapshotsDir, snapshotName);
|
||||
|
|
|
@ -73,19 +73,6 @@ public class TakeSnapshotUtils {
|
|||
return HRegion.getRegionDir(snapshotDir, regionName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the home directory for store-level snapshot files.
|
||||
* <p>
|
||||
* Specific files per store are kept in a similar layout as per the current directory layout.
|
||||
* @param regionDir snapshot directory for the parent region, <b>not</b> the standard region
|
||||
* directory. See {@link #getRegionSnapshotDirectory}
|
||||
* @param family name of the store to snapshot
|
||||
* @return path to the snapshot home directory for the store/family
|
||||
*/
|
||||
public static Path getStoreSnapshotDirectory(Path regionDir, String family) {
|
||||
return HStore.getStoreHomedir(regionDir, Bytes.toBytes(family));
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the snapshot directory for each family to be added to the snapshot
|
||||
* @param snapshot description of the snapshot being taken
|
||||
|
@ -102,7 +89,7 @@ public class TakeSnapshotUtils {
|
|||
List<Path> familyDirs = new ArrayList<Path>(families.length);
|
||||
for (FileStatus family : families) {
|
||||
// build the reference directory name
|
||||
familyDirs.add(getStoreSnapshotDirectory(snapshotRegionDir, family.getPath().getName()));
|
||||
familyDirs.add(new Path(snapshotRegionDir, family.getPath().getName()));
|
||||
}
|
||||
return familyDirs;
|
||||
}
|
||||
|
|
|
@ -87,7 +87,7 @@ import org.apache.hadoop.hbase.master.MasterFileSystem;
|
|||
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
|
||||
import org.apache.hadoop.hbase.regionserver.HRegion;
|
||||
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
|
||||
import org.apache.hadoop.hbase.regionserver.StoreFile;
|
||||
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
|
||||
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
|
||||
import org.apache.hadoop.hbase.security.User;
|
||||
import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
|
||||
|
@ -619,7 +619,7 @@ public class HBaseFsck extends Configured implements Tool {
|
|||
for (Path path: allFiles.values()) {
|
||||
boolean isReference = false;
|
||||
try {
|
||||
isReference = StoreFile.isReference(path);
|
||||
isReference = StoreFileInfo.isReference(path);
|
||||
} catch (Throwable t) {
|
||||
// Ignore. Some files may not be store files at all.
|
||||
// For example, files under .oldlogs folder in .META.
|
||||
|
@ -628,7 +628,7 @@ public class HBaseFsck extends Configured implements Tool {
|
|||
}
|
||||
if (!isReference) continue;
|
||||
|
||||
Path referredToFile = StoreFile.getReferredToFile(path);
|
||||
Path referredToFile = StoreFileInfo.getReferredToFile(path);
|
||||
if (fs.exists(referredToFile)) continue; // good, expected
|
||||
|
||||
// Found a lingering reference file
|
||||
|
|
|
@ -49,20 +49,22 @@ public class HFileArchiveUtil {
|
|||
public static Path getStoreArchivePath(final Configuration conf, final String tableName,
|
||||
final String regionName, final String familyName) throws IOException {
|
||||
Path tableArchiveDir = getTableArchivePath(conf, tableName);
|
||||
return HStore.getStoreHomedir(tableArchiveDir, regionName, familyName);
|
||||
return HStore.getStoreHomedir(tableArchiveDir, regionName, Bytes.toBytes(familyName));
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the directory to archive a store directory
|
||||
* @param conf {@link Configuration} to read for the archive directory name
|
||||
* @param region parent region information under which the store currently
|
||||
* lives
|
||||
* @param family name of the family in the store
|
||||
* @param tableName table name under which the store currently lives
|
||||
* @param region parent region information under which the store currently lives
|
||||
* @param familyName name of the family in the store
|
||||
* @return {@link Path} to the directory to archive the given store or
|
||||
* <tt>null</tt> if it should not be archived
|
||||
*/
|
||||
public static Path getStoreArchivePath(Configuration conf, HRegion region, byte [] family){
|
||||
return getStoreArchivePath(conf, region.getRegionInfo(), region.getTableDir(), family);
|
||||
public static Path getStoreArchivePath(final Configuration conf, final String tableName,
|
||||
final HRegionInfo region, final String familyName) throws IOException {
|
||||
Path tableArchiveDir = getTableArchivePath(conf, tableName);
|
||||
return HStore.getStoreHomedir(tableArchiveDir, region, Bytes.toBytes(familyName));
|
||||
}
|
||||
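// Resulting layout sketch (illustration only; the archive directory name is an
// assumption here, not taken from this patch):
//   <hbase.rootdir>/<archive dir>/<tableName>/<region encoded name>/<familyName>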
|
||||
/**
|
||||
|
|
|
@ -58,7 +58,7 @@ import org.apache.hadoop.hbase.client.HBaseAdmin;
|
|||
import org.apache.hadoop.hbase.client.HTable;
|
||||
import org.apache.hadoop.hbase.client.NoServerForRegionException;
|
||||
import org.apache.hadoop.hbase.regionserver.HStore;
|
||||
import org.apache.hadoop.hbase.regionserver.StoreFile;
|
||||
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.collect.Lists;
|
||||
|
@ -669,11 +669,10 @@ public class RegionSplitter {
|
|||
HTableDescriptor htd = table.getTableDescriptor();
|
||||
// check every Column Family for that region
|
||||
for (HColumnDescriptor c : htd.getFamilies()) {
|
||||
Path cfDir = HStore.getStoreHomedir(tableDir, hri.getEncodedName(),
|
||||
c.getName());
|
||||
Path cfDir = HStore.getStoreHomedir(tableDir, hri, c.getName());
|
||||
if (fs.exists(cfDir)) {
|
||||
for (FileStatus file : fs.listStatus(cfDir)) {
|
||||
refFound |= StoreFile.isReference(file.getPath());
|
||||
refFound |= StoreFileInfo.isReference(file.getPath());
|
||||
if (refFound)
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -699,7 +699,7 @@ public class TestHFileOutputFormat {
|
|||
// deep inspection: get the StoreFile dir
|
||||
final Path storePath = HStore.getStoreHomedir(
|
||||
HTableDescriptor.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
|
||||
admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
|
||||
admin.getTableRegions(TABLE_NAME).get(0),
|
||||
FAMILIES[0]);
|
||||
assertEquals(0, fs.listStatus(storePath).length);
|
||||
|
||||
|
@ -767,7 +767,7 @@ public class TestHFileOutputFormat {
|
|||
// deep inspection: get the StoreFile dir
|
||||
final Path storePath = HStore.getStoreHomedir(
|
||||
HTableDescriptor.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME),
|
||||
admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(),
|
||||
admin.getTableRegions(TABLE_NAME).get(0),
|
||||
FAMILIES[0]);
|
||||
assertEquals(0, fs.listStatus(storePath).length);
|
||||
|
||||
|
|
|
@ -354,7 +354,7 @@ public class TestCatalogJanitor {
|
|||
Path rootdir = services.getMasterFileSystem().getRootDir();
|
||||
Path tabledir =
|
||||
HTableDescriptor.getTableDir(rootdir, htd.getName());
|
||||
Path storedir = HStore.getStoreHomedir(tabledir, splita.getEncodedName(),
|
||||
Path storedir = HStore.getStoreHomedir(tabledir, splita,
|
||||
htd.getColumnFamilies()[0].getName());
|
||||
Reference ref = Reference.createTopReference(Bytes.toBytes("ccc"));
|
||||
long now = System.currentTimeMillis();
|
||||
|
@ -600,8 +600,7 @@ public class TestCatalogJanitor {
|
|||
// the single test passes, but when the full suite is run, things get borked).
|
||||
FSUtils.setRootDir(fs.getConf(), rootdir);
|
||||
Path tabledir = HTableDescriptor.getTableDir(rootdir, htd.getName());
|
||||
Path storedir = HStore.getStoreHomedir(tabledir, parent.getEncodedName(),
|
||||
htd.getColumnFamilies()[0].getName());
|
||||
Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
|
||||
Path storeArchive = HFileArchiveUtil.getStoreArchivePath(services.getConfiguration(), parent,
|
||||
tabledir, htd.getColumnFamilies()[0].getName());
|
||||
LOG.debug("Table dir:" + tabledir);
|
||||
|
@ -682,8 +681,7 @@ public class TestCatalogJanitor {
|
|||
// the single test passes, but when the full suite is run, things get borked).
|
||||
FSUtils.setRootDir(fs.getConf(), rootdir);
|
||||
Path tabledir = HTableDescriptor.getTableDir(rootdir, parent.getTableName());
|
||||
Path storedir = HStore.getStoreHomedir(tabledir, parent.getEncodedName(),
|
||||
htd.getColumnFamilies()[0].getName());
|
||||
Path storedir = HStore.getStoreHomedir(tabledir, parent, htd.getColumnFamilies()[0].getName());
|
||||
System.out.println("Old root:" + rootdir);
|
||||
System.out.println("Old table:" + tabledir);
|
||||
System.out.println("Old store:" + storedir);
|
||||
|
@ -766,7 +764,7 @@ public class TestCatalogJanitor {
|
|||
throws IOException {
|
||||
Path rootdir = services.getMasterFileSystem().getRootDir();
|
||||
Path tabledir = HTableDescriptor.getTableDir(rootdir, parent.getTableName());
|
||||
Path storedir = HStore.getStoreHomedir(tabledir, daughter.getEncodedName(),
|
||||
Path storedir = HStore.getStoreHomedir(tabledir, daughter,
|
||||
htd.getColumnFamilies()[0].getName());
|
||||
Reference ref =
|
||||
top? Reference.createTopReference(midkey): Reference.createBottomReference(midkey);
|
||||
|
|
|
@ -70,9 +70,9 @@ public class TestHFileLinkCleaner {
|
|||
|
||||
Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
|
||||
Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
|
||||
tableName, hri.getEncodedName(), familyName);
|
||||
tableName, hri, familyName);
|
||||
Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
|
||||
tableLinkName, hriLink.getEncodedName(), familyName);
|
||||
tableLinkName, hriLink, familyName);
|
||||
|
||||
// Create hfile /hbase/table-link/region/cf/getEncodedName.HFILE
|
||||
Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
|
||||
|
|
|
@ -344,20 +344,10 @@ public class HFileReadWriteTest {
|
|||
columnDescriptor.setDataBlockEncoding(dataBlockEncoding);
|
||||
HRegionInfo regionInfo = new HRegionInfo();
|
||||
HTableDescriptor htd = new HTableDescriptor(TABLE_NAME);
|
||||
HRegion region = new HRegion(outputDir, null, fs, conf, regionInfo, htd,
|
||||
null);
|
||||
HStore store = new HStore(outputDir, region, columnDescriptor, fs, conf);
|
||||
HRegion region = new HRegion(outputDir, null, fs, conf, regionInfo, htd, null);
|
||||
HStore store = new HStore(region, columnDescriptor, conf);
|
||||
|
||||
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf,
|
||||
new CacheConfig(conf), fs, blockSize)
|
||||
.withOutputDir(outputDir)
|
||||
.withCompression(compression)
|
||||
.withDataBlockEncoder(dataBlockEncoder)
|
||||
.withBloomType(bloomType)
|
||||
.withMaxKeyCount(maxKeyCount)
|
||||
.withChecksumType(HFile.DEFAULT_CHECKSUM_TYPE)
|
||||
.withBytesPerChecksum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
|
||||
.build();
|
||||
StoreFile.Writer writer = store.createWriterInTmp(maxKeyCount, compression, false);
|
||||
|
||||
StatisticsPrinter statsPrinter = new StatisticsPrinter();
|
||||
statsPrinter.startThread();
|
||||
|
|
|
@ -165,7 +165,7 @@ public class TestCacheOnWriteInSchema {
|
|||
hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
|
||||
|
||||
region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
|
||||
store = new HStore(basedir, region, hcd, fs, conf);
|
||||
store = new HStore(region, hcd, conf);
|
||||
}
|
||||
|
||||
@After
|
||||
|
|
|
@ -525,7 +525,7 @@ public class TestCompaction extends HBaseTestCase {
|
|||
assertEquals(compactionThreshold, s.getStorefilesCount());
|
||||
assertTrue(s.getStorefilesSize() > 15*1000);
|
||||
// and no new store files persisted past compactStores()
|
||||
FileStatus[] ls = FileSystem.get(conf).listStatus(r.getTmpDir());
|
||||
FileStatus[] ls = r.getFilesystem().listStatus(r.getRegionFileSystem().getTempDir());
|
||||
assertEquals(0, ls.length);
|
||||
|
||||
} finally {
|
||||
|
@ -605,16 +605,14 @@ public class TestCompaction extends HBaseTestCase {
|
|||
List<Path> newFiles = tool.compactForTesting(storeFiles, false);
|
||||
|
||||
// Now lets corrupt the compacted file.
|
||||
FileSystem fs = FileSystem.get(conf);
|
||||
FileSystem fs = store.getFileSystem();
|
||||
// default compaction policy created one and only one new compacted file
|
||||
Path origPath = newFiles.get(0);
|
||||
Path homedir = store.getHomedir();
|
||||
Path dstPath = new Path(homedir, origPath.getName());
|
||||
FSDataOutputStream stream = fs.create(origPath, null, true, 512, (short) 3,
|
||||
(long) 1024,
|
||||
null);
|
||||
Path dstPath = store.getRegionFileSystem().createTempName();
|
||||
FSDataOutputStream stream = fs.create(dstPath, null, true, 512, (short)3, (long)1024, null);
|
||||
stream.writeChars("CORRUPT FILE!!!!");
|
||||
stream.close();
|
||||
Path origPath = store.getRegionFileSystem().commitStoreFile(
|
||||
Bytes.toString(COLUMN_FAMILY), dstPath);
|
||||
|
||||
try {
|
||||
((HStore)store).moveFileIntoPlace(origPath);
|
||||
|
|
|
@ -93,17 +93,16 @@ public class TestDefaultCompactSelection extends TestCase {
|
|||
htd.addFamily(hcd);
|
||||
HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
|
||||
|
||||
hlog = HLogFactory.createHLog(fs, basedir,
|
||||
logName, conf);
|
||||
hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
|
||||
region = HRegion.createHRegion(info, basedir, conf, htd);
|
||||
HRegion.closeHRegion(region);
|
||||
Path tableDir = new Path(basedir, Bytes.toString(htd.getName()));
|
||||
region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
|
||||
|
||||
store = new HStore(basedir, region, hcd, fs, conf);
|
||||
store = new HStore(region, hcd, conf);
|
||||
|
||||
TEST_FILE = StoreFile.getRandomFilename(fs, store.getHomedir());
|
||||
fs.create(TEST_FILE);
|
||||
TEST_FILE = region.getRegionFileSystem().createTempName();
|
||||
fs.createNewFile(TEST_FILE);
|
||||
}
|
||||
|
||||
@After
|
||||
|
|
|
@ -3426,8 +3426,7 @@ public class TestHRegion extends HBaseTestCase {
|
|||
// static method is used by load balancer or other components
|
||||
HDFSBlocksDistribution blocksDistribution2 =
|
||||
HRegion.computeHDFSBlocksDistribution(htu.getConfiguration(),
|
||||
firstRegion.getTableDesc(),
|
||||
firstRegion.getRegionInfo().getEncodedName());
|
||||
firstRegion.getTableDesc(), firstRegion.getRegionInfo());
|
||||
long uniqueBlocksWeight2 =
|
||||
blocksDistribution2.getUniqueBlocksTotalWeight();
|
||||
|
||||
|
|
|
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;

 import java.io.IOException;
+import java.util.Collection;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -71,4 +72,34 @@ public class TestHRegionFileSystem {
     fs.delete(rootDir, true);
   }
+
+  @Test
+  public void testTempAndCommit() throws IOException {
+    Path rootDir = TEST_UTIL.getDataTestDirOnTestFS("testTempAndCommit");
+    FileSystem fs = TEST_UTIL.getTestFileSystem();
+    Configuration conf = TEST_UTIL.getConfiguration();
+
+    // Create a Region
+    String familyName = "cf";
+    HRegionInfo hri = new HRegionInfo(Bytes.toBytes("TestTable"));
+    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri);
+
+    // New region, no store files
+    Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName);
+    assertEquals(0, storeFiles != null ? storeFiles.size() : 0);
+
+    // Create a new file in temp (no files in the family)
+    Path buildPath = regionFs.createTempName();
+    fs.createNewFile(buildPath);
+    storeFiles = regionFs.getStoreFiles(familyName);
+    assertEquals(0, storeFiles != null ? storeFiles.size() : 0);
+
+    // commit the file
+    Path dstPath = regionFs.commitStoreFile(familyName, buildPath);
+    storeFiles = regionFs.getStoreFiles(familyName);
+    assertEquals(0, storeFiles != null ? storeFiles.size() : 0);
+    assertFalse(fs.exists(buildPath));
+
+    fs.delete(rootDir, true);
+  }
 }
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
@@ -158,7 +159,7 @@ public class TestStore extends TestCase {
     HLog hlog = HLogFactory.createHLog(fs, basedir, logName, conf);
     HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);

-    store = new HStore(basedir, region, hcd, fs, conf);
+    store = new HStore(region, hcd, conf);
   }

   /**
@@ -323,10 +324,7 @@ public class TestStore extends TestCase {
     w.close();
     this.store.close();
     // Reopen it... should pick up two files
-    this.store = new HStore(storedir.getParent().getParent(),
-        this.store.getHRegion(),
-        this.store.getFamily(), fs, c);
-    System.out.println(this.store.getRegionInfo().getEncodedName());
+    this.store = new HStore(this.store.getHRegion(), this.store.getFamily(), c);
     assertEquals(2, this.store.getStorefilesCount());

     result = HBaseTestingUtility.getFromStoreFile(store,
@@ -651,10 +649,10 @@ public class TestStore extends TestCase {
         store.add(new KeyValue(row, family, qf3, 1, (byte[])null));

         LOG.info("Before flush, we should have no files");
-        FileStatus[] files = fs.listStatus(store.getHomedir());
-        Path[] paths = FileUtil.stat2Paths(files);
-        System.err.println("Got paths: " + Joiner.on(",").join(paths));
-        assertEquals(0, paths.length);
+
+        Collection<StoreFileInfo> files =
+          store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
+        assertEquals(0, files != null ? files.size() : 0);

         //flush
         try {
@@ -666,10 +664,8 @@ public class TestStore extends TestCase {
         }

         LOG.info("After failed flush, we should still have no files!");
-        files = fs.listStatus(store.getHomedir());
-        paths = FileUtil.stat2Paths(files);
-        System.err.println("Got paths: " + Joiner.on(",").join(paths));
-        assertEquals(0, paths.length);
+        files = store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
+        assertEquals(0, files != null ? files.size() : 0);
         return null;
       }
     });
@@ -198,9 +198,10 @@ public class TestStoreFile extends HBaseTestCase {
         HFileLink.createHFileLinkName(hri, storeFilePath.getName()));

     // Try to open store file from link
-    StoreFile hsf = new StoreFile(this.fs, linkFilePath, testConf, cacheConf,
+    StoreFileInfo storeFileInfo = new StoreFileInfo(testConf, this.fs, linkFilePath);
+    StoreFile hsf = new StoreFile(this.fs, storeFileInfo, testConf, cacheConf,
         BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
-    assertTrue(hsf.isLink());
+    assertTrue(storeFileInfo.isLink());

     // Now confirm that I can read from the link
     int count = 1;
@@ -212,30 +213,6 @@ public class TestStoreFile extends HBaseTestCase {
     assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
   }

-  /**
-   * Validate that we can handle valid tables with '.', '_', and '-' chars.
-   */
-  public void testStoreFileNames() {
-    String[] legalHFileLink = { "MyTable_02=abc012-def345", "MyTable_02.300=abc012-def345",
-      "MyTable_02-400=abc012-def345", "MyTable_02-400.200=abc012-def345",
-      "MyTable_02=abc012-def345_SeqId_1_", "MyTable_02=abc012-def345_SeqId_20_" };
-    for (String name: legalHFileLink) {
-      assertTrue("should be a valid link: " + name, HFileLink.isHFileLink(name));
-      assertTrue("should be a valid StoreFile" + name, StoreFile.validateStoreFileName(name));
-      assertFalse("should not be a valid reference: " + name, StoreFile.isReference(name));
-
-      String refName = name + ".6789";
-      assertTrue("should be a valid link reference: " + refName, StoreFile.isReference(refName));
-      assertTrue("should be a valid StoreFile" + refName, StoreFile.validateStoreFileName(refName));
-    }
-
-    String[] illegalHFileLink = { ".MyTable_02=abc012-def345", "-MyTable_02.300=abc012-def345",
-      "MyTable_02-400=abc0_12-def345", "MyTable_02-400.200=abc012-def345...." };
-    for (String name: illegalHFileLink) {
-      assertFalse("should not be a valid link: " + name, HFileLink.isHFileLink(name));
-    }
-  }
-
   /**
    * This test creates an hfile and then the dir structures and files to verify that references
    * to hfilelinks (created by snapshot clones) can be properly interpreted.
@@ -0,0 +1,96 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestCase;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.io.hfile.BlockCache;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.CacheStats;
+import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
+import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
+import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
+import org.apache.hadoop.hbase.util.BloomFilterFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ChecksumType;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+
+/**
+ * Test HStoreFile
+ */
+@Category(SmallTests.class)
+public class TestStoreFileInfo extends HBaseTestCase {
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  /**
+   * Validate that we can handle valid tables with '.', '_', and '-' chars.
+   */
+  public void testStoreFileNames() {
+    String[] legalHFileLink = { "MyTable_02=abc012-def345", "MyTable_02.300=abc012-def345",
+      "MyTable_02-400=abc012-def345", "MyTable_02-400.200=abc012-def345",
+      "MyTable_02=abc012-def345_SeqId_1_", "MyTable_02=abc012-def345_SeqId_20_" };
+    for (String name: legalHFileLink) {
+      assertTrue("should be a valid link: " + name, HFileLink.isHFileLink(name));
+      assertTrue("should be a valid StoreFile" + name, StoreFileInfo.validateStoreFileName(name));
+      assertFalse("should not be a valid reference: " + name, StoreFileInfo.isReference(name));
+
+      String refName = name + ".6789";
+      assertTrue("should be a valid link reference: " + refName,
+          StoreFileInfo.isReference(refName));
+      assertTrue("should be a valid StoreFile" + refName,
+          StoreFileInfo.validateStoreFileName(refName));
+    }
+
+    String[] illegalHFileLink = { ".MyTable_02=abc012-def345", "-MyTable_02.300=abc012-def345",
+      "MyTable_02-400=abc0_12-def345", "MyTable_02-400.200=abc012-def345...." };
+    for (String name: illegalHFileLink) {
+      assertFalse("should not be a valid link: " + name, HFileLink.isHFileLink(name));
+    }
+  }
+}
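The checks exercised by the new TestStoreFileInfo can be combined into a simple classifier for names found under a family directory. A sketch using only the static predicates the test calls; the enum, method, and class names are illustrative, not part of the patch:

import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;

// Illustrative helper, not part of the commit.
public class StoreFileNameSketch {
  enum Kind { LINK, REFERENCE, PLAIN, INVALID }

  /** Classifies a store file name the way the test distinguishes them. */
  static Kind classify(String name) {
    if (!StoreFileInfo.validateStoreFileName(name)) {
      return Kind.INVALID;
    }
    if (StoreFileInfo.isReference(name)) {
      // e.g. "MyTable_02=abc012-def345.6789" (a link name plus a reference suffix)
      return Kind.REFERENCE;
    }
    if (HFileLink.isHFileLink(name)) {
      // e.g. "MyTable_02=abc012-def345"
      return Kind.LINK;
    }
    return Kind.PLAIN;
  }
}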
@@ -574,9 +574,8 @@ public class TestWALReplay {
     HRegion region = new HRegion(basedir, wal, this.fs, this.conf, hri, htd,
         rsServices) {
       @Override
-      protected HStore instantiateHStore(Path tableDir, HColumnDescriptor c)
-          throws IOException {
-        return new HStore(tableDir, this, c, fs, conf) {
+      protected HStore instantiateHStore(final HColumnDescriptor family) throws IOException {
+        return new HStore(this, family, conf) {
           @Override
           protected Path flushCache(final long logCacheFlushId,
               SortedSet<KeyValue> snapshot,
@@ -47,7 +47,7 @@ import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -110,7 +110,7 @@ public class TestRestoreSnapshotHelper {
     String[] files = getHFiles(HTableDescriptor.getTableDir(rootDir, htdClone.getName()));
     assertEquals(2, files.length);
     assertTrue(files[0] + " should be a HFileLink", HFileLink.isHFileLink(files[0]));
-    assertTrue(files[1] + " should be a Referene", StoreFile.isReference(files[1]));
+    assertTrue(files[1] + " should be a Referene", StoreFileInfo.isReference(files[1]));
     assertEquals(sourceHtd.getNameAsString(), HFileLink.getReferencedTableName(files[0]));
     assertEquals(TEST_HFILE, HFileLink.getReferencedHFileName(files[0]));
     Path refPath = getReferredToFile(files[1]);
@@ -182,7 +182,7 @@ public class TestRestoreSnapshotHelper {

   private Path getReferredToFile(final String referenceName) {
     Path fakeBasePath = new Path(new Path("table", "region"), "cf");
-    return StoreFile.getReferredToFile(new Path(fakeBasePath, referenceName));
+    return StoreFileInfo.getReferredToFile(new Path(fakeBasePath, referenceName));
   }

   private String[] getHFiles(final Path tableDir) throws IOException {
@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.Store;
@@ -222,8 +223,11 @@ public class HFileArchiveTestingUtil {
    * @param store store that is archiving files
    * @return {@link Path} to the store archive directory for the given region
    */
-  public static Path getStoreArchivePath(Configuration conf, HRegion region, Store store) {
-    return HFileArchiveUtil.getStoreArchivePath(conf, region, store.getFamily().getName());
+  public static Path getStoreArchivePath(Configuration conf, HRegion region, Store store)
+      throws IOException {
+    HRegionInfo hri = region.getRegionInfo();
+    return HFileArchiveUtil.getStoreArchivePath(conf, hri.getTableNameAsString(), hri,
+        store.getFamily().getNameAsString());
   }

   public static Path getStoreArchivePath(HBaseTestingUtility util, String tableName,
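Both HFileArchiveUtil overloads used in the test utilities now resolve the per-store archive directory from an HRegionInfo rather than a live HRegion. A sketch of the table-name variant shown in the hunk above; the helper class and method are illustrative, and the IOException declaration follows what the refactored utility declares:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;

// Illustrative helper, not part of the commit.
public class ArchivePathSketch {
  /** Resolves where a family's files for the given region get archived. */
  static Path storeArchiveDir(Configuration conf, HRegionInfo regionInfo, String familyName)
      throws IOException {
    // Same overload the refactored HFileArchiveTestingUtil uses:
    // (conf, table name, region info, family name).
    return HFileArchiveUtil.getStoreArchivePath(
        conf, regionInfo.getTableNameAsString(), regionInfo, familyName);
  }
}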
@@ -65,14 +65,5 @@ public class TestHFileArchiveUtil {
     assertNotNull(HFileArchiveUtil.getStoreArchivePath(conf, region, tabledir, family));
     conf = new Configuration();
     assertNotNull(HFileArchiveUtil.getStoreArchivePath(conf, region, tabledir, family));
-
-    // do a little mocking of a region to get the same results
-    HRegion mockRegion = Mockito.mock(HRegion.class);
-    Mockito.when(mockRegion.getRegionInfo()).thenReturn(region);
-    Mockito.when(mockRegion.getTableDir()).thenReturn(tabledir);
-
-    assertNotNull(HFileArchiveUtil.getStoreArchivePath(null, mockRegion, family));
-    conf = new Configuration();
-    assertNotNull(HFileArchiveUtil.getStoreArchivePath(conf, mockRegion, family));
   }
 }