HBASE-7809 Refactor Split/Merge to use HRegionFileSystem

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1457148 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
mbertozzi 2013-03-15 22:19:34 +00:00
parent 436c63c497
commit eb039567de
22 changed files with 502 additions and 665 deletions

View File

@ -265,35 +265,6 @@ public class HFileArchiver {
}
}
/**
* Archive the specified store file by moving it into the archive directory
* computed by {@link HFileArchiveUtil#getStoreArchivePath}.
* @param fs the filesystem where the store files live
* @param regionInfo region hosting the store files
* @param conf {@link Configuration} to examine to determine the archive directory
* @param tableDir {@link Path} to where the table is being stored (for building the archive path)
* @param family the family hosting the store files
* @param storeFile file to be archived
* @throws IOException if the archive directory cannot be created or the file
*         could not be correctly disposed.
*/
public static void archiveStoreFile(FileSystem fs, HRegionInfo regionInfo,
Configuration conf, Path tableDir, byte[] family, Path storeFile) throws IOException {
Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
// make sure we don't archive if we can't and that the archive dir exists
if (!fs.mkdirs(storeArchiveDir)) {
throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
+ Bytes.toString(family) + ", deleting compacted files instead.");
}
// do the actual archive; the archive start time is used to disambiguate
// name collisions with files already present in the archive dir
long start = EnvironmentEdgeManager.currentTimeMillis();
File file = new FileablePath(fs, storeFile);
if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) {
throw new IOException("Failed to archive/delete the file for region:"
+ regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family)
+ " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
}
}
/**
* Archive the given files and resolve any conflicts with existing files via appending the time
* archiving started (so all conflicts in the same group have the same timestamp appended).

View File

@ -30,10 +30,8 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HRegionInfo;
@ -43,10 +41,8 @@ import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
@ -226,10 +222,8 @@ class CatalogJanitor extends Chore {
boolean result = false;
// Run checks on each daughter split.
PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(rowContent);
Pair<Boolean, Boolean> a =
checkDaughterInFs(parent, daughters.getFirst());
Pair<Boolean, Boolean> b =
checkDaughterInFs(parent, daughters.getSecond());
Pair<Boolean, Boolean> a = checkDaughterInFs(parent, daughters.getFirst());
Pair<Boolean, Boolean> b = checkDaughterInFs(parent, daughters.getSecond());
if (hasNoReferences(a) && hasNoReferences(b)) {
LOG.debug("Deleting region " + parent.getRegionNameAsString() +
" because daughter splits no longer hold references");
@ -284,45 +278,35 @@ class CatalogJanitor extends Chore {
*/
Pair<Boolean, Boolean> checkDaughterInFs(final HRegionInfo parent, final HRegionInfo daughter)
throws IOException {
boolean references = false;
boolean exists = false;
if (daughter == null) {
return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
}
FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
Path rootdir = this.services.getMasterFileSystem().getRootDir();
Path tabledir = new Path(rootdir, daughter.getTableNameAsString());
Path regiondir = new Path(tabledir, daughter.getEncodedName());
exists = fs.exists(regiondir);
if (!exists) {
LOG.warn("Daughter regiondir does not exist: " + regiondir.toString());
return new Pair<Boolean, Boolean>(exists, Boolean.FALSE);
Path tabledir = HTableDescriptor.getTableDir(rootdir, daughter.getTableName());
HRegionFileSystem regionFs = null;
try {
regionFs = HRegionFileSystem.openRegionFromFileSystem(
this.services.getConfiguration(), fs, tabledir, daughter, true);
} catch (IOException e) {
LOG.warn("Daughter region does not exist: " + daughter.getEncodedName());
return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
}
HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableName());
boolean references = false;
HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableNameAsString());
for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
Path p = HStore.getStoreHomedir(tabledir, daughter, family.getName());
if (!fs.exists(p)) continue;
// Look for reference files. Call listStatus with anonymous instance of PathFilter.
FileStatus [] ps = FSUtils.listStatus(fs, p,
new PathFilter () {
public boolean accept(Path path) {
return StoreFileInfo.isReference(path);
}
}
);
if (ps != null && ps.length > 0) {
references = true;
if ((references = regionFs.hasReferences(family.getNameAsString()))) {
break;
}
}
return new Pair<Boolean, Boolean>(Boolean.valueOf(exists),
Boolean.valueOf(references));
return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.valueOf(references));
}
private HTableDescriptor getTableDescriptor(byte[] tableName)
throws FileNotFoundException, IOException {
return this.services.getTableDescriptors().get(Bytes.toString(tableName));
private HTableDescriptor getTableDescriptor(final String tableName)
throws FileNotFoundException, IOException {
return this.services.getTableDescriptors().get(tableName);
}
}

View File

@ -77,7 +77,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
@ -216,12 +215,6 @@ public class HRegion implements HeapSize { // , Writable{
// TODO: account for each registered handler in HeapSize computation
private Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();
//These variable are just used for getting data out of the region, to test on
//client side
// private int numStores = 0;
// private int [] storeSize = null;
// private byte [] name = null;
public final AtomicLong memstoreSize = new AtomicLong(0);
// Debug possible data loss due to WAL off
@ -574,7 +567,7 @@ public class HRegion implements HeapSize { // , Writable{
// Get rid of any splits or merges that were lost in-progress. Clean out
// these directories here on open. We may be opening a region that was
// being split but we crashed in the middle of it all.
SplitTransaction.cleanupAnySplitDetritus(this);
fs.cleanupAnySplitDetritus();
fs.cleanupMergesDir();
this.writestate.setReadOnly(this.htableDescriptor.isReadOnly());
@ -664,7 +657,7 @@ public class HRegion implements HeapSize { // , Writable{
mvcc.initialize(maxMemstoreTS + 1);
// Recover any edits if available.
maxSeqId = Math.max(maxSeqId, replayRecoveredEditsIfAny(
this.getRegionDir(), maxSeqIdInStores, reporter, status));
this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
return maxSeqId;
}
@ -817,7 +810,7 @@ public class HRegion implements HeapSize { // , Writable{
*
* @throws IOException e
*/
public List<StoreFile> close() throws IOException {
public Map<byte[], List<StoreFile>> close() throws IOException {
return close(false);
}
@ -837,7 +830,7 @@ public class HRegion implements HeapSize { // , Writable{
*
* @throws IOException e
*/
public List<StoreFile> close(final boolean abort) throws IOException {
public Map<byte[], List<StoreFile>> close(final boolean abort) throws IOException {
// Only allow one thread to close at a time. Serialize them so dual
// threads attempting to close will run up against each other.
MonitoredTask status = TaskMonitor.get().createStatus(
@ -854,9 +847,8 @@ public class HRegion implements HeapSize { // , Writable{
}
}
private List<StoreFile> doClose(
final boolean abort, MonitoredTask status)
throws IOException {
private Map<byte[], List<StoreFile>> doClose(final boolean abort, MonitoredTask status)
throws IOException {
if (isClosed()) {
LOG.warn("Region " + this + " already closed");
return null;
@ -902,28 +894,35 @@ public class HRegion implements HeapSize { // , Writable{
internalFlushcache(status);
}
List<StoreFile> result = new ArrayList<StoreFile>();
Map<byte[], List<StoreFile>> result =
new TreeMap<byte[], List<StoreFile>>(Bytes.BYTES_COMPARATOR);
if (!stores.isEmpty()) {
// initialize the thread pool for closing stores in parallel.
ThreadPoolExecutor storeCloserThreadPool =
getStoreOpenAndCloseThreadPool("StoreCloserThread-" + this.getRegionNameAsString());
CompletionService<Collection<StoreFile>> completionService =
new ExecutorCompletionService<Collection<StoreFile>>(storeCloserThreadPool);
CompletionService<Pair<byte[], Collection<StoreFile>>> completionService =
new ExecutorCompletionService<Pair<byte[], Collection<StoreFile>>>(storeCloserThreadPool);
// close each store in parallel
for (final Store store : stores.values()) {
completionService
.submit(new Callable<Collection<StoreFile>>() {
public Collection<StoreFile> call() throws IOException {
return store.close();
.submit(new Callable<Pair<byte[], Collection<StoreFile>>>() {
public Pair<byte[], Collection<StoreFile>> call() throws IOException {
return new Pair<byte[], Collection<StoreFile>>(
store.getFamily().getName(), store.close());
}
});
}
try {
for (int i = 0; i < stores.size(); i++) {
Future<Collection<StoreFile>> future = completionService.take();
Collection<StoreFile> storeFileList = future.get();
result.addAll(storeFileList);
Future<Pair<byte[], Collection<StoreFile>>> future = completionService.take();
Pair<byte[], Collection<StoreFile>> storeFiles = future.get();
List<StoreFile> familyFiles = result.get(storeFiles.getFirst());
if (familyFiles == null) {
familyFiles = new ArrayList<StoreFile>();
result.put(storeFiles.getFirst(), familyFiles);
}
familyFiles.addAll(storeFiles.getSecond());
}
} catch (InterruptedException e) {
throw new IOException(e);
@ -1062,11 +1061,6 @@ public class HRegion implements HeapSize { // , Writable{
return this.baseConf;
}
/** @return region directory Path */
public Path getRegionDir() {
return fs.getRegionDir();
}
/** @return {@link FileSystem} being used by this region */
public FileSystem getFilesystem() {
return fs.getFileSystem();
@ -2348,7 +2342,7 @@ public class HRegion implements HeapSize { // , Writable{
// 1. dump region meta info into the snapshot directory
LOG.debug("Storing region-info for snapshot.");
HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf,
getFilesystem(), snapshotDir, getRegionInfo());
this.fs.getFileSystem(), snapshotDir, getRegionInfo());
// 2. iterate through all the stores in the region
LOG.debug("Creating references for hfiles");
@ -3290,11 +3284,6 @@ public class HRegion implements HeapSize { // , Writable{
return this.getRegionNameAsString();
}
/** @return Path of region base directory */
public Path getTableDir() {
return this.fs.getTableDir();
}
/**
* RegionScannerImpl is used to combine scanners from multiple Stores (aka column families).
*/
@ -4022,7 +4011,8 @@ public class HRegion implements HeapSize { // , Writable{
*/
public static HRegion openHRegion(final HRegion other, final CancelableProgressable reporter)
throws IOException {
HRegion r = newHRegion(other.getTableDir(), other.getLog(), other.getFilesystem(),
HRegionFileSystem regionFs = other.getRegionFileSystem();
HRegion r = newHRegion(regionFs.getTableDir(), other.getLog(), regionFs.getFileSystem(),
other.baseConf, other.getRegionInfo(), other.getTableDesc(), null);
return r.openHRegion(reporter);
}
@ -4056,22 +4046,14 @@ public class HRegion implements HeapSize { // , Writable{
/**
* Create a daughter region from given a temp directory with the region data.
* @param hri Spec. for daughter region to open.
* @param daughterTmpDir Directory that contains region files.
* @throws IOException
*/
HRegion createDaughterRegion(final HRegionInfo hri, final Path daughterTmpDir)
throws IOException {
FileSystem fs = this.fs.getFileSystem();
HRegion r = HRegion.newHRegion(this.getTableDir(), this.getLog(), fs,
HRegion createDaughterRegionFromSplits(final HRegionInfo hri) throws IOException {
HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getLog(), fs.getFileSystem(),
this.getBaseConf(), hri, this.getTableDesc(), rsServices);
r.readRequestsCount.set(this.getReadRequestsCount() / 2);
r.writeRequestsCount.set(this.getWriteRequestsCount() / 2);
// Move the tmp dir in the expected location
if (daughterTmpDir != null && fs.exists(daughterTmpDir)) {
if (!fs.rename(daughterTmpDir, r.getRegionDir())) {
LOG.warn("Unable to rename " + daughterTmpDir + " to " + r.getRegionDir());
}
}
fs.commitDaughterRegion(hri);
return r;
}
@ -4086,8 +4068,7 @@ public class HRegion implements HeapSize { // , Writable{
* @throws IOException
*/
// TODO remove since only test and merge use this
public static void addRegionToMETA(HRegion meta, HRegion r)
throws IOException {
public static void addRegionToMETA(final HRegion meta, final HRegion r) throws IOException {
meta.checkResources();
// The row key is the region name
byte[] row = r.getRegionName();
@ -4144,24 +4125,6 @@ public class HRegion implements HeapSize { // , Writable{
(Bytes.compareTo(info.getEndKey(), row) > 0));
}
/**
* Make the directories for a specific column family under the given region.
*
* @param fs the file system
* @param tabledir base directory where region will live (usually the table dir)
* @param hri region for which the family directory is created
* @param colFamily the column family
* @return the {@link Path} of the family directory; note that a failed
*         mkdirs only logs a warning, the path is returned regardless
* @throws IOException if the filesystem call fails
*/
private static Path makeColumnFamilyDirs(FileSystem fs, Path tabledir,
final HRegionInfo hri, byte [] colFamily) throws IOException {
Path dir = HStore.getStoreHomedir(tabledir, hri, colFamily);
if (!fs.mkdirs(dir)) {
LOG.warn("Failed to create " + dir);
}
return dir;
}
/**
* Merge two HRegions. The regions must be adjacent and must not overlap.
*
@ -4202,37 +4165,35 @@ public class HRegion implements HeapSize { // , Writable{
* @return new merged region
* @throws IOException
*/
public static HRegion merge(HRegion a, HRegion b)
throws IOException {
public static HRegion merge(final HRegion a, final HRegion b) throws IOException {
if (!a.getRegionInfo().getTableNameAsString().equals(
b.getRegionInfo().getTableNameAsString())) {
throw new IOException("Regions do not belong to the same table");
}
FileSystem fs = a.getFilesystem();
FileSystem fs = a.getRegionFileSystem().getFileSystem();
// Make sure each region's cache is empty
a.flushcache();
b.flushcache();
// Compact each region so we only have one store file per family
a.compactStores(true);
if (LOG.isDebugEnabled()) {
LOG.debug("Files for region: " + a);
FSUtils.logFileSystemState(fs, a.getRegionDir(), LOG);
a.getRegionFileSystem().logFileSystemState(LOG);
}
b.compactStores(true);
if (LOG.isDebugEnabled()) {
LOG.debug("Files for region: " + b);
FSUtils.logFileSystemState(fs, b.getRegionDir(), LOG);
b.getRegionFileSystem().logFileSystemState(LOG);
}
Configuration conf = a.baseConf;
HTableDescriptor tabledesc = a.getTableDesc();
HLog log = a.getLog();
Path tableDir = a.getTableDir();
Path tableDir = a.getRegionFileSystem().getTableDir();
// Presume both are of same region type -- i.e. both user or catalog
// table regions. This way can use comparator.
final byte[] startKey =
@ -4258,43 +4219,34 @@ public class HRegion implements HeapSize { // , Writable{
? b.getEndKey()
: a.getEndKey());
HRegionInfo newRegionInfo =
new HRegionInfo(tabledesc.getName(), startKey, endKey);
LOG.info("Creating new region " + newRegionInfo.toString());
String encodedName = newRegionInfo.getEncodedName();
Path newRegionDir = HRegion.getRegionDir(a.getTableDir(), encodedName);
if(fs.exists(newRegionDir)) {
throw new IOException("Cannot merge; target file collision at " +
newRegionDir);
}
fs.mkdirs(newRegionDir);
HRegionInfo newRegionInfo = new HRegionInfo(tabledesc.getName(), startKey, endKey);
LOG.info("Creating new region " + newRegionInfo);
HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
conf, fs, tableDir, newRegionInfo);
LOG.info("starting merge of regions: " + a + " and " + b +
" into new region " + newRegionInfo.toString() +
" with start key <" + Bytes.toStringBinary(startKey) + "> and end key <" +
Bytes.toStringBinary(endKey) + ">");
// Move HStoreFiles under new region directory
Map<byte [], List<StoreFile>> byFamily =
new TreeMap<byte [], List<StoreFile>>(Bytes.BYTES_COMPARATOR);
byFamily = filesByFamily(byFamily, a.close());
byFamily = filesByFamily(byFamily, b.close());
for (Map.Entry<byte [], List<StoreFile>> es : byFamily.entrySet()) {
byte [] colFamily = es.getKey();
Path storeDir = makeColumnFamilyDirs(fs, tableDir, newRegionInfo, colFamily);
// Because we compacted the source regions we should have no more than two
// HStoreFiles per family and there will be no reference store
List<StoreFile> srcFiles = es.getValue();
for (StoreFile hsf: srcFiles) {
StoreFile.rename(fs, hsf.getPath(), StoreFile.getUniqueFile(fs, storeDir));
}
}
// Because we compacted the source regions we should have no more than two
// StoreFiles per family and there will be no reference store
Map<byte[], List<StoreFile>> aStoreFiles = a.close();
Map<byte[], List<StoreFile>> bStoreFiles = b.close();
// Move StoreFiles under new region directory
regionFs.commitStoreFiles(aStoreFiles);
regionFs.commitStoreFiles(bStoreFiles);
if (LOG.isDebugEnabled()) {
LOG.debug("Files for new region");
FSUtils.logFileSystemState(fs, newRegionDir, LOG);
regionFs.logFileSystemState(LOG);
}
// Create HRegion and update the metrics
HRegion dstRegion = HRegion.newHRegion(tableDir, log, fs, conf,
newRegionInfo, a.getTableDesc(), null);
newRegionInfo, tabledesc, null);
dstRegion.readRequestsCount.set(a.readRequestsCount.get() + b.readRequestsCount.get());
dstRegion.writeRequestsCount.set(a.writeRequestsCount.get() + b.writeRequestsCount.get());
dstRegion.checkAndMutateChecksFailed.set(
@ -4303,44 +4255,23 @@ public class HRegion implements HeapSize { // , Writable{
a.checkAndMutateChecksPassed.get() + b.checkAndMutateChecksPassed.get());
dstRegion.initialize();
dstRegion.compactStores();
if (LOG.isDebugEnabled()) {
LOG.debug("Files for new region");
FSUtils.logFileSystemState(fs, dstRegion.getRegionDir(), LOG);
dstRegion.getRegionFileSystem().logFileSystemState(LOG);
}
// delete out the 'A' region
HFileArchiver.archiveRegion(fs,
FSUtils.getRootDir(a.getBaseConf()), a.getTableDir(), a.getRegionDir());
HRegionFileSystem.deleteRegionFromFileSystem(
a.getBaseConf(), fs, tableDir, a.getRegionInfo());
// delete out the 'B' region
HFileArchiver.archiveRegion(fs,
FSUtils.getRootDir(b.getBaseConf()), b.getTableDir(), b.getRegionDir());
HRegionFileSystem.deleteRegionFromFileSystem(
b.getBaseConf(), fs, tableDir, b.getRegionInfo());
LOG.info("merge completed. New region is " + dstRegion);
return dstRegion;
}
/**
* Fills a map with a vector of store files keyed by column family.
* @param byFamily Map to fill.
* @param storeFiles Store files to process.
* @return Returns <code>byFamily</code>, for call chaining.
*/
private static Map<byte [], List<StoreFile>> filesByFamily(
Map<byte [], List<StoreFile>> byFamily, List<StoreFile> storeFiles) {
for (StoreFile src: storeFiles) {
byte [] family = src.getFamily();
List<StoreFile> v = byFamily.get(family);
if (v == null) {
// first file seen for this family; start its list
v = new ArrayList<StoreFile>();
byFamily.put(family, v);
}
v.add(src);
}
return byFamily;
}
/**
* @return True if needs a major compaction.
* @throws IOException

View File

@ -24,6 +24,8 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import org.apache.commons.logging.Log;
@ -36,11 +38,13 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Bytes;
@ -108,14 +112,14 @@ public class HRegionFileSystem {
// Temp Helpers
// ===========================================================================
/** @return {@link Path} to the region's temp directory, used for file creations */
public Path getTempDir() {
Path getTempDir() {
return new Path(getRegionDir(), REGION_TEMP_DIR);
}
/**
* Clean up any temp detritus that may have been left around from previous operation attempts.
*/
public void cleanupTempDir() throws IOException {
void cleanupTempDir() throws IOException {
FSUtils.deleteDirectory(fs, getTempDir());
}
@ -137,7 +141,7 @@ public class HRegionFileSystem {
* @return {@link Path} to the directory of the specified family
* @throws IOException if the directory creation fails.
*/
public Path createStoreDir(final String familyName) throws IOException {
Path createStoreDir(final String familyName) throws IOException {
Path storeDir = getStoreDir(familyName);
if (!fs.exists(storeDir) && !fs.mkdirs(storeDir)) {
throw new IOException("Failed create of: " + storeDir);
@ -175,8 +179,26 @@ public class HRegionFileSystem {
return storeFiles;
}
/**
* Returns true if the specified family has reference files.
* @param familyName Column Family Name
* @return true if family contains at least one reference file
* @throws IOException if listing the family directory fails
*/
public boolean hasReferences(final String familyName) throws IOException {
// listStatus returns null when the directory does not exist
FileStatus[] files = FSUtils.listStatus(fs, getStoreDir(familyName),
new PathFilter () {
public boolean accept(Path path) {
return StoreFileInfo.isReference(path);
}
}
);
return files != null && files.length > 0;
}
/**
* @return the set of families present on disk
* @throws IOException
*/
public Collection<String> getFamilies() throws IOException {
FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
@ -190,6 +212,24 @@ public class HRegionFileSystem {
return families;
}
/**
* Remove the region family from disk, archiving the store files.
* @param familyName Column Family Name
* @throws IOException if an error occurs during the archiving or the delete
*/
public void deleteFamily(final String familyName) throws IOException {
// archive family store files before removing the directory itself
HFileArchiver.archiveFamily(fs, conf, regionInfo, tableDir, Bytes.toBytes(familyName));
// delete the family folder
Path familyDir = getStoreDir(familyName);
if (!fs.delete(familyDir, true)) {
throw new IOException("Could not delete family " + familyName +
" from FileSystem for region " + regionInfo.getRegionNameAsString() +
"(" + regionInfo.getEncodedName() + ")");
}
}
/**
* Generate a unique file name, used by createTempName() and commitStoreFile()
* @param suffix extra information to append to the generated name
@ -252,7 +292,7 @@ public class HRegionFileSystem {
* @return The new {@link Path} of the committed file
* @throws IOException
*/
public Path commitStoreFile(final String familyName, final Path buildPath,
private Path commitStoreFile(final String familyName, final Path buildPath,
final long seqNum, final boolean generateNewName) throws IOException {
Path storeDir = getStoreDir(familyName);
fs.mkdirs(storeDir);
@ -271,6 +311,20 @@ public class HRegionFileSystem {
return dstPath;
}
/**
* Moves multiple store files into this region's per-family store directories.
* @param storeFiles store files to commit, keyed by family name
* @throws IOException if any file cannot be committed
*/
void commitStoreFiles(final Map<byte[], List<StoreFile>> storeFiles) throws IOException {
for (Map.Entry<byte[], List<StoreFile>> es: storeFiles.entrySet()) {
String familyName = Bytes.toString(es.getKey());
for (StoreFile sf: es.getValue()) {
commitStoreFile(familyName, sf.getPath());
}
}
}
/**
* Archives the specified store file from the specified family.
* @param familyName Family that contains the store files
@ -306,7 +360,7 @@ public class HRegionFileSystem {
* @return The destination {@link Path} of the bulk loaded file
* @throws IOException
*/
public Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
throws IOException {
// Copy the file if it's on another filesystem
FileSystem srcFs = srcPath.getFileSystem(conf);
@ -331,35 +385,149 @@ public class HRegionFileSystem {
// Splits Helpers
// ===========================================================================
/** @return {@link Path} to the temp directory used during split operations */
public Path getSplitsDir() {
Path getSplitsDir() {
return new Path(getRegionDir(), REGION_SPLITS_DIR);
}
/** @return {@link Path} to the temp directory of the specified daughter region during split */
Path getSplitsDir(final HRegionInfo hri) {
return new Path(getSplitsDir(), hri.getEncodedName());
}
/**
* Clean up any split detritus that may have been left around from previous split attempts.
*/
public void cleanupSplitsDir() throws IOException {
void cleanupSplitsDir() throws IOException {
FSUtils.deleteDirectory(fs, getSplitsDir());
}
/**
* Clean up any split detritus that may have been left around from previous
* split attempts.
* Call this method on initial region deploy.
* @throws IOException if a leftover daughter directory cannot be deleted
*/
void cleanupAnySplitDetritus() throws IOException {
Path splitdir = this.getSplitsDir();
if (!fs.exists(splitdir)) return;
// Look at the splitdir. It could have the encoded names of the daughter
// regions we tried to make. See if the daughter regions actually got made
// out under the tabledir. If here under splitdir still, then the split did
// not complete. Try and do cleanup. This code WILL NOT catch the case
// where we successfully created daughter a but regionserver crashed during
// the creation of region b. In this case, there'll be an orphan daughter
// dir in the filesystem. TODO: Fix.
FileStatus[] daughters = FSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs));
if (daughters != null) {
for (FileStatus daughter: daughters) {
Path daughterDir = new Path(getTableDir(), daughter.getPath().getName());
if (fs.exists(daughterDir) && !fs.delete(daughterDir, true)) {
throw new IOException("Failed delete of " + daughterDir);
}
}
}
cleanupSplitsDir();
LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
}
/**
* Remove a daughter region's directory from under the table dir, if present.
* @param regionInfo daughter {@link HRegionInfo}
* @throws IOException if the directory exists but cannot be deleted
*/
void cleanupDaughterRegion(final HRegionInfo regionInfo) throws IOException {
Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
throw new IOException("Failed delete of " + regionDir);
}
}
/**
* Commit a daughter region, moving it from the split temporary directory
* to the proper location in the filesystem.
* @param regionInfo daughter {@link HRegionInfo}
* @return the {@link Path} of the committed daughter region directory
* @throws IOException if the rename from the temp directory fails
*/
Path commitDaughterRegion(final HRegionInfo regionInfo) throws IOException {
Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
Path daughterTmpDir = this.getSplitsDir(regionInfo);
// if the temp dir is missing the rename is skipped and the expected
// destination path is still returned
if (fs.exists(daughterTmpDir) && !fs.rename(daughterTmpDir, regionDir)) {
throw new IOException("Unable to rename " + daughterTmpDir + " to " + regionDir);
}
return regionDir;
}
/**
* Create the region splits directory, removing any leftover one first so
* the split starts from a clean state.
* @throws IOException if the old directory cannot be deleted or the new one created
*/
void createSplitsDir() throws IOException {
Path splitdir = getSplitsDir();
if (fs.exists(splitdir)) {
LOG.info("The " + splitdir + " directory exists. Hence deleting it to recreate it");
if (!fs.delete(splitdir, true)) {
throw new IOException("Failed deletion of " + splitdir
+ " before creating them again.");
}
}
if (!fs.mkdirs(splitdir)) {
throw new IOException("Failed create of " + splitdir);
}
}
/**
* Write out a split reference. Package local so it doesn't leak out of
* regionserver.
* @param hri {@link HRegionInfo} of the destination
* @param familyName Column Family Name
* @param f File to split.
* @param splitRow Split Row
* @param top True if we are referring to the top half of the hfile.
* @return Path to created reference.
* @throws IOException if the reference file cannot be written
*/
Path splitStoreFile(final HRegionInfo hri, final String familyName,
final StoreFile f, final byte[] splitRow, final boolean top) throws IOException {
Path splitDir = new Path(getSplitsDir(hri), familyName);
// Create a reference to the top or bottom half of the store file,
// depending on the requested half.
Reference r =
top ? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
// Add the referred-to regions name as a dot separated suffix.
// See REF_NAME_REGEX regex above. The referred-to regions name is
// up in the path of the passed in <code>f</code> -- parentdir is family,
// then the directory above is the region name.
String parentRegionName = regionInfo.getEncodedName();
// Write reference with same file id only with the other region name as
// suffix and into the new region location (under same family).
Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
return r.write(fs, p);
}
// ===========================================================================
// Merge Helpers
// ===========================================================================
/** @return {@link Path} to the temp directory used during merge operations */
public Path getMergesDir() {
Path getMergesDir() {
return new Path(getRegionDir(), REGION_MERGES_DIR);
}
/**
* Clean up any merge detritus that may have been left around from previous merge attempts.
*/
public void cleanupMergesDir() throws IOException {
void cleanupMergesDir() throws IOException {
FSUtils.deleteDirectory(fs, getMergesDir());
}
// ===========================================================================
// Create/Open/Delete Helpers
// ===========================================================================
/**
* Log the current state of the region's directory on the filesystem.
* @param LOG log to output information
* @throws IOException if an unexpected exception occurs while listing
*/
void logFileSystemState(final Log LOG) throws IOException {
FSUtils.logFileSystemState(fs, this.getRegionDir(), LOG);
}
/**
* @param hri
* @return Content of the file we write out to the filesystem under a region
@ -517,10 +685,12 @@ public class HRegionFileSystem {
* @param fs {@link FileSystem} from which to add the region
* @param tableDir {@link Path} to where the table is being stored
* @param regionInfo {@link HRegionInfo} for region to be added
* @param readOnly True if you don't want to edit the region data
* @throws IOException if the region creation fails due to a FileSystem exception.
*/
public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf,
final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo, boolean readOnly)
throws IOException {
HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
Path regionDir = regionFs.getRegionDir();
@ -529,12 +699,16 @@ public class HRegionFileSystem {
throw new IOException("The specified region do not exists on disk: " + regionDir);
}
// Cleanup temporary directories
regionFs.cleanupTempDir();
regionFs.cleanupSplitsDir();
regionFs.cleanupMergesDir();
// if it doesn't exists, Write HRI to a file, in case we need to recover .META.
regionFs.checkRegionInfoOnFilesystem();
if (readOnly) {
// Cleanup temporary directories
regionFs.cleanupTempDir();
regionFs.cleanupSplitsDir();
regionFs.cleanupMergesDir();
// if it doesn't exists, Write HRI to a file, in case we need to recover .META.
regionFs.checkRegionInfoOnFilesystem();
}
return regionFs;
}

View File

@ -93,13 +93,12 @@ class SplitRequest implements Runnable {
return;
}
LOG.info("Region split, META updated, and report to master. Parent="
+ parent.getRegionInfo().getRegionNameAsString() + ", new regions: "
+ parent.getRegionNameAsString() + ", new regions: "
+ st.getFirstDaughter().getRegionNameAsString() + ", "
+ st.getSecondDaughter().getRegionNameAsString() + ". Split took "
+ StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime));
} catch (IOException ex) {
LOG.error("Split failed " + this, RemoteExceptionHandler
.checkIOException(ex));
LOG.error("Split failed " + this, RemoteExceptionHandler.checkIOException(ex));
server.checkFileSystem();
} finally {
if (this.parent.getCoprocessorHost() != null) {

View File

@ -22,6 +22,7 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
@ -34,9 +35,6 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RegionTransition;
import org.apache.hadoop.hbase.Server;
@ -46,7 +44,6 @@ import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@ -90,7 +87,6 @@ public class SplitTransaction {
private final HRegion parent;
private HRegionInfo hri_a;
private HRegionInfo hri_b;
private Path splitdir;
private long fileSplitTimeout = 30000;
private int znodeVersion = -1;
@ -150,7 +146,6 @@ public class SplitTransaction {
public SplitTransaction(final HRegion r, final byte [] splitrow) {
this.parent = r;
this.splitrow = splitrow;
this.splitdir = getSplitDir(this.parent);
}
/**
@ -174,10 +169,8 @@ public class SplitTransaction {
return false;
}
long rid = getDaughterRegionIdTimestamp(hri);
this.hri_a = new HRegionInfo(hri.getTableName(), startKey, this.splitrow,
false, rid);
this.hri_b = new HRegionInfo(hri.getTableName(), this.splitrow, endKey,
false, rid);
this.hri_a = new HRegionInfo(hri.getTableName(), startKey, this.splitrow, false, rid);
this.hri_b = new HRegionInfo(hri.getTableName(), this.splitrow, endKey, false, rid);
return true;
}
@ -206,7 +199,8 @@ public class SplitTransaction {
* @param server Hosting server instance. Can be null when testing (won't try
* and update in zk if a null server)
* @param services Used to online/offline regions.
* @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
* @throws IOException If thrown, transaction failed.
* Call {@link #rollback(Server, RegionServerServices)}
* @return Regions created
*/
/* package */PairOfSameType<HRegion> createDaughters(final Server server,
@ -216,7 +210,8 @@ public class SplitTransaction {
(services != null && services.isStopping())) {
throw new IOException("Server is stopped or stopping");
}
assert !this.parent.lock.writeLock().isHeldByCurrentThread(): "Unsafe to hold write lock while performing RPCs";
assert !this.parent.lock.writeLock().isHeldByCurrentThread():
"Unsafe to hold write lock while performing RPCs";
// Coprocessor callback
if (this.parent.getCoprocessorHost() != null) {
@ -253,7 +248,8 @@ public class SplitTransaction {
// Master will get the callback for node change only if the transition is successful.
// Note that if the transition fails then the rollback will delete the created znode
// as the journal entry SET_SPLITTING_IN_ZK is added.
// TODO : May be we can add some new state to znode and handle the new state incase of success/failure
// TODO : May be we can add some new state to znode and handle the new state incase
// of success/failure
this.znodeVersion = transitionNodeSplitting(server.getZooKeeper(),
this.parent.getRegionInfo(), server.getServerName(), -1);
} catch (KeeperException e) {
@ -262,10 +258,10 @@ public class SplitTransaction {
}
}
createSplitDir(this.parent.getFilesystem(), this.splitdir);
this.parent.getRegionFileSystem().createSplitsDir();
this.journal.add(JournalEntry.CREATE_SPLIT_DIR);
List<StoreFile> hstoreFilesToSplit = null;
Map<byte[], List<StoreFile>> hstoreFilesToSplit = null;
Exception exceptionToThrow = null;
try{
hstoreFilesToSplit = this.parent.close(false);
@ -298,18 +294,18 @@ public class SplitTransaction {
// splitStoreFiles creates daughter region dirs under the parent splits dir
// Nothing to unroll here if failure -- clean up of CREATE_SPLIT_DIR will
// clean this up.
splitStoreFiles(this.splitdir, hstoreFilesToSplit);
splitStoreFiles(hstoreFilesToSplit);
// Log to the journal that we are creating region A, the first daughter
// region. We could fail halfway through. If we do, we could have left
// stuff in fs that needs cleanup -- a storefile or two. Thats why we
// add entry to journal BEFORE rather than AFTER the change.
this.journal.add(JournalEntry.STARTED_REGION_A_CREATION);
HRegion a = createDaughterRegion(this.hri_a);
HRegion a = this.parent.createDaughterRegionFromSplits(this.hri_a);
// Ditto
this.journal.add(JournalEntry.STARTED_REGION_B_CREATION);
HRegion b = createDaughterRegion(this.hri_b);
HRegion b = this.parent.createDaughterRegionFromSplits(this.hri_b);
// This is the point of no return. Adding subsequent edits to .META. as we
// do below when we do the daughter opens adding each to .META. can fail in
@ -347,7 +343,8 @@ public class SplitTransaction {
* @param services Used to online/offline regions.
* @param a first daughter region
* @param a second daughter region
* @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
* @throws IOException If thrown, transaction failed.
* Call {@link #rollback(Server, RegionServerServices)}
*/
/* package */void openDaughters(final Server server,
final RegionServerServices services, HRegion a, HRegion b)
@ -404,7 +401,8 @@ public class SplitTransaction {
* @param services Used to online/offline regions.
* @param a first daughter region
* @param a second daughter region
* @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
* @throws IOException If thrown, transaction failed.
* Call {@link #rollback(Server, RegionServerServices)}
*/
/* package */void transitionZKNode(final Server server,
final RegionServerServices services, HRegion a, HRegion b)
@ -456,7 +454,8 @@ public class SplitTransaction {
* @param server Hosting server instance. Can be null when testing (won't try
* and update in zk if a null server)
* @param services Used to online/offline regions.
* @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
* @throws IOException If thrown, transaction failed.
* Call {@link #rollback(Server, RegionServerServices)}
* @return Regions created
* @throws IOException
* @see #rollback(Server, RegionServerServices)
@ -542,56 +541,8 @@ public class SplitTransaction {
}
}
private static Path getSplitDir(final HRegion r) {
return new Path(r.getRegionDir(), HRegionFileSystem.REGION_SPLITS_DIR);
}
/**
* @param fs Filesystem to use
* @param splitdir Directory to store temporary split data in
* @throws IOException If <code>splitdir</code> already exists or we fail
* to create it.
* @see #cleanupSplitDir(FileSystem, Path)
*/
private static void createSplitDir(final FileSystem fs, final Path splitdir)
throws IOException {
if (fs.exists(splitdir)) {
LOG.info("The " + splitdir
+ " directory exists. Hence deleting it to recreate it");
if (!fs.delete(splitdir, true)) {
throw new IOException("Failed deletion of " + splitdir
+ " before creating them again.");
}
}
if (!fs.mkdirs(splitdir)) throw new IOException("Failed create of " + splitdir);
}
private static void cleanupSplitDir(final FileSystem fs, final Path splitdir)
throws IOException {
// Splitdir may have been cleaned up by reopen of the parent dir.
deleteDir(fs, splitdir, false);
}
/**
* @param fs Filesystem to use
* @param dir Directory to delete
* @param mustPreExist If true, we'll throw exception if <code>dir</code>
* does not preexist, else we'll just pass.
* @throws IOException Thrown if we fail to delete passed <code>dir</code>
*/
private static void deleteDir(final FileSystem fs, final Path dir,
final boolean mustPreExist)
throws IOException {
if (!fs.exists(dir)) {
if (mustPreExist) throw new IOException(dir.toString() + " does not exist!");
} else if (!fs.delete(dir, true)) {
throw new IOException("Failed delete of " + dir);
}
}
private void splitStoreFiles(final Path splitdir,
final List<StoreFile> hstoreFilesToSplit)
throws IOException {
private void splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit)
throws IOException {
if (hstoreFilesToSplit == null) {
// Could be null because close didn't succeed -- for now consider it fatal
throw new IOException("Close returned empty list of StoreFiles");
@ -611,11 +562,12 @@ public class SplitTransaction {
(ThreadPoolExecutor) Executors.newFixedThreadPool(nbFiles, factory);
List<Future<Void>> futures = new ArrayList<Future<Void>>(nbFiles);
// Split each store file.
for (StoreFile sf: hstoreFilesToSplit) {
//splitStoreFile(sf, splitdir);
StoreFileSplitter sfs = new StoreFileSplitter(sf, splitdir);
futures.add(threadPool.submit(sfs));
// Split each store file.
for (Map.Entry<byte[], List<StoreFile>> entry: hstoreFilesToSplit.entrySet()) {
for (StoreFile sf: entry.getValue()) {
StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
futures.add(threadPool.submit(sfs));
}
}
// Shutdown the pool
threadPool.shutdown();
@ -652,14 +604,11 @@ public class SplitTransaction {
}
}
private void splitStoreFile(final StoreFile sf, final Path splitdir)
throws IOException {
FileSystem fs = this.parent.getFilesystem();
byte [] family = sf.getFamily();
Path storedir = HStore.getStoreHomedir(splitdir, this.hri_a, family);
StoreFile.split(fs, storedir, sf, this.splitrow, false);
storedir = HStore.getStoreHomedir(splitdir, this.hri_b, family);
StoreFile.split(fs, storedir, sf, this.splitrow, true);
private void splitStoreFile(final byte[] family, final StoreFile sf) throws IOException {
HRegionFileSystem fs = this.parent.getRegionFileSystem();
String familyName = Bytes.toString(family);
fs.splitStoreFile(this.hri_a, familyName, sf, this.splitrow, false);
fs.splitStoreFile(this.hri_b, familyName, sf, this.splitrow, true);
}
/**
@ -667,60 +616,25 @@ public class SplitTransaction {
* in parallel instead of sequentially.
*/
class StoreFileSplitter implements Callable<Void> {
private final byte[] family;
private final StoreFile sf;
private final Path splitdir;
/**
* Constructor that takes what it needs to split
* @param family Family that contains the store file
* @param sf which file
* @param splitdir where the splitting is done
*/
public StoreFileSplitter(final StoreFile sf, final Path splitdir) {
public StoreFileSplitter(final byte[] family, final StoreFile sf) {
this.sf = sf;
this.splitdir = splitdir;
this.family = family;
}
public Void call() throws IOException {
splitStoreFile(sf, splitdir);
splitStoreFile(family, sf);
return null;
}
}
/**
* @param hri Spec. for daughter region to open.
* @param rsServices RegionServerServices this region should use.
* @return Created daughter HRegion.
* @throws IOException
* @see #cleanupDaughterRegion(FileSystem, Path, String)
*/
HRegion createDaughterRegion(final HRegionInfo hri) throws IOException {
// Package private so unit tests have access.
Path regionDir = getSplitDirForDaughter(this.splitdir, hri);
return this.parent.createDaughterRegion(hri, regionDir);
}
private static void cleanupDaughterRegion(final FileSystem fs,
final Path tabledir, final String encodedName)
throws IOException {
Path regiondir = HRegion.getRegionDir(tabledir, encodedName);
// Dir may not preexist.
deleteDir(fs, regiondir, false);
}
/*
* Get the daughter directories in the splits dir. The splits dir is under
* the parent regions' directory.
* @param splitdir
* @param hri
* @return Path to daughter split dir.
* @throws IOException
*/
private static Path getSplitDirForDaughter(final Path splitdir, final HRegionInfo hri)
throws IOException {
return new Path(splitdir, hri.getEncodedName());
}
/**
* @param server Hosting server instance (May be null when testing).
* @param services
@ -736,7 +650,6 @@ public class SplitTransaction {
}
boolean result = true;
FileSystem fs = this.parent.getFilesystem();
ListIterator<JournalEntry> iterator =
this.journal.listIterator(this.journal.size());
// Iterate in reverse.
@ -751,8 +664,8 @@ public class SplitTransaction {
break;
case CREATE_SPLIT_DIR:
this.parent.writestate.writesEnabled = true;
cleanupSplitDir(fs, this.splitdir);
this.parent.writestate.writesEnabled = true;
this.parent.getRegionFileSystem().cleanupSplitsDir();
break;
case CLOSED_PARENT_REGION:
@ -771,13 +684,11 @@ public class SplitTransaction {
break;
case STARTED_REGION_A_CREATION:
cleanupDaughterRegion(fs, this.parent.getTableDir(),
this.hri_a.getEncodedName());
this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_a);
break;
case STARTED_REGION_B_CREATION:
cleanupDaughterRegion(fs, this.parent.getTableDir(),
this.hri_b.getEncodedName());
this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_b);
break;
case OFFLINED_PARENT:
@ -810,39 +721,6 @@ public class SplitTransaction {
return hri_b;
}
// For unit testing.
Path getSplitDir() {
return this.splitdir;
}
/**
* Clean up any split detritus that may have been left around from previous
* split attempts.
* Call this method on initial region deploy. Cleans up any mess
* left by previous deploys of passed <code>r</code> region.
* @param r
* @throws IOException
*/
static void cleanupAnySplitDetritus(final HRegion r) throws IOException {
Path splitdir = getSplitDir(r);
FileSystem fs = r.getFilesystem();
if (!fs.exists(splitdir)) return;
// Look at the splitdir. It could have the encoded names of the daughter
// regions we tried to make. See if the daughter regions actually got made
// out under the tabledir. If here under splitdir still, then the split did
// not complete. Try and do cleanup. This code WILL NOT catch the case
// where we successfully created daughter a but regionserver crashed during
// the creation of region b. In this case, there'll be an orphan daughter
// dir in the filesystem. TOOD: Fix.
FileStatus [] daughters = fs.listStatus(splitdir, new FSUtils.DirFilter(fs));
for (int i = 0; i < daughters.length; i++) {
cleanupDaughterRegion(fs, r.getTableDir(),
daughters[i].getPath().getName());
}
cleanupSplitDir(r.getFilesystem(), splitdir);
LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
}
private static void cleanZK(final Server server, final HRegionInfo hri) {
try {
// Only delete if its in expected state; could have been hijacked.

View File

@ -36,7 +36,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
@ -45,7 +44,6 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValue.KVComparator;
import org.apache.hadoop.hbase.KeyValue.MetaKeyComparator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.BlockType;
@ -253,13 +251,6 @@ public class StoreFile {
return this.fileInfo.getPath();
}
/**
* @return The Store/ColumnFamily this file belongs to.
*/
byte [] getFamily() {
return Bytes.toBytes(this.getPath().getParent().getName());
}
/**
* @return True if this is a StoreFile Reference; call after {@link #open()}
* else may get wrong answer.
@ -545,28 +536,6 @@ public class StoreFile {
return sb.toString();
}
/**
* Utility to help with rename.
* @param fs
* @param src
* @param tgt
* @return True if succeeded.
* @throws IOException
*/
public static Path rename(final FileSystem fs,
final Path src,
final Path tgt)
throws IOException {
if (!fs.exists(src)) {
throw new FileNotFoundException(src.toString());
}
if (!fs.rename(src, tgt)) {
throw new IOException("Failed rename of " + src + " to " + tgt);
}
return tgt;
}
public static class WriterBuilder {
private final Configuration conf;
private final CacheConfig cacheConf;
@ -720,38 +689,6 @@ public class StoreFile {
return new Path(dir, UUID.randomUUID().toString().replaceAll("-", ""));
}
/**
* Write out a split reference. Package local so it doesnt leak out of
* regionserver.
* @param fs
* @param splitDir Presumes path format is actually
* <code>SOME_DIRECTORY/REGIONNAME/FAMILY</code>.
* @param f File to split.
* @param splitRow
* @param top True if we are referring to the top half of the hfile.
* @return Path to created reference.
* @throws IOException
*/
static Path split(final FileSystem fs,
final Path splitDir,
final StoreFile f,
final byte [] splitRow,
final boolean top)
throws IOException {
// A reference to the bottom half of the hsf store file.
Reference r =
top? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
// Add the referred-to regions name as a dot separated suffix.
// See REF_NAME_REGEX regex above. The referred-to regions name is
// up in the path of the passed in <code>f</code> -- parentdir is family,
// then the directory above is the region name.
String parentRegionName = f.getPath().getParent().getParent().getName();
// Write reference with same file id only with the other region name as
// suffix and into the new region location (under same family).
Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
return r.write(fs, p);
}
public Long getMinimumTimestamp() {
return (getReader().timeRangeTracker == null) ?
null :

View File

@ -330,7 +330,7 @@ public class RestoreSnapshotHelper {
Path hfile = new Path(familyDir, hfileName);
LOG.trace("Removing hfile=" + hfile +
" from region=" + regionInfo.getEncodedName() + " table=" + tableName);
HFileArchiver.archiveStoreFile(fs, regionInfo, conf, tableDir, family, hfile);
HFileArchiver.archiveStoreFile(conf, fs, regionInfo, tableDir, family, hfile);
}
} else {
// Family doesn't exists in the snapshot

View File

@ -555,7 +555,7 @@ public class HBaseFsck extends Configured implements Tool {
HRegionInfo hri = new HRegionInfo(template.getName(), orphanRegionRange.getFirst(), orphanRegionRange.getSecond());
LOG.info("Creating new region : " + hri);
HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), hri, template);
Path target = region.getRegionDir();
Path target = region.getRegionFileSystem().getRegionDir();
// rename all the data to new region
mergeRegionDirs(target, hi);
@ -2183,11 +2183,11 @@ public class HBaseFsck extends Configured implements Tool {
HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
LOG.info("Created new empty container region: " +
newRegion + " to contain regions: " + Joiner.on(",").join(overlap));
debugLsr(region.getRegionDir());
debugLsr(region.getRegionFileSystem().getRegionDir());
// all target regions are closed, should be able to safely cleanup.
boolean didFix= false;
Path target = region.getRegionDir();
Path target = region.getRegionFileSystem().getRegionDir();
for (HbckInfo contained : overlap) {
LOG.info("Merging " + contained + " into " + target );
int merges = mergeRegionDirs(target, contained);

View File

@ -52,21 +52,6 @@ public class HFileArchiveUtil {
return HStore.getStoreHomedir(tableArchiveDir, regionName, Bytes.toBytes(familyName));
}
/**
* Get the directory to archive a store directory
* @param conf {@link Configuration} to read for the archive directory name
* @param tableName table name under which the store currently lives
* @param region parent region information under which the store currently lives
* @param familyName name of the family in the store
* @return {@link Path} to the directory to archive the given store or
* <tt>null</tt> if it should not be archived
*/
public static Path getStoreArchivePath(final Configuration conf, final String tableName,
final HRegionInfo region, final String familyName) throws IOException {
Path tableArchiveDir = getTableArchivePath(conf, tableName);
return HStore.getStoreHomedir(tableArchiveDir, region, Bytes.toBytes(familyName));
}
/**
* Get the directory to archive a store directory
* @param conf {@link Configuration} to read for the archive directory name. Can be null.
@ -79,19 +64,17 @@ public class HFileArchiveUtil {
public static Path getStoreArchivePath(Configuration conf, HRegionInfo region, Path tabledir,
byte[] family) {
Path tableArchiveDir = getTableArchivePath(tabledir);
return HStore.getStoreHomedir(tableArchiveDir,
HRegionInfo.encodeRegionName(region.getRegionName()), family);
return HStore.getStoreHomedir(tableArchiveDir, region, family);
}
/**
* Get the archive directory for a given region under the specified table
* @param conf {@link Configuration} to read the archive directory from. Can be null
* @param tabledir the original table directory. Cannot be null.
* @param regiondir the path to the region directory. Cannot be null.
* @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
* should not be archived
*/
public static Path getRegionArchiveDir(Configuration conf, Path tabledir, Path regiondir) {
public static Path getRegionArchiveDir(Path tabledir, Path regiondir) {
// get the archive directory for a table
Path archiveDir = getTableArchivePath(tabledir);

View File

@ -43,7 +43,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
@ -57,8 +56,7 @@ import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.NoServerForRegionException;
import org.apache.hadoop.hbase.regionserver.HStore;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
@ -123,7 +121,8 @@ import com.google.common.collect.Sets;
* <p>
* The more complicated answer is that this depends upon the largest storefile
* in your region. With a growing data size, this will get larger over time. You
* want the largest region to be just big enough that the {@link HStore} compact
* want the largest region to be just big enough that the
* {@link org.apache.hadoop.hbase.regionserver.HStore} compact
* selection algorithm only compacts it due to a timed major. If you don't, your
* cluster can be prone to compaction storms as the algorithm decides to run
* major compactions on a large series of regions all at once. Note that
@ -628,9 +627,10 @@ public class RegionSplitter {
LinkedList<Pair<byte[], byte[]>> physicalSplitting = Lists.newLinkedList();
// get table info
Path hbDir = new Path(table.getConfiguration().get(HConstants.HBASE_DIR));
Path tableDir = HTableDescriptor.getTableDir(hbDir, table.getTableName());
Path rootDir = FSUtils.getRootDir(table.getConfiguration());
Path tableDir = HTableDescriptor.getTableDir(rootDir, table.getTableName());
FileSystem fs = tableDir.getFileSystem(table.getConfiguration());
HTableDescriptor htd = table.getTableDescriptor();
// clear the cache to forcibly refresh region information
table.clearRegionCache();
@ -661,25 +661,22 @@ public class RegionSplitter {
check.add(table.getRegionLocation(start).getRegionInfo());
check.add(table.getRegionLocation(split).getRegionInfo());
for (HRegionInfo hri : check.toArray(new HRegionInfo[] {})) {
boolean refFound = false;
byte[] sk = hri.getStartKey();
if (sk.length == 0)
sk = splitAlgo.firstRow();
String startKey = splitAlgo.rowToStr(sk);
HTableDescriptor htd = table.getTableDescriptor();
HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
table.getConfiguration(), fs, tableDir, hri, true);
// check every Column Family for that region
boolean refFound = false;
for (HColumnDescriptor c : htd.getFamilies()) {
Path cfDir = HStore.getStoreHomedir(tableDir, hri, c.getName());
if (fs.exists(cfDir)) {
for (FileStatus file : fs.listStatus(cfDir)) {
refFound |= StoreFileInfo.isReference(file.getPath());
if (refFound)
break;
}
}
if (refFound)
if ((refFound = regionFs.hasReferences(htd.getNameAsString()))) {
break;
}
}
// compaction is completed when all reference files are gone
if (!refFound) {
check.remove(hri);
@ -691,8 +688,7 @@ public class RegionSplitter {
physicalSplitting.add(region);
}
} catch (NoServerForRegionException nsfre) {
LOG.debug("No Server Exception thrown for: "
+ splitAlgo.rowToStr(start));
LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start));
physicalSplitting.add(region);
table.clearRegionCache();
}

View File

@ -139,7 +139,8 @@ public class TestHFileArchiving {
FileSystem fs = UTIL.getTestFileSystem();
// now attempt to depose the region
Path regionDir = HRegion.getRegionDir(region.getTableDir().getParent(), region.getRegionInfo());
Path rootDir = region.getRegionFileSystem().getTableDir().getParent();
Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());
@ -172,7 +173,7 @@ public class TestHFileArchiving {
assertEquals(1, servingRegions.size());
HRegion region = servingRegions.get(0);
FileSystem fs = region.getFilesystem();
FileSystem fs = region.getRegionFileSystem().getFileSystem();
// make sure there are some files in the regiondir
Path rootDir = FSUtils.getRootDir(fs.getConf());
@ -238,8 +239,7 @@ public class TestHFileArchiving {
clearArchiveDirectory();
// then get the current store files
Path regionDir = region.getRegionDir();
List<String> storeFiles = getRegionStoreFiles(fs, regionDir);
List<String> storeFiles = getRegionStoreFiles(region);
// then delete the table so the hfiles get archived
UTIL.deleteTable(TABLE_NAME);
@ -299,8 +299,7 @@ public class TestHFileArchiving {
clearArchiveDirectory();
// then get the current store files
Path regionDir = region.getRegionDir();
List<String> storeFiles = getRegionStoreFiles(fs, regionDir);
List<String> storeFiles = getRegionStoreFiles(region);
// then delete the table so the hfiles get archived
UTIL.getHBaseAdmin().deleteColumn(TABLE_NAME, TEST_FAM);
@ -416,8 +415,9 @@ public class TestHFileArchiving {
return fileNames;
}
private List<String> getRegionStoreFiles(final FileSystem fs, final Path regionDir)
throws IOException {
private List<String> getRegionStoreFiles(final HRegion region) throws IOException {
Path regionDir = region.getRegionFileSystem().getRegionDir();
FileSystem fs = region.getRegionFileSystem().getFileSystem();
List<String> storeFiles = getAllFileNames(fs, regionDir);
// remove all the non-storefile named files for the region
for (int i = 0; i < storeFiles.size(); i++) {

View File

@ -70,9 +70,9 @@ public class TestHFileLinkCleaner {
Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
tableName, hri, familyName);
tableName, hri.getEncodedName(), familyName);
Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
tableLinkName, hriLink, familyName);
tableLinkName, hriLink.getEncodedName(), familyName);
// Create hfile /hbase/table-link/region/cf/getEncodedName.HFILE(conf);
Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);

View File

@ -245,8 +245,8 @@ public class TestHRegion extends HBaseTestCase {
byte[] family = Bytes.toBytes("family");
this.region = initHRegion(tableName, method, conf, family);
try {
Path regiondir = region.getRegionDir();
FileSystem fs = region.getFilesystem();
Path regiondir = region.getRegionFileSystem().getRegionDir();
FileSystem fs = region.getRegionFileSystem().getFileSystem();
byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
@ -257,8 +257,7 @@ public class TestHRegion extends HBaseTestCase {
for (long i = minSeqId; i <= maxSeqId; i += 10) {
Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
fs.create(recoveredEdits);
HLog.Writer writer = HLogFactory.createWriter(fs,
recoveredEdits, conf);
HLog.Writer writer = HLogFactory.createWriter(fs, recoveredEdits, conf);
long time = System.nanoTime();
WALEdit edit = new WALEdit();
@ -273,8 +272,7 @@ public class TestHRegion extends HBaseTestCase {
Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(
Bytes.BYTES_COMPARATOR);
for (Store store : region.getStores().values()) {
maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(),
minSeqId - 1);
maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId - 1);
}
long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
assertEquals(maxSeqId, seqId);
@ -297,8 +295,8 @@ public class TestHRegion extends HBaseTestCase {
byte[] family = Bytes.toBytes("family");
this.region = initHRegion(tableName, method, conf, family);
try {
Path regiondir = region.getRegionDir();
FileSystem fs = region.getFilesystem();
Path regiondir = region.getRegionFileSystem().getRegionDir();
FileSystem fs = region.getRegionFileSystem().getFileSystem();
byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
@ -309,8 +307,7 @@ public class TestHRegion extends HBaseTestCase {
for (long i = minSeqId; i <= maxSeqId; i += 10) {
Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
fs.create(recoveredEdits);
HLog.Writer writer = HLogFactory.createWriter(fs,
recoveredEdits, conf);
HLog.Writer writer = HLogFactory.createWriter(fs, recoveredEdits, conf);
long time = System.nanoTime();
WALEdit edit = new WALEdit();
@ -354,13 +351,12 @@ public class TestHRegion extends HBaseTestCase {
byte[] family = Bytes.toBytes("family");
this.region = initHRegion(tableName, method, conf, family);
try {
Path regiondir = region.getRegionDir();
FileSystem fs = region.getFilesystem();
Path regiondir = region.getRegionFileSystem().getRegionDir();
FileSystem fs = region.getRegionFileSystem().getFileSystem();
Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
for (int i = 1000; i < 1050; i += 10) {
Path recoveredEdits = new Path(
recoveredEditsDir, String.format("%019d", i));
Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
FSDataOutputStream dos= fs.create(recoveredEdits);
dos.writeInt(i);
dos.close();
@ -1713,9 +1709,9 @@ public class TestHRegion extends HBaseTestCase {
openClosedRegion(subregions[i]);
subregions[i].compactStores();
}
Path oldRegionPath = region.getRegionDir();
Path oldRegion1 = subregions[0].getRegionDir();
Path oldRegion2 = subregions[1].getRegionDir();
Path oldRegionPath = region.getRegionFileSystem().getRegionDir();
Path oldRegion1 = subregions[0].getRegionFileSystem().getRegionDir();
Path oldRegion2 = subregions[1].getRegionFileSystem().getRegionDir();
long startTime = System.currentTimeMillis();
region = HRegion.mergeAdjacent(subregions[0], subregions[1]);
LOG.info("Merge regions elapsed time: " +
@ -3491,8 +3487,8 @@ public class TestHRegion extends HBaseTestCase {
// Create a region and skip the initialization (like CreateTableHandler)
HRegion region = HRegion.createHRegion(hri, rootDir, conf, htd, null, false, true);
Path regionDir = region.getRegionDir();
FileSystem fs = region.getFilesystem();
Path regionDir = region.getRegionFileSystem().getRegionDir();
FileSystem fs = region.getRegionFileSystem().getFileSystem();
HRegion.closeHRegion(region);
Path regionInfoFile = new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE);
@ -3503,7 +3499,7 @@ public class TestHRegion extends HBaseTestCase {
// Try to open the region
region = HRegion.openHRegion(rootDir, hri, htd, null, conf);
assertEquals(regionDir, region.getRegionDir());
assertEquals(regionDir, region.getRegionFileSystem().getRegionDir());
HRegion.closeHRegion(region);
// Verify that the .regioninfo file is still there
@ -3516,7 +3512,7 @@ public class TestHRegion extends HBaseTestCase {
fs.exists(regionInfoFile));
region = HRegion.openHRegion(rootDir, hri, htd, null, conf);
assertEquals(regionDir, region.getRegionDir());
assertEquals(regionDir, region.getRegionFileSystem().getRegionDir());
HRegion.closeHRegion(region);
// Verify that the .regioninfo file is still there

View File

@ -63,7 +63,7 @@ public class TestHRegionFileSystem {
assertEquals(hri, hriVerify);
// Open the region
regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, rootDir, hri);
regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, rootDir, hri, false);
assertEquals(regionDir, regionFs.getRegionDir());
// Delete the region

View File

@ -68,13 +68,13 @@ public class TestHRegionInfo {
assertEquals(modtime, modtime2);
// Now load the file.
HRegionInfo deserializedHri = HRegionFileSystem.loadRegionInfoFileContent(
FileSystem.get(htu.getConfiguration()), r.getRegionDir());
r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir());
assertTrue(hri.equals(deserializedHri));
}
long getModTime(final HRegion r) throws IOException {
FileStatus [] statuses =
r.getFilesystem().listStatus(new Path(r.getRegionDir(), HRegionFileSystem.REGION_INFO_FILE));
FileStatus[] statuses = r.getRegionFileSystem().getFileSystem().listStatus(
new Path(r.getRegionFileSystem().getRegionDir(), HRegionFileSystem.REGION_INFO_FILE));
assertTrue(statuses != null && statuses.length == 1);
return statuses[0].getModificationTime();
}

View File

@ -95,9 +95,9 @@ public class TestSplitTransaction {
@After public void teardown() throws IOException {
if (this.parent != null && !this.parent.isClosed()) this.parent.close();
if (this.fs.exists(this.parent.getRegionDir()) &&
!this.fs.delete(this.parent.getRegionDir(), true)) {
throw new IOException("Failed delete of " + this.parent.getRegionDir());
Path regionDir = this.parent.getRegionFileSystem().getRegionDir();
if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
throw new IOException("Failed delete of " + regionDir);
}
if (this.wal != null) this.wal.closeAndDelete();
this.fs.delete(this.testdir, true);
@ -136,11 +136,9 @@ public class TestSplitTransaction {
// Make sure that region a and region b are still in the filesystem, that
// they have not been removed; this is supposed to be the case if we go
// past point of no return.
Path tableDir = this.parent.getRegionDir().getParent();
Path daughterADir =
new Path(tableDir, spiedUponSt.getFirstDaughter().getEncodedName());
Path daughterBDir =
new Path(tableDir, spiedUponSt.getSecondDaughter().getEncodedName());
Path tableDir = this.parent.getRegionFileSystem().getTableDir();
Path daughterADir = new Path(tableDir, spiedUponSt.getFirstDaughter().getEncodedName());
Path daughterBDir = new Path(tableDir, spiedUponSt.getSecondDaughter().getEncodedName());
assertTrue(TEST_UTIL.getTestFileSystem().exists(daughterADir));
assertTrue(TEST_UTIL.getTestFileSystem().exists(daughterBDir));
}
@ -154,7 +152,11 @@ public class TestSplitTransaction {
}
private SplitTransaction prepareGOOD_SPLIT_ROW() {
SplitTransaction st = new SplitTransaction(this.parent, GOOD_SPLIT_ROW);
return prepareGOOD_SPLIT_ROW(this.parent);
}
private SplitTransaction prepareGOOD_SPLIT_ROW(final HRegion parentRegion) {
SplitTransaction st = new SplitTransaction(parentRegion, GOOD_SPLIT_ROW);
assertTrue(st.prepare());
return st;
}
@ -165,6 +167,7 @@ public class TestSplitTransaction {
@Test public void testPrepareWithRegionsWithReference() throws IOException {
HStore storeMock = Mockito.mock(HStore.class);
when(storeMock.hasReferences()).thenReturn(true);
when(storeMock.getFamily()).thenReturn(new HColumnDescriptor("cf"));
when(storeMock.close()).thenReturn(ImmutableList.<StoreFile>of());
this.parent.stores.put(Bytes.toBytes(""), storeMock);
@ -214,13 +217,13 @@ public class TestSplitTransaction {
when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
PairOfSameType<HRegion> daughters = st.execute(mockServer, null);
// Do some assertions about execution.
assertTrue(this.fs.exists(st.getSplitDir()));
assertTrue(this.fs.exists(this.parent.getRegionFileSystem().getSplitsDir()));
// Assert the parent region is closed.
assertTrue(this.parent.isClosed());
// Assert splitdir is empty -- because its content will have been moved out
// to be under the daughter region dirs.
assertEquals(0, this.fs.listStatus(st.getSplitDir()).length);
assertEquals(0, this.fs.listStatus(this.parent.getRegionFileSystem().getSplitsDir()).length);
// Check daughters have correct key span.
assertTrue(Bytes.equals(this.parent.getStartKey(), daughters.getFirst().getStartKey()));
assertTrue(Bytes.equals(GOOD_SPLIT_ROW, daughters.getFirst().getEndKey()));
@ -249,9 +252,10 @@ public class TestSplitTransaction {
assertEquals(rowcount, parentRowCount);
// Start transaction.
SplitTransaction st = prepareGOOD_SPLIT_ROW();
HRegion spiedRegion = spy(this.parent);
SplitTransaction st = prepareGOOD_SPLIT_ROW(spiedRegion);
SplitTransaction spiedUponSt = spy(st);
when(spiedUponSt.createDaughterRegion(spiedUponSt.getSecondDaughter())).
when(spiedRegion.createDaughterRegionFromSplits(spiedUponSt.getSecondDaughter())).
thenThrow(new MockedFailedDaughterCreation());
// Run the execute. Look at what it returns.
boolean expectedException = false;

View File

@ -68,10 +68,10 @@ public class TestStoreFile extends HBaseTestCase {
static final Log LOG = LogFactory.getLog(TestStoreFile.class);
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
private static String ROOT_DIR =
TEST_UTIL.getDataTestDir("TestStoreFile").toString();
private static String ROOT_DIR = TEST_UTIL.getDataTestDir("TestStoreFile").toString();
private static final ChecksumType CKTYPE = ChecksumType.CRC32;
private static final int CKBYTES = 512;
private static String TEST_FAMILY = "cf";
@Override
public void setUp() throws Exception {
@ -89,16 +89,19 @@ public class TestStoreFile extends HBaseTestCase {
* @throws Exception
*/
public void testBasicHalfMapFile() throws Exception {
// Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
Path outputDir = new Path(new Path(this.testDir, "7e0102"),
"familyname");
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
this.fs, 2 * 1024)
.withOutputDir(outputDir)
final HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testBasicHalfMapFileTb"));
HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
conf, fs, new Path(this.testDir, hri.getTableNameAsString()), hri);
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs, 2 * 1024)
.withFilePath(regionFs.createTempName())
.build();
writeStoreFile(writer);
checkHalfHFile(new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
BloomType.NONE, NoOpDataBlockEncoder.INSTANCE));
Path sfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
StoreFile sf = new StoreFile(this.fs, sfPath, conf, cacheConf,
BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
checkHalfHFile(regionFs, sf);
}
private void writeStoreFile(final StoreFile.Writer writer) throws IOException {
@ -134,17 +137,19 @@ public class TestStoreFile extends HBaseTestCase {
* store files in other regions works.
* @throws IOException
*/
public void testReference()
throws IOException {
// Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
Path storedir = new Path(new Path(this.testDir, "7e0102"), "familyname");
public void testReference() throws IOException {
final HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testReferenceTb"));
HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
conf, fs, new Path(this.testDir, hri.getTableNameAsString()), hri);
// Make a store file and write data to it.
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
this.fs, 8 * 1024)
.withOutputDir(storedir)
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs, 8 * 1024)
.withFilePath(regionFs.createTempName())
.build();
writeStoreFile(writer);
StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
StoreFile hsf = new StoreFile(this.fs, hsfPath, conf, cacheConf,
BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
StoreFile.Reader reader = hsf.createReader();
// Split on a row, not in middle of row. Midkey returned by reader
@ -155,7 +160,8 @@ public class TestStoreFile extends HBaseTestCase {
kv = KeyValue.createKeyValueFromKey(reader.getLastKey());
byte [] finalRow = kv.getRow();
// Make a reference
Path refPath = StoreFile.split(fs, storedir, hsf, midRow, true);
HRegionInfo splitHri = new HRegionInfo(hri.getTableName(), null, midRow);
Path refPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, true);
StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf,
BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
// Now confirm that I can read from the reference and that it only gets
@ -173,26 +179,21 @@ public class TestStoreFile extends HBaseTestCase {
}
public void testHFileLink() throws IOException {
final String columnFamily = "f";
final HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testHFileLinkTb"));
// force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
Configuration testConf = new Configuration(this.conf);
FSUtils.setRootDir(testConf, this.testDir);
HRegionInfo hri = new HRegionInfo(Bytes.toBytes("table-link"));
Path storedir = new Path(new Path(this.testDir,
new Path(hri.getTableNameAsString(), hri.getEncodedName())), columnFamily);
HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
testConf, fs, new Path(this.testDir, hri.getTableNameAsString()), hri);
// Make a store file and write data to it.
StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf,
this.fs, 8 * 1024)
.withOutputDir(storedir)
StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs, 8 * 1024)
.withFilePath(regionFs.createTempName())
.build();
Path storeFilePath = writer.getPath();
writeStoreFile(writer);
writer.close();
Path dstPath = new Path(this.testDir, new Path("test-region", columnFamily));
Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
Path dstPath = new Path(regionFs.getTableDir(), new Path("test-region", TEST_FAMILY));
HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
Path linkFilePath = new Path(dstPath,
HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
@ -218,53 +219,49 @@ public class TestStoreFile extends HBaseTestCase {
* to hfilelinks (created by snapshot clones) can be properly interpreted.
*/
public void testReferenceToHFileLink() throws IOException {
final String columnFamily = "f";
// force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
Configuration testConf = new Configuration(this.conf);
FSUtils.setRootDir(testConf, this.testDir);
Path rootDir = FSUtils.getRootDir(conf);
String tablename = "_original-evil-name"; // adding legal table name chars to verify regex handles it.
HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tablename));
// store dir = <root>/<tablename>/<rgn>/<cf>
Path storedir = new Path(new Path(rootDir,
new Path(hri.getTableNameAsString(), hri.getEncodedName())), columnFamily);
// adding legal table name chars to verify regex handles it.
HRegionInfo hri = new HRegionInfo(Bytes.toBytes("_original-evil-name"));
HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
testConf, fs, new Path(this.testDir, hri.getTableNameAsString()), hri);
// Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
this.fs, 8 * 1024)
.withOutputDir(storedir)
StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs, 8 * 1024)
.withFilePath(regionFs.createTempName())
.build();
Path storeFilePath = writer.getPath();
writeStoreFile(writer);
writer.close();
Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
// create link to store file. <root>/clone/region/<cf>/<hfile>-<region>-<table>
String target = "clone";
Path dstPath = new Path(rootDir, new Path(new Path(target, "7e0102"), columnFamily));
HFileLink.create(conf, this.fs, dstPath, hri, storeFilePath.getName());
HRegionInfo hriClone = new HRegionInfo(Bytes.toBytes("clone"));
HRegionFileSystem cloneRegionFs = HRegionFileSystem.createRegionOnFileSystem(
testConf, fs, new Path(this.testDir, hri.getTableNameAsString()), hriClone);
Path dstPath = cloneRegionFs.getStoreDir(TEST_FAMILY);
HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
Path linkFilePath = new Path(dstPath,
HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
// create splits of the link.
// <root>/clone/splitA/<cf>/<reftohfilelink>,
// <root>/clone/splitB/<cf>/<reftohfilelink>
Path splitDirA = new Path(new Path(rootDir,
new Path(target, "571A")), columnFamily);
Path splitDirB = new Path(new Path(rootDir,
new Path(target, "571B")), columnFamily);
StoreFile f = new StoreFile(fs, linkFilePath, conf, cacheConf, BloomType.NONE,
HRegionInfo splitHriA = new HRegionInfo(hri.getTableName(), null, SPLITKEY);
HRegionInfo splitHriB = new HRegionInfo(hri.getTableName(), SPLITKEY, null);
StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE,
NoOpDataBlockEncoder.INSTANCE);
byte[] splitRow = SPLITKEY;
Path pathA = StoreFile.split(fs, splitDirA, f, splitRow, true); // top
Path pathB = StoreFile.split(fs, splitDirB, f, splitRow, false); // bottom
Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true); // top
Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false);// bottom
// OK test the thing
FSUtils.logFileSystemState(fs, rootDir, LOG);
FSUtils.logFileSystemState(fs, this.testDir, LOG);
// There is a case where a file with the hfilelink pattern is actually a daughter
// reference to a hfile link. This code in StoreFile that handles this case.
// Try to open store file from link
StoreFile hsfA = new StoreFile(this.fs, pathA, conf, cacheConf,
StoreFile hsfA = new StoreFile(this.fs, pathA, testConf, cacheConf,
BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
// Now confirm that I can read from the ref to link
@ -277,7 +274,7 @@ public class TestStoreFile extends HBaseTestCase {
assertTrue(count > 0); // read some rows here
// Try to open store file from link
StoreFile hsfB = new StoreFile(this.fs, pathB, conf, cacheConf,
StoreFile hsfB = new StoreFile(this.fs, pathB, testConf, cacheConf,
BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
// Now confirm that I can read from the ref to link
@ -291,31 +288,21 @@ public class TestStoreFile extends HBaseTestCase {
assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
private void checkHalfHFile(final StoreFile f)
throws IOException {
private void checkHalfHFile(final HRegionFileSystem regionFs, final StoreFile f)
throws IOException {
byte [] midkey = f.createReader().midkey();
KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
byte [] midRow = midKV.getRow();
// Create top split.
Path topDir = HStore.getStoreHomedir(this.testDir, "1",
Bytes.toBytes(f.getPath().getParent().getName()));
if (this.fs.exists(topDir)) {
this.fs.delete(topDir, true);
}
Path topPath = StoreFile.split(this.fs, topDir, f, midRow, true);
HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTableName(), null, midRow);
Path topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, midRow, true);
// Create bottom split.
Path bottomDir = HStore.getStoreHomedir(this.testDir, "2",
Bytes.toBytes(f.getPath().getParent().getName()));
if (this.fs.exists(bottomDir)) {
this.fs.delete(bottomDir, true);
}
Path bottomPath = StoreFile.split(this.fs, bottomDir, f, midRow, false);
HRegionInfo bottomHri = new HRegionInfo(regionFs.getRegionInfo().getTableName(), midRow, null);
Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false);
// Make readers on top and bottom.
StoreFile.Reader top =
new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
NoOpDataBlockEncoder.INSTANCE).createReader();
StoreFile.Reader bottom = new StoreFile(this.fs, bottomPath,
conf, cacheConf, BloomType.NONE,
StoreFile.Reader top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
NoOpDataBlockEncoder.INSTANCE).createReader();
StoreFile.Reader bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE,
NoOpDataBlockEncoder.INSTANCE).createReader();
ByteBuffer previous = null;
LOG.info("Midkey: " + midKV.toString());
@ -329,7 +316,7 @@ public class TestStoreFile extends HBaseTestCase {
ByteBuffer key = null;
HFileScanner topScanner = top.getScanner(false, false);
while ((!topScanner.isSeeked() && topScanner.seekTo()) ||
(topScanner.isSeeked() && topScanner.next())) {
(topScanner.isSeeked() && topScanner.next())) {
key = topScanner.getKey();
if (topScanner.getReader().getComparator().compare(key.array(),
@ -361,20 +348,19 @@ public class TestStoreFile extends HBaseTestCase {
LOG.info("Last in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
}
// Remove references.
this.fs.delete(topPath, false);
this.fs.delete(bottomPath, false);
regionFs.cleanupDaughterRegion(topHri);
regionFs.cleanupDaughterRegion(bottomHri);
// Next test using a midkey that does not exist in the file.
// First, do a key that is < than first key. Ensure splits behave
// properly.
byte [] badmidkey = Bytes.toBytes(" .");
topPath = StoreFile.split(this.fs, topDir, f, badmidkey, true);
bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey, false);
top = new StoreFile(this.fs, topPath, conf, cacheConf,
BloomType.NONE,
assertTrue(fs.exists(f.getPath()));
topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true);
bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
NoOpDataBlockEncoder.INSTANCE).createReader();
bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf,
BloomType.NONE,
bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE,
NoOpDataBlockEncoder.INSTANCE).createReader();
bottomScanner = bottom.getScanner(false, false);
int count = 0;
@ -409,18 +395,16 @@ public class TestStoreFile extends HBaseTestCase {
assertTrue(tmp.charAt(i) == 'z');
}
// Remove references.
this.fs.delete(topPath, false);
this.fs.delete(bottomPath, false);
regionFs.cleanupDaughterRegion(topHri);
regionFs.cleanupDaughterRegion(bottomHri);
// Test when badkey is > than last key in file ('||' > 'zz').
badmidkey = Bytes.toBytes("|||");
topPath = StoreFile.split(this.fs, topDir, f, badmidkey, true);
bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey, false);
top = new StoreFile(this.fs, topPath, conf, cacheConf,
BloomType.NONE,
topPath = splitStoreFile(regionFs,topHri, TEST_FAMILY, f, badmidkey, true);
bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
NoOpDataBlockEncoder.INSTANCE).createReader();
bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf,
BloomType.NONE,
bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE,
NoOpDataBlockEncoder.INSTANCE).createReader();
first = true;
bottomScanner = bottom.getScanner(false, false);
@ -463,10 +447,8 @@ public class TestStoreFile extends HBaseTestCase {
private static final String localFormatter = "%010d";
private void bloomWriteRead(StoreFile.Writer writer, FileSystem fs)
throws Exception {
float err = conf.getFloat(
BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0);
private void bloomWriteRead(StoreFile.Writer writer, FileSystem fs) throws Exception {
float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0);
Path f = writer.getPath();
long now = System.currentTimeMillis();
for (int i = 0; i < 2000; i += 2) {
@ -511,8 +493,7 @@ public class TestStoreFile extends HBaseTestCase {
public void testBloomFilter() throws Exception {
FileSystem fs = FileSystem.getLocal(conf);
conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
(float) 0.01);
conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, (float) 0.01);
conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);
// write the file
@ -530,11 +511,9 @@ public class TestStoreFile extends HBaseTestCase {
public void testDeleteFamilyBloomFilter() throws Exception {
FileSystem fs = FileSystem.getLocal(conf);
conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
(float) 0.01);
conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, (float) 0.01);
conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);
float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
0);
float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0);
// write the file
Path f = new Path(ROOT_DIR, getName());
@ -624,9 +603,8 @@ public class TestStoreFile extends HBaseTestCase {
int versions = 2;
// run once using columns and once using rows
BloomType[] bt =
{BloomType.ROWCOL, BloomType.ROW};
int[] expKeys = {rowCount*colCount, rowCount};
BloomType[] bt = {BloomType.ROWCOL, BloomType.ROW};
int[] expKeys = {rowCount*colCount, rowCount};
// below line deserves commentary. it is expected bloom false positives
// column = rowCount*2*colCount inserts
// row-level = only rowCount*2 inserts, but failures will be magnified by
@ -946,6 +924,15 @@ public class TestStoreFile extends HBaseTestCase {
assertEquals(startEvicted, cs.getEvictedCount());
}
private Path splitStoreFile(final HRegionFileSystem regionFs, final HRegionInfo hri,
final String family, final StoreFile sf, final byte[] splitKey, boolean isTopRef)
throws IOException {
FileSystem fs = regionFs.getFileSystem();
Path path = regionFs.splitStoreFile(hri, family, sf, splitKey, isTopRef);
Path regionDir = regionFs.commitDaughterRegion(hri);
return new Path(new Path(regionDir, family), path.getName());
}
private StoreFile.Writer writeStoreFile(Configuration conf,
CacheConfig cacheConf, Path path, int numBlocks)
throws IOException {
@ -961,8 +948,7 @@ public class TestStoreFile extends HBaseTestCase {
totalSize += kv.getLength() + 1;
}
int blockSize = totalSize / numBlocks;
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
blockSize)
StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs, blockSize)
.withFilePath(path)
.withMaxKeyCount(2000)
.withChecksumType(CKTYPE)
@ -1010,9 +996,7 @@ public class TestStoreFile extends HBaseTestCase {
Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
assertEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
}
}

View File

@ -530,8 +530,7 @@ public class TestWALReplay {
for (HColumnDescriptor hcd: htd.getFamilies()) {
cf_count++;
if (cf_count == 2) {
this.fs.delete(new Path(region.getRegionDir(), Bytes.toString(hcd.getName()))
, true);
region.getRegionFileSystem().deleteFamily(hcd.getNameAsString());
}
}

View File

@ -46,7 +46,7 @@ import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
import org.apache.hadoop.hbase.io.HFileLink;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
@ -156,19 +156,21 @@ public class TestRestoreSnapshotHelper {
private void createSnapshot(final Path rootDir, final Path snapshotDir, final HTableDescriptor htd)
throws IOException {
// First region, simple with one plain hfile.
HRegion r0 = HRegion.createHRegion(new HRegionInfo(htd.getName()), archiveDir,
conf, htd, null, true, true);
Path storeFile = new Path(new Path(r0.getRegionDir(), TEST_FAMILY), TEST_HFILE);
HRegionInfo hri = new HRegionInfo(htd.getName());
HRegionFileSystem r0fs = HRegionFileSystem.createRegionOnFileSystem(conf,
fs, new Path(archiveDir, hri.getTableNameAsString()), hri);
Path storeFile = new Path(rootDir, TEST_HFILE);
fs.createNewFile(storeFile);
r0.close();
r0fs.commitStoreFile(TEST_FAMILY, storeFile);
// Second region, used to test the split case.
// This region contains a reference to the hfile in the first region.
HRegion r1 = HRegion.createHRegion(new HRegionInfo(htd.getName()), archiveDir,
conf, htd, null, true, true);
fs.createNewFile(new Path(new Path(r1.getRegionDir(), TEST_FAMILY),
storeFile.getName() + '.' + r0.getRegionInfo().getEncodedName()));
r1.close();
hri = new HRegionInfo(htd.getName());
HRegionFileSystem r1fs = HRegionFileSystem.createRegionOnFileSystem(conf,
fs, new Path(archiveDir, hri.getTableNameAsString()), hri);
storeFile = new Path(rootDir, TEST_HFILE + '.' + r0fs.getRegionInfo().getEncodedName());
fs.createNewFile(storeFile);
r1fs.commitStoreFile(TEST_FAMILY, storeFile);
Path tableDir = HTableDescriptor.getTableDir(archiveDir, htd.getName());
FileUtil.copy(fs, tableDir, fs, snapshotDir, false, conf);

View File

@ -213,7 +213,8 @@ public class HFileArchiveTestingUtil {
* @return {@link Path} to the archive directory for the given region
*/
public static Path getRegionArchiveDir(Configuration conf, HRegion region) {
return HFileArchiveUtil.getRegionArchiveDir(conf, region.getTableDir(), region.getRegionDir());
return HFileArchiveUtil.getRegionArchiveDir(region.getRegionFileSystem().getTableDir(),
region.getRegionFileSystem().getRegionDir());
}
/**
@ -225,9 +226,8 @@ public class HFileArchiveTestingUtil {
*/
public static Path getStoreArchivePath(Configuration conf, HRegion region, Store store)
throws IOException {
HRegionInfo hri = region.getRegionInfo();
return HFileArchiveUtil.getStoreArchivePath(conf, hri.getTableNameAsString(), hri,
store.getFamily().getNameAsString());
return HFileArchiveUtil.getStoreArchivePath(conf, region.getRegionInfo(),
region.getRegionFileSystem().getTableDir(), store.getFamily().getName());
}
public static Path getStoreArchivePath(HBaseTestingUtility util, String tableName,

View File

@ -50,10 +50,9 @@ public class TestHFileArchiveUtil {
@Test
public void testRegionArchiveDir() {
Configuration conf = null;
Path tableDir = new Path("table");
Path regionDir = new Path("region");
assertNotNull(HFileArchiveUtil.getRegionArchiveDir(conf, tableDir, regionDir));
assertNotNull(HFileArchiveUtil.getRegionArchiveDir(tableDir, regionDir));
}
@Test