HBASE-7809 Refactor Split/Merge to use HRegionFileSystem
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1457148 13f79535-47bb-0310-9956-ffa450edef68
parent 436c63c497
commit eb039567de
@@ -265,35 +265,6 @@ public class HFileArchiver {
     }
   }
 
-  /**
-   * Archive the store file
-   * @param fs the filesystem where the store files live
-   * @param regionInfo region hosting the store files
-   * @param conf {@link Configuration} to examine to determine the archive directory
-   * @param tableDir {@link Path} to where the table is being stored (for building the archive path)
-   * @param family the family hosting the store files
-   * @param storeFile file to be archived
-   * @throws IOException if the files could not be correctly disposed.
-   */
-  public static void archiveStoreFile(FileSystem fs, HRegionInfo regionInfo,
-      Configuration conf, Path tableDir, byte[] family, Path storeFile) throws IOException {
-    Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
-    // make sure we don't archive if we can't and that the archive dir exists
-    if (!fs.mkdirs(storeArchiveDir)) {
-      throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:"
-          + Bytes.toString(family) + ", deleting compacted files instead.");
-    }
-
-    // do the actual archive
-    long start = EnvironmentEdgeManager.currentTimeMillis();
-    File file = new FileablePath(fs, storeFile);
-    if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) {
-      throw new IOException("Failed to archive/delete the file for region:"
-          + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family)
-          + " into " + storeArchiveDir + ". Something is probably awry on the filesystem.");
-    }
-  }
-
   /**
    * Archive the given files and resolve any conflicts with existing files via appending the time
    * archiving started (so all conflicts in the same group have the same timestamp appended).
@@ -30,10 +30,8 @@ import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.Chore;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -43,10 +41,8 @@ import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 
@@ -226,10 +222,8 @@ class CatalogJanitor extends Chore {
     boolean result = false;
     // Run checks on each daughter split.
     PairOfSameType<HRegionInfo> daughters = HRegionInfo.getDaughterRegions(rowContent);
-    Pair<Boolean, Boolean> a =
-      checkDaughterInFs(parent, daughters.getFirst());
-    Pair<Boolean, Boolean> b =
-      checkDaughterInFs(parent, daughters.getSecond());
+    Pair<Boolean, Boolean> a = checkDaughterInFs(parent, daughters.getFirst());
+    Pair<Boolean, Boolean> b = checkDaughterInFs(parent, daughters.getSecond());
     if (hasNoReferences(a) && hasNoReferences(b)) {
       LOG.debug("Deleting region " + parent.getRegionNameAsString() +
         " because daughter splits no longer hold references");
@@ -284,45 +278,35 @@ class CatalogJanitor extends Chore {
    */
   Pair<Boolean, Boolean> checkDaughterInFs(final HRegionInfo parent, final HRegionInfo daughter)
   throws IOException {
-    boolean references = false;
-    boolean exists = false;
     if (daughter == null)  {
       return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
     }
 
     FileSystem fs = this.services.getMasterFileSystem().getFileSystem();
     Path rootdir = this.services.getMasterFileSystem().getRootDir();
-    Path tabledir = new Path(rootdir, daughter.getTableNameAsString());
-    Path regiondir = new Path(tabledir, daughter.getEncodedName());
-    exists = fs.exists(regiondir);
-    if (!exists) {
-      LOG.warn("Daughter regiondir does not exist: " + regiondir.toString());
-      return new Pair<Boolean, Boolean>(exists, Boolean.FALSE);
+    Path tabledir = HTableDescriptor.getTableDir(rootdir, daughter.getTableName());
+
+    HRegionFileSystem regionFs = null;
+    try {
+      regionFs = HRegionFileSystem.openRegionFromFileSystem(
+          this.services.getConfiguration(), fs, tabledir, daughter, true);
+    } catch (IOException e) {
+      LOG.warn("Daughter region does not exist: " + daughter.getEncodedName());
+      return new Pair<Boolean, Boolean>(Boolean.FALSE, Boolean.FALSE);
     }
-    HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableName());
 
+    boolean references = false;
+    HTableDescriptor parentDescriptor = getTableDescriptor(parent.getTableNameAsString());
     for (HColumnDescriptor family: parentDescriptor.getFamilies()) {
-      Path p = HStore.getStoreHomedir(tabledir, daughter, family.getName());
-      if (!fs.exists(p)) continue;
-      // Look for reference files.  Call listStatus with anonymous instance of PathFilter.
-      FileStatus [] ps = FSUtils.listStatus(fs, p,
-          new PathFilter () {
-            public boolean accept(Path path) {
-              return StoreFileInfo.isReference(path);
-            }
-          }
-      );
-
-      if (ps != null && ps.length > 0) {
-        references = true;
+      if ((references = regionFs.hasReferences(family.getNameAsString()))) {
         break;
       }
     }
-    return new Pair<Boolean, Boolean>(Boolean.valueOf(exists),
-      Boolean.valueOf(references));
+    return new Pair<Boolean, Boolean>(Boolean.TRUE, Boolean.valueOf(references));
   }
 
-  private HTableDescriptor getTableDescriptor(byte[] tableName)
+  private HTableDescriptor getTableDescriptor(final String tableName)
   throws FileNotFoundException, IOException {
-    return this.services.getTableDescriptors().get(Bytes.toString(tableName));
+    return this.services.getTableDescriptors().get(tableName);
   }
 }
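Reviewer note: the janitor no longer lists store directories and filters for reference files
itself; it opens the daughter through HRegionFileSystem in read-only mode and delegates the
check to hasReferences(). A minimal sketch of how the result is consumed, using only names that
already appear in the hunks above ("parent" and "daughters" are in scope as shown there): the
first boolean reports whether the daughter still exists on disk, the second whether it still
holds reference files back to the parent.

    Pair<Boolean, Boolean> a = checkDaughterInFs(parent, daughters.getFirst());
    Pair<Boolean, Boolean> b = checkDaughterInFs(parent, daughters.getSecond());
    if (hasNoReferences(a) && hasNoReferences(b)) {
      // Neither daughter references the parent any more: the parent region
      // can be removed from META and its files archived.
    }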
@@ -77,7 +77,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
@@ -216,12 +215,6 @@ public class HRegion implements HeapSize { // , Writable{
   // TODO: account for each registered handler in HeapSize computation
   private Map<String, Service> coprocessorServiceHandlers = Maps.newHashMap();
 
-  //These variable are just used for getting data out of the region, to test on
-  //client side
-  // private int numStores = 0;
-  // private int [] storeSize = null;
-  // private byte [] name = null;
-
   public final AtomicLong memstoreSize = new AtomicLong(0);
 
   // Debug possible data loss due to WAL off
@@ -574,7 +567,7 @@ public class HRegion implements HeapSize { // , Writable{
     // Get rid of any splits or merges that were lost in-progress.  Clean out
     // these directories here on open.  We may be opening a region that was
     // being split but we crashed in the middle of it all.
-    SplitTransaction.cleanupAnySplitDetritus(this);
+    fs.cleanupAnySplitDetritus();
     fs.cleanupMergesDir();
 
     this.writestate.setReadOnly(this.htableDescriptor.isReadOnly());
@@ -664,7 +657,7 @@ public class HRegion implements HeapSize { // , Writable{
     mvcc.initialize(maxMemstoreTS + 1);
     // Recover any edits if available.
     maxSeqId = Math.max(maxSeqId, replayRecoveredEditsIfAny(
-        this.getRegionDir(), maxSeqIdInStores, reporter, status));
+        this.fs.getRegionDir(), maxSeqIdInStores, reporter, status));
     return maxSeqId;
   }
 
@@ -817,7 +810,7 @@ public class HRegion implements HeapSize { // , Writable{
    *
    * @throws IOException e
    */
-  public List<StoreFile> close() throws IOException {
+  public Map<byte[], List<StoreFile>> close() throws IOException {
     return close(false);
   }
 
@@ -837,7 +830,7 @@ public class HRegion implements HeapSize { // , Writable{
    *
    * @throws IOException e
    */
-  public List<StoreFile> close(final boolean abort) throws IOException {
+  public Map<byte[], List<StoreFile>> close(final boolean abort) throws IOException {
     // Only allow one thread to close at a time. Serialize them so dual
     // threads attempting to close will run up against each other.
     MonitoredTask status = TaskMonitor.get().createStatus(
@@ -854,9 +847,8 @@ public class HRegion implements HeapSize { // , Writable{
     }
   }
 
-  private List<StoreFile> doClose(
-      final boolean abort, MonitoredTask status)
-  throws IOException {
+  private Map<byte[], List<StoreFile>> doClose(final boolean abort, MonitoredTask status)
+      throws IOException {
     if (isClosed()) {
       LOG.warn("Region " + this + " already closed");
       return null;
@@ -902,28 +894,35 @@ public class HRegion implements HeapSize { // , Writable{
       internalFlushcache(status);
     }
 
-    List<StoreFile> result = new ArrayList<StoreFile>();
+    Map<byte[], List<StoreFile>> result =
+      new TreeMap<byte[], List<StoreFile>>(Bytes.BYTES_COMPARATOR);
     if (!stores.isEmpty()) {
       // initialize the thread pool for closing stores in parallel.
       ThreadPoolExecutor storeCloserThreadPool =
         getStoreOpenAndCloseThreadPool("StoreCloserThread-" + this.getRegionNameAsString());
-      CompletionService<Collection<StoreFile>> completionService =
-        new ExecutorCompletionService<Collection<StoreFile>>(storeCloserThreadPool);
+      CompletionService<Pair<byte[], Collection<StoreFile>>> completionService =
+        new ExecutorCompletionService<Pair<byte[], Collection<StoreFile>>>(storeCloserThreadPool);
 
       // close each store in parallel
       for (final Store store : stores.values()) {
         completionService
-          .submit(new Callable<Collection<StoreFile>>() {
-            public Collection<StoreFile> call() throws IOException {
-              return store.close();
+          .submit(new Callable<Pair<byte[], Collection<StoreFile>>>() {
+            public Pair<byte[], Collection<StoreFile>> call() throws IOException {
+              return new Pair<byte[], Collection<StoreFile>>(
+                store.getFamily().getName(), store.close());
             }
           });
       }
       try {
         for (int i = 0; i < stores.size(); i++) {
-          Future<Collection<StoreFile>> future = completionService.take();
-          Collection<StoreFile> storeFileList = future.get();
-          result.addAll(storeFileList);
+          Future<Pair<byte[], Collection<StoreFile>>> future = completionService.take();
+          Pair<byte[], Collection<StoreFile>> storeFiles = future.get();
+          List<StoreFile> familyFiles = result.get(storeFiles.getFirst());
+          if (familyFiles == null) {
+            familyFiles = new ArrayList<StoreFile>();
+            result.put(storeFiles.getFirst(), familyFiles);
+          }
+          familyFiles.addAll(storeFiles.getSecond());
         }
       } catch (InterruptedException e) {
         throw new IOException(e);
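Note: close() and doClose() now return the closed store files keyed by column family instead of
a flat list, so callers (merge and split later in this patch) can hand them to HRegionFileSystem
per family. A small usage sketch, assuming "region" is an open HRegion and LOG a
commons-logging Log (both stand-ins, not part of this patch):

    Map<byte[], List<StoreFile>> byFamily = region.close(false);
    for (Map.Entry<byte[], List<StoreFile>> e : byFamily.entrySet()) {
      String family = Bytes.toString(e.getKey());
      for (StoreFile sf : e.getValue()) {
        LOG.debug("Closed store file " + sf.getPath() + " of family " + family);
      }
    }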
@@ -1062,11 +1061,6 @@ public class HRegion implements HeapSize { // , Writable{
     return this.baseConf;
   }
 
-  /** @return region directory Path */
-  public Path getRegionDir() {
-    return fs.getRegionDir();
-  }
-
   /** @return {@link FileSystem} being used by this region */
   public FileSystem getFilesystem() {
     return fs.getFileSystem();
@@ -2348,7 +2342,7 @@ public class HRegion implements HeapSize { // , Writable{
     // 1. dump region meta info into the snapshot directory
     LOG.debug("Storing region-info for snapshot.");
     HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf,
-      getFilesystem(), snapshotDir, getRegionInfo());
+      this.fs.getFileSystem(), snapshotDir, getRegionInfo());
 
     // 2. iterate through all the stores in the region
     LOG.debug("Creating references for hfiles");
@@ -3290,11 +3284,6 @@ public class HRegion implements HeapSize { // , Writable{
     return this.getRegionNameAsString();
   }
 
-  /** @return Path of region base directory */
-  public Path getTableDir() {
-    return this.fs.getTableDir();
-  }
-
   /**
    * RegionScannerImpl is used to combine scanners from multiple Stores (aka column families).
    */
@@ -4022,7 +4011,8 @@ public class HRegion implements HeapSize { // , Writable{
    */
   public static HRegion openHRegion(final HRegion other, final CancelableProgressable reporter)
       throws IOException {
-    HRegion r = newHRegion(other.getTableDir(), other.getLog(), other.getFilesystem(),
+    HRegionFileSystem regionFs = other.getRegionFileSystem();
+    HRegion r = newHRegion(regionFs.getTableDir(), other.getLog(), regionFs.getFileSystem(),
         other.baseConf, other.getRegionInfo(), other.getTableDesc(), null);
     return r.openHRegion(reporter);
   }
@@ -4056,22 +4046,14 @@ public class HRegion implements HeapSize { // , Writable{
   /**
    * Create a daughter region from given a temp directory with the region data.
    * @param hri Spec. for daughter region to open.
-   * @param daughterTmpDir Directory that contains region files.
    * @throws IOException
    */
-  HRegion createDaughterRegion(final HRegionInfo hri, final Path daughterTmpDir)
-  throws IOException {
-    FileSystem fs = this.fs.getFileSystem();
-    HRegion r = HRegion.newHRegion(this.getTableDir(), this.getLog(), fs,
+  HRegion createDaughterRegionFromSplits(final HRegionInfo hri) throws IOException {
+    HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getLog(), fs.getFileSystem(),
       this.getBaseConf(), hri, this.getTableDesc(), rsServices);
     r.readRequestsCount.set(this.getReadRequestsCount() / 2);
     r.writeRequestsCount.set(this.getWriteRequestsCount() / 2);
-    // Move the tmp dir in the expected location
-    if (daughterTmpDir != null && fs.exists(daughterTmpDir)) {
-      if (!fs.rename(daughterTmpDir, r.getRegionDir())) {
-        LOG.warn("Unable to rename " + daughterTmpDir + " to " + r.getRegionDir());
-      }
-    }
+    fs.commitDaughterRegion(hri);
     return r;
   }
 
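Note: the daughter-creation helper no longer takes a temp directory and renames it itself; the
rename out of the parent's .splits area is now owned by HRegionFileSystem.commitDaughterRegion()
(added further down in this patch). Rough caller-side sketch, assuming "parent" is the region
being split and "hri_a" one of the daughter HRegionInfos:

    // moves <parent>/.splits/<encoded-a> into place and returns the opened daughter
    HRegion daughterA = parent.createDaughterRegionFromSplits(hri_a);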
@@ -4086,8 +4068,7 @@ public class HRegion implements HeapSize { // , Writable{
    * @throws IOException
    */
   // TODO remove since only test and merge use this
-  public static void addRegionToMETA(HRegion meta, HRegion r)
-  throws IOException {
+  public static void addRegionToMETA(final HRegion meta, final HRegion r) throws IOException {
     meta.checkResources();
     // The row key is the region name
     byte[] row = r.getRegionName();
@@ -4144,24 +4125,6 @@ public class HRegion implements HeapSize { // , Writable{
       (Bytes.compareTo(info.getEndKey(), row) > 0));
   }
 
-  /**
-   * Make the directories for a specific column family
-   *
-   * @param fs the file system
-   * @param tabledir base directory where region will live (usually the table dir)
-   * @param hri
-   * @param colFamily the column family
-   * @throws IOException
-   */
-  private static Path makeColumnFamilyDirs(FileSystem fs, Path tabledir,
-      final HRegionInfo hri, byte [] colFamily) throws IOException {
-    Path dir = HStore.getStoreHomedir(tabledir, hri, colFamily);
-    if (!fs.mkdirs(dir)) {
-      LOG.warn("Failed to create " + dir);
-    }
-    return dir;
-  }
-
   /**
    * Merge two HRegions.  The regions must be adjacent and must not overlap.
    *
@@ -4202,37 +4165,35 @@ public class HRegion implements HeapSize { // , Writable{
    * @return new merged region
    * @throws IOException
    */
-  public static HRegion merge(HRegion a, HRegion b)
-  throws IOException {
+  public static HRegion merge(final HRegion a, final HRegion b) throws IOException {
     if (!a.getRegionInfo().getTableNameAsString().equals(
         b.getRegionInfo().getTableNameAsString())) {
       throw new IOException("Regions do not belong to the same table");
     }
 
-    FileSystem fs = a.getFilesystem();
+    FileSystem fs = a.getRegionFileSystem().getFileSystem();
 
     // Make sure each region's cache is empty
 
     a.flushcache();
     b.flushcache();
 
     // Compact each region so we only have one store file per family
 
     a.compactStores(true);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Files for region: " + a);
-      FSUtils.logFileSystemState(fs, a.getRegionDir(), LOG);
+      a.getRegionFileSystem().logFileSystemState(LOG);
     }
     b.compactStores(true);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Files for region: " + b);
-      FSUtils.logFileSystemState(fs, b.getRegionDir(), LOG);
+      b.getRegionFileSystem().logFileSystemState(LOG);
     }
 
     Configuration conf = a.baseConf;
     HTableDescriptor tabledesc = a.getTableDesc();
     HLog log = a.getLog();
-    Path tableDir = a.getTableDir();
+    Path tableDir = a.getRegionFileSystem().getTableDir();
 
     // Presume both are of same region type -- i.e. both user or catalog
     // table regions.  This way can use comparator.
     final byte[] startKey =
@@ -4258,43 +4219,34 @@ public class HRegion implements HeapSize { // , Writable{
       ? b.getEndKey()
       : a.getEndKey());
 
-    HRegionInfo newRegionInfo =
-        new HRegionInfo(tabledesc.getName(), startKey, endKey);
-    LOG.info("Creating new region " + newRegionInfo.toString());
-    String encodedName = newRegionInfo.getEncodedName();
-    Path newRegionDir = HRegion.getRegionDir(a.getTableDir(), encodedName);
-    if(fs.exists(newRegionDir)) {
-      throw new IOException("Cannot merge; target file collision at " +
-          newRegionDir);
-    }
-    fs.mkdirs(newRegionDir);
+    HRegionInfo newRegionInfo = new HRegionInfo(tabledesc.getName(), startKey, endKey);
+    LOG.info("Creating new region " + newRegionInfo);
+    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
+      conf, fs, tableDir, newRegionInfo);
 
     LOG.info("starting merge of regions: " + a + " and " + b +
       " into new region " + newRegionInfo.toString() +
       " with start key <" + Bytes.toStringBinary(startKey) + "> and end key <" +
       Bytes.toStringBinary(endKey) + ">");
 
-    // Move HStoreFiles under new region directory
-    Map<byte [], List<StoreFile>> byFamily =
-      new TreeMap<byte [], List<StoreFile>>(Bytes.BYTES_COMPARATOR);
-    byFamily = filesByFamily(byFamily, a.close());
-    byFamily = filesByFamily(byFamily, b.close());
-    for (Map.Entry<byte [], List<StoreFile>> es : byFamily.entrySet()) {
-      byte [] colFamily = es.getKey();
-      Path storeDir = makeColumnFamilyDirs(fs, tableDir, newRegionInfo, colFamily);
-      // Because we compacted the source regions we should have no more than two
-      // HStoreFiles per family and there will be no reference store
-      List<StoreFile> srcFiles = es.getValue();
-      for (StoreFile hsf: srcFiles) {
-        StoreFile.rename(fs, hsf.getPath(), StoreFile.getUniqueFile(fs, storeDir));
-      }
-    }
+    // Because we compacted the source regions we should have no more than two
+    // StoreFiles per family and there will be no reference store
+    Map<byte[], List<StoreFile>> aStoreFiles = a.close();
+    Map<byte[], List<StoreFile>> bStoreFiles = b.close();
+
+    // Move StoreFiles under new region directory
+    regionFs.commitStoreFiles(aStoreFiles);
+    regionFs.commitStoreFiles(bStoreFiles);
+
     if (LOG.isDebugEnabled()) {
       LOG.debug("Files for new region");
-      FSUtils.logFileSystemState(fs, newRegionDir, LOG);
+      regionFs.logFileSystemState(LOG);
     }
 
+    // Create HRegion and update the metrics
     HRegion dstRegion = HRegion.newHRegion(tableDir, log, fs, conf,
-        newRegionInfo, a.getTableDesc(), null);
+        newRegionInfo, tabledesc, null);
     dstRegion.readRequestsCount.set(a.readRequestsCount.get() + b.readRequestsCount.get());
     dstRegion.writeRequestsCount.set(a.writeRequestsCount.get() + b.writeRequestsCount.get());
     dstRegion.checkAndMutateChecksFailed.set(
@@ -4303,44 +4255,23 @@ public class HRegion implements HeapSize { // , Writable{
         a.checkAndMutateChecksPassed.get() + b.checkAndMutateChecksPassed.get());
     dstRegion.initialize();
     dstRegion.compactStores();
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("Files for new region");
-      FSUtils.logFileSystemState(fs, dstRegion.getRegionDir(), LOG);
+      dstRegion.getRegionFileSystem().logFileSystemState(LOG);
     }
 
     // delete out the 'A' region
-    HFileArchiver.archiveRegion(fs,
-      FSUtils.getRootDir(a.getBaseConf()), a.getTableDir(), a.getRegionDir());
+    HRegionFileSystem.deleteRegionFromFileSystem(
+      a.getBaseConf(), fs, tableDir, a.getRegionInfo());
     // delete out the 'B' region
-    HFileArchiver.archiveRegion(fs,
-      FSUtils.getRootDir(b.getBaseConf()), b.getTableDir(), b.getRegionDir());
+    HRegionFileSystem.deleteRegionFromFileSystem(
+      b.getBaseConf(), fs, tableDir, b.getRegionInfo());
 
     LOG.info("merge completed. New region is " + dstRegion);
 
     return dstRegion;
   }
 
-  /*
-   * Fills a map with a vector of store files keyed by column family.
-   * @param byFamily Map to fill.
-   * @param storeFiles Store files to process.
-   * @param family
-   * @return Returns <code>byFamily</code>
-   */
-  private static Map<byte [], List<StoreFile>> filesByFamily(
-      Map<byte [], List<StoreFile>> byFamily, List<StoreFile> storeFiles) {
-    for (StoreFile src: storeFiles) {
-      byte [] family = src.getFamily();
-      List<StoreFile> v = byFamily.get(family);
-      if (v == null) {
-        v = new ArrayList<StoreFile>();
-        byFamily.put(family, v);
-      }
-      v.add(src);
-    }
-    return byFamily;
-  }
-
   /**
    * @return True if needs a major compaction.
    * @throws IOException
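Note: the filesystem half of merge() is now expressed entirely through HRegionFileSystem; the
manual mkdirs/rename/archive calls are gone. A condensed sketch of the new flow, using only the
calls visible in the hunks above and the same variable names as in merge() (flush/compact and
META bookkeeping omitted):

    FileSystem fs = a.getRegionFileSystem().getFileSystem();
    Path tableDir = a.getRegionFileSystem().getTableDir();
    HRegionFileSystem regionFs =
        HRegionFileSystem.createRegionOnFileSystem(conf, fs, tableDir, newRegionInfo);
    regionFs.commitStoreFiles(a.close());   // close() now returns Map<byte[], List<StoreFile>>
    regionFs.commitStoreFiles(b.close());
    HRegionFileSystem.deleteRegionFromFileSystem(a.getBaseConf(), fs, tableDir, a.getRegionInfo());
    HRegionFileSystem.deleteRegionFromFileSystem(b.getBaseConf(), fs, tableDir, b.getRegionInfo());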
@@ -24,6 +24,8 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 
 import org.apache.commons.logging.Log;
@@ -36,11 +38,13 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.fs.HFileSystem;
+import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 
@@ -108,14 +112,14 @@ public class HRegionFileSystem {
   // Temp Helpers
   // ===========================================================================
   /** @return {@link Path} to the region's temp directory, used for file creations */
-  public Path getTempDir() {
+  Path getTempDir() {
     return new Path(getRegionDir(), REGION_TEMP_DIR);
   }
 
   /**
    * Clean up any temp detritus that may have been left around from previous operation attempts.
    */
-  public void cleanupTempDir() throws IOException {
+  void cleanupTempDir() throws IOException {
     FSUtils.deleteDirectory(fs, getTempDir());
   }
 
@@ -137,7 +141,7 @@ public class HRegionFileSystem {
    * @return {@link Path} to the directory of the specified family
    * @throws IOException if the directory creation fails.
    */
-  public Path createStoreDir(final String familyName) throws IOException {
+  Path createStoreDir(final String familyName) throws IOException {
     Path storeDir = getStoreDir(familyName);
     if (!fs.exists(storeDir) && !fs.mkdirs(storeDir)) {
       throw new IOException("Failed create of: " + storeDir);
@@ -175,8 +179,26 @@ public class HRegionFileSystem {
     return storeFiles;
   }
 
+  /**
+   * Returns true if the specified family has reference files
+   * @param familyName Column Family Name
+   * @return true if family contains reference files
+   * @throws IOException
+   */
+  public boolean hasReferences(final String familyName) throws IOException {
+    FileStatus[] files = FSUtils.listStatus(fs, getStoreDir(familyName),
+        new PathFilter () {
+          public boolean accept(Path path) {
+            return StoreFileInfo.isReference(path);
+          }
+        }
+    );
+    return files != null && files.length > 0;
+  }
+
   /**
    * @return the set of families present on disk
+   * @throws IOException
    */
   public Collection<String> getFamilies() throws IOException {
     FileStatus[] fds = FSUtils.listStatus(fs, getRegionDir(), new FSUtils.FamilyDirFilter(fs));
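Note: hasReferences() absorbs the ad-hoc PathFilter scan that callers such as CatalogJanitor
used to run against the store directory themselves (see the CatalogJanitor hunk earlier in this
patch). Usage sketch, with "regionFs" an HRegionFileSystem opened for the daughter and "family"
an HColumnDescriptor (both stand-ins):

    if (regionFs.hasReferences(family.getNameAsString())) {
      // the daughter still carries reference files pointing back at the parent region
    }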
@@ -190,6 +212,24 @@ public class HRegionFileSystem {
     return families;
   }
 
+  /**
+   * Remove the region family from disk, archiving the store files.
+   * @param familyName Column Family Name
+   * @throws IOException if an error occours during the archiving
+   */
+  public void deleteFamily(final String familyName) throws IOException {
+    // archive family store files
+    HFileArchiver.archiveFamily(fs, conf, regionInfo, tableDir, Bytes.toBytes(familyName));
+
+    // delete the family folder
+    Path familyDir = getStoreDir(familyName);
+    if (!fs.delete(familyDir, true)) {
+      throw new IOException("Could not delete family " + familyName +
+        " from FileSystem for region " + regionInfo.getRegionNameAsString() +
+        "(" + regionInfo.getEncodedName() + ")");
+    }
+  }
+
   /**
    * Generate a unique file name, used by createTempName() and commitStoreFile()
    * @param suffix extra information to append to the generated name
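Note: deleteFamily() archives a family's store files through HFileArchiver before removing the
directory, so nothing is lost if the family has to be restored. A hypothetical caller (not part
of this patch) dropping a column family might do, with "familyName" the byte[] family name:

    regionFs.deleteFamily(Bytes.toString(familyName));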
@@ -252,7 +292,7 @@ public class HRegionFileSystem {
    * @return The new {@link Path} of the committed file
    * @throws IOException
    */
-  public Path commitStoreFile(final String familyName, final Path buildPath,
+  private Path commitStoreFile(final String familyName, final Path buildPath,
       final long seqNum, final boolean generateNewName) throws IOException {
     Path storeDir = getStoreDir(familyName);
     fs.mkdirs(storeDir);
@@ -271,6 +311,20 @@ public class HRegionFileSystem {
     return dstPath;
   }
 
+  /**
+   * Moves multiple store files to the relative region's family store directory.
+   * @param storeFiles list of store files divided by family
+   * @throws IOException
+   */
+  void commitStoreFiles(final Map<byte[], List<StoreFile>> storeFiles) throws IOException {
+    for (Map.Entry<byte[], List<StoreFile>> es: storeFiles.entrySet()) {
+      String familyName = Bytes.toString(es.getKey());
+      for (StoreFile sf: es.getValue()) {
+        commitStoreFile(familyName, sf.getPath());
+      }
+    }
+  }
+
   /**
    * Archives the specified store file from the specified family.
    * @param familyName Family that contains the store files
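Note: commitStoreFiles() is the package-private counterpart used by HRegion.merge(): it takes
the per-family map produced by HRegion.close() and moves every file into the corresponding
family directory of this region. Sketch (a same-package caller is assumed; "region" and
"regionFs" are stand-ins):

    Map<byte[], List<StoreFile>> closedFiles = region.close();  // keyed by family, see the HRegion change above
    regionFs.commitStoreFiles(closedFiles);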
@@ -306,7 +360,7 @@ public class HRegionFileSystem {
    * @return The destination {@link Path} of the bulk loaded file
    * @throws IOException
    */
-  public Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
+  Path bulkLoadStoreFile(final String familyName, Path srcPath, long seqNum)
       throws IOException {
     // Copy the file if it's on another filesystem
     FileSystem srcFs = srcPath.getFileSystem(conf);
@@ -331,35 +385,149 @@ public class HRegionFileSystem {
   // Splits Helpers
   // ===========================================================================
   /** @return {@link Path} to the temp directory used during split operations */
-  public Path getSplitsDir() {
+  Path getSplitsDir() {
     return new Path(getRegionDir(), REGION_SPLITS_DIR);
   }
 
+  Path getSplitsDir(final HRegionInfo hri) {
+    return new Path(getSplitsDir(), hri.getEncodedName());
+  }
+
   /**
    * Clean up any split detritus that may have been left around from previous split attempts.
    */
-  public void cleanupSplitsDir() throws IOException {
+  void cleanupSplitsDir() throws IOException {
     FSUtils.deleteDirectory(fs, getSplitsDir());
   }
 
+  /**
+   * Clean up any split detritus that may have been left around from previous
+   * split attempts.
+   * Call this method on initial region deploy.
+   * @throws IOException
+   */
+  void cleanupAnySplitDetritus() throws IOException {
+    Path splitdir = this.getSplitsDir();
+    if (!fs.exists(splitdir)) return;
+    // Look at the splitdir.  It could have the encoded names of the daughter
+    // regions we tried to make.  See if the daughter regions actually got made
+    // out under the tabledir.  If here under splitdir still, then the split did
+    // not complete.  Try and do cleanup.  This code WILL NOT catch the case
+    // where we successfully created daughter a but regionserver crashed during
+    // the creation of region b.  In this case, there'll be an orphan daughter
+    // dir in the filesystem.  TOOD: Fix.
+    FileStatus[] daughters = FSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs));
+    if (daughters != null) {
+      for (FileStatus daughter: daughters) {
+        Path daughterDir = new Path(getTableDir(), daughter.getPath().getName());
+        if (fs.exists(daughterDir) && !fs.delete(daughterDir, true)) {
+          throw new IOException("Failed delete of " + daughterDir);
+        }
+      }
+    }
+    cleanupSplitsDir();
+    LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
+  }
+
+  /**
+   * Remove daughter region
+   * @param regionInfo daughter {@link HRegionInfo}
+   * @throws IOException
+   */
+  void cleanupDaughterRegion(final HRegionInfo regionInfo) throws IOException {
+    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
+    if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
+      throw new IOException("Failed delete of " + regionDir);
+    }
+  }
+
+  /**
+   * Commit a daughter region, moving it from the split temporary directory
+   * to the proper location in the filesystem.
+   * @param regionInfo daughter {@link HRegionInfo}
+   * @throws IOException
+   */
+  Path commitDaughterRegion(final HRegionInfo regionInfo) throws IOException {
+    Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName());
+    Path daughterTmpDir = this.getSplitsDir(regionInfo);
+    if (fs.exists(daughterTmpDir) && !fs.rename(daughterTmpDir, regionDir)) {
+      throw new IOException("Unable to rename " + daughterTmpDir + " to " + regionDir);
+    }
+    return regionDir;
+  }
+
+  /**
+   * Create the region splits directory.
+   */
+  void createSplitsDir() throws IOException {
+    Path splitdir = getSplitsDir();
+    if (fs.exists(splitdir)) {
+      LOG.info("The " + splitdir + " directory exists.  Hence deleting it to recreate it");
+      if (!fs.delete(splitdir, true)) {
+        throw new IOException("Failed deletion of " + splitdir
+            + " before creating them again.");
+      }
+    }
+    if (!fs.mkdirs(splitdir)) {
+      throw new IOException("Failed create of " + splitdir);
+    }
+  }
+
+  /**
+   * Write out a split reference. Package local so it doesnt leak out of
+   * regionserver.
+   * @param hri {@link HRegionInfo} of the destination
+   * @param familyName Column Family Name
+   * @param f File to split.
+   * @param splitRow Split Row
+   * @param top True if we are referring to the top half of the hfile.
+   * @return Path to created reference.
+   * @throws IOException
+   */
+  Path splitStoreFile(final HRegionInfo hri, final String familyName,
+      final StoreFile f, final byte[] splitRow, final boolean top) throws IOException {
+    Path splitDir = new Path(getSplitsDir(hri), familyName);
+    // A reference to the bottom half of the hsf store file.
+    Reference r =
+      top ? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
+    // Add the referred-to regions name as a dot separated suffix.
+    // See REF_NAME_REGEX regex above.  The referred-to regions name is
+    // up in the path of the passed in <code>f</code> -- parentdir is family,
+    // then the directory above is the region name.
+    String parentRegionName = regionInfo.getEncodedName();
+    // Write reference with same file id only with the other region name as
+    // suffix and into the new region location (under same family).
+    Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
+    return r.write(fs, p);
+  }
+
   // ===========================================================================
   // Merge Helpers
   // ===========================================================================
   /** @return {@link Path} to the temp directory used during merge operations */
-  public Path getMergesDir() {
+  Path getMergesDir() {
     return new Path(getRegionDir(), REGION_MERGES_DIR);
   }
 
   /**
    * Clean up any merge detritus that may have been left around from previous merge attempts.
    */
-  public void cleanupMergesDir() throws IOException {
+  void cleanupMergesDir() throws IOException {
     FSUtils.deleteDirectory(fs, getMergesDir());
   }
 
   // ===========================================================================
   // Create/Open/Delete Helpers
   // ===========================================================================
+  /**
+   * Log the current state of the region
+   * @param LOG log to output information
+   * @throws IOException if an unexpected exception occurs
+   */
+  void logFileSystemState(final Log LOG) throws IOException {
+    FSUtils.logFileSystemState(fs, this.getRegionDir(), LOG);
+  }
+
   /**
    * @param hri
    * @return Content of the file we write out to the filesystem under a region
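Note: the split-related filesystem steps that used to live in SplitTransaction and HRegion are
now concentrated in these helpers. A rough lifecycle sketch using only the methods added above;
"hri_a", "family", "sf" and "splitRow" are stand-ins for a daughter HRegionInfo, a family name,
a parent StoreFile and the split row:

    regionFs.createSplitsDir();                                   // <region>/.splits
    Path ref = regionFs.splitStoreFile(hri_a, family, sf, splitRow, false);  // bottom-half reference
    Path daughterDir = regionFs.commitDaughterRegion(hri_a);      // .splits/<encoded> -> <table>/<encoded>
    // rollback path: regionFs.cleanupDaughterRegion(hri_a); regionFs.cleanupSplitsDir();
    // on region open after a crash: regionFs.cleanupAnySplitDetritus();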
@@ -517,10 +685,12 @@ public class HRegionFileSystem {
    * @param fs {@link FileSystem} from which to add the region
    * @param tableDir {@link Path} to where the table is being stored
    * @param regionInfo {@link HRegionInfo} for region to be added
+   * @param readOnly True if you don't want to edit the region data
    * @throws IOException if the region creation fails due to a FileSystem exception.
    */
   public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf,
-      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException {
+      final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo, boolean readOnly)
+      throws IOException {
     HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
     Path regionDir = regionFs.getRegionDir();
 
@@ -529,12 +699,16 @@ public class HRegionFileSystem {
       throw new IOException("The specified region do not exists on disk: " + regionDir);
     }
 
-    // Cleanup temporary directories
-    regionFs.cleanupTempDir();
-    regionFs.cleanupSplitsDir();
-    regionFs.cleanupMergesDir();
-    // if it doesn't exists, Write HRI to a file, in case we need to recover .META.
-    regionFs.checkRegionInfoOnFilesystem();
+    if (!readOnly) {
+      // Cleanup temporary directories
+      regionFs.cleanupTempDir();
+      regionFs.cleanupSplitsDir();
+      regionFs.cleanupMergesDir();
+
+      // if it doesn't exists, Write HRI to a file, in case we need to recover .META.
+      regionFs.checkRegionInfoOnFilesystem();
+    }
+
     return regionFs;
   }
 
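Note: openRegionFromFileSystem() gains a readOnly flag so that components like CatalogJanitor
can inspect a region's layout without modifying it, while a regular open still cleans
temp/splits/merges leftovers and rewrites the region info file if it is missing. Sketch, with
"conf", "fs", "tableDir" and "hri" assumed to be in scope:

    // read-only view, e.g. from the master side (no cleanup, no region info rewrite)
    HRegionFileSystem ro = HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, hri, true);
    // normal open on the region server
    HRegionFileSystem rw = HRegionFileSystem.openRegionFromFileSystem(conf, fs, tableDir, hri, false);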
@@ -93,13 +93,12 @@ class SplitRequest implements Runnable {
         return;
       }
       LOG.info("Region split, META updated, and report to master. Parent="
-          + parent.getRegionInfo().getRegionNameAsString() + ", new regions: "
+          + parent.getRegionNameAsString() + ", new regions: "
           + st.getFirstDaughter().getRegionNameAsString() + ", "
           + st.getSecondDaughter().getRegionNameAsString() + ". Split took "
          + StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime));
     } catch (IOException ex) {
-      LOG.error("Split failed " + this, RemoteExceptionHandler
-          .checkIOException(ex));
+      LOG.error("Split failed " + this, RemoteExceptionHandler.checkIOException(ex));
       server.checkFileSystem();
     } finally {
       if (this.parent.getCoprocessorHost() != null) {
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.ListIterator;
+import java.util.Map;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Executors;
@@ -34,9 +35,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.RegionTransition;
 import org.apache.hadoop.hbase.Server;
@@ -46,7 +44,6 @@ import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.zookeeper.ZKAssign;
@@ -90,7 +87,6 @@ public class SplitTransaction {
   private final HRegion parent;
   private HRegionInfo hri_a;
   private HRegionInfo hri_b;
-  private Path splitdir;
   private long fileSplitTimeout = 30000;
   private int znodeVersion = -1;
 
@@ -150,7 +146,6 @@ public class SplitTransaction {
   public SplitTransaction(final HRegion r, final byte [] splitrow) {
     this.parent = r;
     this.splitrow = splitrow;
-    this.splitdir = getSplitDir(this.parent);
   }
 
   /**
@@ -174,10 +169,8 @@ public class SplitTransaction {
       return false;
     }
     long rid = getDaughterRegionIdTimestamp(hri);
-    this.hri_a = new HRegionInfo(hri.getTableName(), startKey, this.splitrow,
-      false, rid);
-    this.hri_b = new HRegionInfo(hri.getTableName(), this.splitrow, endKey,
-      false, rid);
+    this.hri_a = new HRegionInfo(hri.getTableName(), startKey, this.splitrow, false, rid);
+    this.hri_b = new HRegionInfo(hri.getTableName(), this.splitrow, endKey, false, rid);
     return true;
   }
 
@@ -206,7 +199,8 @@ public class SplitTransaction {
    * @param server Hosting server instance.  Can be null when testing (won't try
    * and update in zk if a null server)
    * @param services Used to online/offline regions.
-   * @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
+   * @throws IOException If thrown, transaction failed.
+   *    Call {@link #rollback(Server, RegionServerServices)}
    * @return Regions created
    */
   /* package */PairOfSameType<HRegion> createDaughters(final Server server,
@@ -216,7 +210,8 @@ public class SplitTransaction {
         (services != null && services.isStopping())) {
       throw new IOException("Server is stopped or stopping");
     }
-    assert !this.parent.lock.writeLock().isHeldByCurrentThread(): "Unsafe to hold write lock while performing RPCs";
+    assert !this.parent.lock.writeLock().isHeldByCurrentThread():
+      "Unsafe to hold write lock while performing RPCs";
 
     // Coprocessor callback
     if (this.parent.getCoprocessorHost() != null) {
@@ -253,7 +248,8 @@ public class SplitTransaction {
         // Master will get the callback for node change only if the transition is successful.
         // Note that if the transition fails then the rollback will delete the created znode
         // as the journal entry SET_SPLITTING_IN_ZK is added.
-        // TODO : May be we can add some new state to znode and handle the new state incase of success/failure
+        // TODO : May be we can add some new state to znode and handle the new state incase
+        // of success/failure
         this.znodeVersion = transitionNodeSplitting(server.getZooKeeper(),
           this.parent.getRegionInfo(), server.getServerName(), -1);
       } catch (KeeperException e) {
@@ -262,10 +258,10 @@ public class SplitTransaction {
       }
     }
 
-    createSplitDir(this.parent.getFilesystem(), this.splitdir);
+    this.parent.getRegionFileSystem().createSplitsDir();
     this.journal.add(JournalEntry.CREATE_SPLIT_DIR);
 
-    List<StoreFile> hstoreFilesToSplit = null;
+    Map<byte[], List<StoreFile>> hstoreFilesToSplit = null;
     Exception exceptionToThrow = null;
     try{
       hstoreFilesToSplit = this.parent.close(false);
@ -298,18 +294,18 @@ public class SplitTransaction {
|
||||||
// splitStoreFiles creates daughter region dirs under the parent splits dir
|
// splitStoreFiles creates daughter region dirs under the parent splits dir
|
||||||
// Nothing to unroll here if failure -- clean up of CREATE_SPLIT_DIR will
|
// Nothing to unroll here if failure -- clean up of CREATE_SPLIT_DIR will
|
||||||
// clean this up.
|
// clean this up.
|
||||||
splitStoreFiles(this.splitdir, hstoreFilesToSplit);
|
splitStoreFiles(hstoreFilesToSplit);
|
||||||
|
|
||||||
// Log to the journal that we are creating region A, the first daughter
|
// Log to the journal that we are creating region A, the first daughter
|
||||||
// region. We could fail halfway through. If we do, we could have left
|
// region. We could fail halfway through. If we do, we could have left
|
||||||
// stuff in fs that needs cleanup -- a storefile or two. Thats why we
|
// stuff in fs that needs cleanup -- a storefile or two. Thats why we
|
||||||
// add entry to journal BEFORE rather than AFTER the change.
|
// add entry to journal BEFORE rather than AFTER the change.
|
||||||
this.journal.add(JournalEntry.STARTED_REGION_A_CREATION);
|
this.journal.add(JournalEntry.STARTED_REGION_A_CREATION);
|
||||||
HRegion a = createDaughterRegion(this.hri_a);
|
HRegion a = this.parent.createDaughterRegionFromSplits(this.hri_a);
|
||||||
|
|
||||||
// Ditto
|
// Ditto
|
||||||
this.journal.add(JournalEntry.STARTED_REGION_B_CREATION);
|
this.journal.add(JournalEntry.STARTED_REGION_B_CREATION);
|
||||||
HRegion b = createDaughterRegion(this.hri_b);
|
HRegion b = this.parent.createDaughterRegionFromSplits(this.hri_b);
|
||||||
|
|
||||||
// This is the point of no return. Adding subsequent edits to .META. as we
|
// This is the point of no return. Adding subsequent edits to .META. as we
|
||||||
// do below when we do the daughter opens adding each to .META. can fail in
|
// do below when we do the daughter opens adding each to .META. can fail in
|
||||||
|
@ -347,7 +343,8 @@ public class SplitTransaction {
|
||||||
* @param services Used to online/offline regions.
|
* @param services Used to online/offline regions.
|
||||||
* @param a first daughter region
|
* @param a first daughter region
|
||||||
* @param a second daughter region
|
* @param a second daughter region
|
||||||
* @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
|
* @throws IOException If thrown, transaction failed.
|
||||||
|
* Call {@link #rollback(Server, RegionServerServices)}
|
||||||
*/
|
*/
|
||||||
/* package */void openDaughters(final Server server,
|
/* package */void openDaughters(final Server server,
|
||||||
final RegionServerServices services, HRegion a, HRegion b)
|
final RegionServerServices services, HRegion a, HRegion b)
|
||||||
|
@ -404,7 +401,8 @@ public class SplitTransaction {
|
||||||
* @param services Used to online/offline regions.
|
* @param services Used to online/offline regions.
|
||||||
* @param a first daughter region
|
* @param a first daughter region
|
||||||
* @param a second daughter region
|
* @param a second daughter region
|
||||||
* @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
|
* @throws IOException If thrown, transaction failed.
|
||||||
|
* Call {@link #rollback(Server, RegionServerServices)}
|
||||||
*/
|
*/
|
||||||
/* package */void transitionZKNode(final Server server,
|
/* package */void transitionZKNode(final Server server,
|
||||||
final RegionServerServices services, HRegion a, HRegion b)
|
final RegionServerServices services, HRegion a, HRegion b)
|
||||||
|
@ -456,7 +454,8 @@ public class SplitTransaction {
|
||||||
* @param server Hosting server instance. Can be null when testing (won't try
|
* @param server Hosting server instance. Can be null when testing (won't try
|
||||||
* and update in zk if a null server)
|
* and update in zk if a null server)
|
||||||
* @param services Used to online/offline regions.
|
* @param services Used to online/offline regions.
|
||||||
* @throws IOException If thrown, transaction failed. Call {@link #rollback(Server, RegionServerServices)}
|
* @throws IOException If thrown, transaction failed.
|
||||||
|
* Call {@link #rollback(Server, RegionServerServices)}
|
||||||
* @return Regions created
|
* @return Regions created
|
||||||
* @throws IOException
|
* @throws IOException
|
||||||
* @see #rollback(Server, RegionServerServices)
|
* @see #rollback(Server, RegionServerServices)
|
||||||
|
@ -542,56 +541,8 @@ public class SplitTransaction {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private static Path getSplitDir(final HRegion r) {
|
private void splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit)
|
||||||
return new Path(r.getRegionDir(), HRegionFileSystem.REGION_SPLITS_DIR);
|
throws IOException {
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @param fs Filesystem to use
|
|
||||||
* @param splitdir Directory to store temporary split data in
|
|
||||||
* @throws IOException If <code>splitdir</code> already exists or we fail
|
|
||||||
* to create it.
|
|
||||||
* @see #cleanupSplitDir(FileSystem, Path)
|
|
||||||
*/
|
|
||||||
private static void createSplitDir(final FileSystem fs, final Path splitdir)
|
|
||||||
throws IOException {
|
|
||||||
if (fs.exists(splitdir)) {
|
|
||||||
LOG.info("The " + splitdir
|
|
||||||
+ " directory exists. Hence deleting it to recreate it");
|
|
||||||
if (!fs.delete(splitdir, true)) {
|
|
||||||
throw new IOException("Failed deletion of " + splitdir
|
|
||||||
+ " before creating them again.");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (!fs.mkdirs(splitdir)) throw new IOException("Failed create of " + splitdir);
|
|
||||||
}
|
|
||||||
|
|
||||||
private static void cleanupSplitDir(final FileSystem fs, final Path splitdir)
|
|
||||||
throws IOException {
|
|
||||||
// Splitdir may have been cleaned up by reopen of the parent dir.
|
|
||||||
deleteDir(fs, splitdir, false);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @param fs Filesystem to use
|
|
||||||
* @param dir Directory to delete
|
|
||||||
* @param mustPreExist If true, we'll throw exception if <code>dir</code>
|
|
||||||
* does not preexist, else we'll just pass.
|
|
||||||
* @throws IOException Thrown if we fail to delete passed <code>dir</code>
|
|
||||||
*/
|
|
||||||
private static void deleteDir(final FileSystem fs, final Path dir,
|
|
||||||
final boolean mustPreExist)
|
|
||||||
throws IOException {
|
|
||||||
if (!fs.exists(dir)) {
|
|
||||||
if (mustPreExist) throw new IOException(dir.toString() + " does not exist!");
|
|
||||||
} else if (!fs.delete(dir, true)) {
|
|
||||||
throw new IOException("Failed delete of " + dir);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private void splitStoreFiles(final Path splitdir,
|
|
||||||
final List<StoreFile> hstoreFilesToSplit)
|
|
||||||
throws IOException {
|
|
||||||
if (hstoreFilesToSplit == null) {
|
if (hstoreFilesToSplit == null) {
|
||||||
// Could be null because close didn't succeed -- for now consider it fatal
|
// Could be null because close didn't succeed -- for now consider it fatal
|
||||||
throw new IOException("Close returned empty list of StoreFiles");
|
throw new IOException("Close returned empty list of StoreFiles");
|
||||||
|
@ -611,11 +562,12 @@ public class SplitTransaction {
|
||||||
(ThreadPoolExecutor) Executors.newFixedThreadPool(nbFiles, factory);
|
(ThreadPoolExecutor) Executors.newFixedThreadPool(nbFiles, factory);
|
||||||
List<Future<Void>> futures = new ArrayList<Future<Void>>(nbFiles);
|
List<Future<Void>> futures = new ArrayList<Future<Void>>(nbFiles);
|
||||||
|
|
||||||
// Split each store file.
|
// Split each store file.
|
||||||
for (StoreFile sf: hstoreFilesToSplit) {
|
for (Map.Entry<byte[], List<StoreFile>> entry: hstoreFilesToSplit.entrySet()) {
|
||||||
//splitStoreFile(sf, splitdir);
|
for (StoreFile sf: entry.getValue()) {
|
||||||
StoreFileSplitter sfs = new StoreFileSplitter(sf, splitdir);
|
StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
|
||||||
futures.add(threadPool.submit(sfs));
|
futures.add(threadPool.submit(sfs));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
// Shutdown the pool
|
// Shutdown the pool
|
||||||
threadPool.shutdown();
|
threadPool.shutdown();
|
||||||
|
@ -652,14 +604,11 @@ public class SplitTransaction {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private void splitStoreFile(final StoreFile sf, final Path splitdir)
|
private void splitStoreFile(final byte[] family, final StoreFile sf) throws IOException {
|
||||||
throws IOException {
|
HRegionFileSystem fs = this.parent.getRegionFileSystem();
|
||||||
FileSystem fs = this.parent.getFilesystem();
|
String familyName = Bytes.toString(family);
|
||||||
byte [] family = sf.getFamily();
|
fs.splitStoreFile(this.hri_a, familyName, sf, this.splitrow, false);
|
||||||
Path storedir = HStore.getStoreHomedir(splitdir, this.hri_a, family);
|
fs.splitStoreFile(this.hri_b, familyName, sf, this.splitrow, true);
|
||||||
StoreFile.split(fs, storedir, sf, this.splitrow, false);
|
|
||||||
storedir = HStore.getStoreHomedir(splitdir, this.hri_b, family);
|
|
||||||
StoreFile.split(fs, storedir, sf, this.splitrow, true);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -667,60 +616,25 @@ public class SplitTransaction {
|
||||||
* in parallel instead of sequentially.
|
* in parallel instead of sequentially.
|
||||||
*/
|
*/
|
||||||
class StoreFileSplitter implements Callable<Void> {
|
class StoreFileSplitter implements Callable<Void> {
|
||||||
|
private final byte[] family;
|
||||||
private final StoreFile sf;
|
private final StoreFile sf;
|
||||||
private final Path splitdir;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Constructor that takes what it needs to split
|
* Constructor that takes what it needs to split
|
||||||
|
* @param family Family that contains the store file
|
||||||
* @param sf which file
|
* @param sf which file
|
||||||
* @param splitdir where the splitting is done
|
|
||||||
*/
|
*/
|
||||||
public StoreFileSplitter(final StoreFile sf, final Path splitdir) {
|
public StoreFileSplitter(final byte[] family, final StoreFile sf) {
|
||||||
this.sf = sf;
|
this.sf = sf;
|
||||||
this.splitdir = splitdir;
|
this.family = family;
|
||||||
}
|
}
|
||||||
|
|
||||||
public Void call() throws IOException {
|
public Void call() throws IOException {
|
||||||
splitStoreFile(sf, splitdir);
|
splitStoreFile(family, sf);
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* @param hri Spec. for daughter region to open.
|
|
||||||
* @param rsServices RegionServerServices this region should use.
|
|
||||||
* @return Created daughter HRegion.
|
|
||||||
* @throws IOException
|
|
||||||
* @see #cleanupDaughterRegion(FileSystem, Path, String)
|
|
||||||
*/
|
|
||||||
HRegion createDaughterRegion(final HRegionInfo hri) throws IOException {
|
|
||||||
// Package private so unit tests have access.
|
|
||||||
Path regionDir = getSplitDirForDaughter(this.splitdir, hri);
|
|
||||||
return this.parent.createDaughterRegion(hri, regionDir);
|
|
||||||
}
|
|
||||||
|
|
||||||
private static void cleanupDaughterRegion(final FileSystem fs,
|
|
||||||
final Path tabledir, final String encodedName)
|
|
||||||
throws IOException {
|
|
||||||
Path regiondir = HRegion.getRegionDir(tabledir, encodedName);
|
|
||||||
// Dir may not preexist.
|
|
||||||
deleteDir(fs, regiondir, false);
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Get the daughter directories in the splits dir. The splits dir is under
|
|
||||||
* the parent regions' directory.
|
|
||||||
* @param splitdir
|
|
||||||
* @param hri
|
|
||||||
* @return Path to daughter split dir.
|
|
||||||
* @throws IOException
|
|
||||||
*/
|
|
||||||
private static Path getSplitDirForDaughter(final Path splitdir, final HRegionInfo hri)
|
|
||||||
throws IOException {
|
|
||||||
return new Path(splitdir, hri.getEncodedName());
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @param server Hosting server instance (May be null when testing).
|
* @param server Hosting server instance (May be null when testing).
|
||||||
* @param services
|
* @param services
|
||||||
|
@ -736,7 +650,6 @@ public class SplitTransaction {
|
||||||
}
|
}
|
||||||
|
|
||||||
boolean result = true;
|
boolean result = true;
|
||||||
FileSystem fs = this.parent.getFilesystem();
|
|
||||||
ListIterator<JournalEntry> iterator =
|
ListIterator<JournalEntry> iterator =
|
||||||
this.journal.listIterator(this.journal.size());
|
this.journal.listIterator(this.journal.size());
|
||||||
// Iterate in reverse.
|
// Iterate in reverse.
|
||||||
|
@ -751,8 +664,8 @@ public class SplitTransaction {
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case CREATE_SPLIT_DIR:
|
case CREATE_SPLIT_DIR:
|
||||||
this.parent.writestate.writesEnabled = true;
|
this.parent.writestate.writesEnabled = true;
|
||||||
cleanupSplitDir(fs, this.splitdir);
|
this.parent.getRegionFileSystem().cleanupSplitsDir();
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case CLOSED_PARENT_REGION:
|
case CLOSED_PARENT_REGION:
|
||||||
|
@ -771,13 +684,11 @@ public class SplitTransaction {
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case STARTED_REGION_A_CREATION:
|
case STARTED_REGION_A_CREATION:
|
||||||
cleanupDaughterRegion(fs, this.parent.getTableDir(),
|
this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_a);
|
||||||
this.hri_a.getEncodedName());
|
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case STARTED_REGION_B_CREATION:
|
case STARTED_REGION_B_CREATION:
|
||||||
cleanupDaughterRegion(fs, this.parent.getTableDir(),
|
this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_b);
|
||||||
this.hri_b.getEncodedName());
|
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case OFFLINED_PARENT:
|
case OFFLINED_PARENT:
|
||||||
|
@ -810,39 +721,6 @@ public class SplitTransaction {
|
||||||
return hri_b;
|
return hri_b;
|
||||||
}
|
}
|
||||||
|
|
||||||
// For unit testing.
|
|
||||||
Path getSplitDir() {
|
|
||||||
return this.splitdir;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Clean up any split detritus that may have been left around from previous
|
|
||||||
* split attempts.
|
|
||||||
* Call this method on initial region deploy. Cleans up any mess
|
|
||||||
* left by previous deploys of passed <code>r</code> region.
|
|
||||||
* @param r
|
|
||||||
* @throws IOException
|
|
||||||
*/
|
|
||||||
static void cleanupAnySplitDetritus(final HRegion r) throws IOException {
|
|
||||||
Path splitdir = getSplitDir(r);
|
|
||||||
FileSystem fs = r.getFilesystem();
|
|
||||||
if (!fs.exists(splitdir)) return;
|
|
||||||
// Look at the splitdir. It could have the encoded names of the daughter
|
|
||||||
// regions we tried to make. See if the daughter regions actually got made
|
|
||||||
// out under the tabledir. If here under splitdir still, then the split did
|
|
||||||
// not complete. Try and do cleanup. This code WILL NOT catch the case
|
|
||||||
// where we successfully created daughter a but regionserver crashed during
|
|
||||||
// the creation of region b. In this case, there'll be an orphan daughter
|
|
||||||
// dir in the filesystem. TOOD: Fix.
|
|
||||||
FileStatus [] daughters = fs.listStatus(splitdir, new FSUtils.DirFilter(fs));
|
|
||||||
for (int i = 0; i < daughters.length; i++) {
|
|
||||||
cleanupDaughterRegion(fs, r.getTableDir(),
|
|
||||||
daughters[i].getPath().getName());
|
|
||||||
}
|
|
||||||
cleanupSplitDir(r.getFilesystem(), splitdir);
|
|
||||||
LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
|
|
||||||
}
|
|
||||||
|
|
||||||
private static void cleanZK(final Server server, final HRegionInfo hri) {
|
private static void cleanZK(final Server server, final HRegionInfo hri) {
|
||||||
try {
|
try {
|
||||||
// Only delete if its in expected state; could have been hijacked.
|
// Only delete if its in expected state; could have been hijacked.
|
||||||
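
Taken together, the SplitTransaction hunks above move the split mechanics behind HRegionFileSystem: HRegion.close(false) now hands back the store files grouped by column family, and each half-reference is written by the region's filesystem abstraction rather than by StoreFile.split(). A minimal sketch of that flow, assuming the transaction fields shown in the diff (parent, hri_a, hri_b, splitrow); the local names byFamily and regionFs are illustrative only and not part of the patch:

    // Sketch only: mirrors the calls introduced by this patch, not the full method bodies.
    Map<byte[], List<StoreFile>> byFamily = parent.close(false);    // store files per family
    HRegionFileSystem regionFs = parent.getRegionFileSystem();
    regionFs.createSplitsDir();                                     // replaces createSplitDir(fs, splitdir)
    for (Map.Entry<byte[], List<StoreFile>> entry : byFamily.entrySet()) {
      String family = Bytes.toString(entry.getKey());
      for (StoreFile sf : entry.getValue()) {
        regionFs.splitStoreFile(hri_a, family, sf, splitrow, false); // bottom-half reference
        regionFs.splitStoreFile(hri_b, family, sf, splitrow, true);  // top-half reference
      }
    }
    HRegion a = parent.createDaughterRegionFromSplits(hri_a);
    HRegion b = parent.createDaughterRegionFromSplits(hri_b);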

@@ -36,7 +36,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
@@ -45,7 +44,6 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.KeyValue.MetaKeyComparator;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.BlockType;
@@ -253,13 +251,6 @@ public class StoreFile {
    return this.fileInfo.getPath();
  }
 
-  /**
-   * @return The Store/ColumnFamily this file belongs to.
-   */
-  byte [] getFamily() {
-    return Bytes.toBytes(this.getPath().getParent().getName());
-  }
-
  /**
   * @return True if this is a StoreFile Reference; call after {@link #open()}
   * else may get wrong answer.
@@ -545,28 +536,6 @@ public class StoreFile {
    return sb.toString();
  }
 
-  /**
-   * Utility to help with rename.
-   * @param fs
-   * @param src
-   * @param tgt
-   * @return True if succeeded.
-   * @throws IOException
-   */
-  public static Path rename(final FileSystem fs,
-      final Path src,
-      final Path tgt)
-      throws IOException {
-
-    if (!fs.exists(src)) {
-      throw new FileNotFoundException(src.toString());
-    }
-    if (!fs.rename(src, tgt)) {
-      throw new IOException("Failed rename of " + src + " to " + tgt);
-    }
-    return tgt;
-  }
-
  public static class WriterBuilder {
    private final Configuration conf;
    private final CacheConfig cacheConf;
@@ -720,38 +689,6 @@ public class StoreFile {
    return new Path(dir, UUID.randomUUID().toString().replaceAll("-", ""));
  }
 
-  /**
-   * Write out a split reference. Package local so it doesnt leak out of
-   * regionserver.
-   * @param fs
-   * @param splitDir Presumes path format is actually
-   * <code>SOME_DIRECTORY/REGIONNAME/FAMILY</code>.
-   * @param f File to split.
-   * @param splitRow
-   * @param top True if we are referring to the top half of the hfile.
-   * @return Path to created reference.
-   * @throws IOException
-   */
-  static Path split(final FileSystem fs,
-      final Path splitDir,
-      final StoreFile f,
-      final byte [] splitRow,
-      final boolean top)
-      throws IOException {
-    // A reference to the bottom half of the hsf store file.
-    Reference r =
-      top? Reference.createTopReference(splitRow): Reference.createBottomReference(splitRow);
-    // Add the referred-to regions name as a dot separated suffix.
-    // See REF_NAME_REGEX regex above. The referred-to regions name is
-    // up in the path of the passed in <code>f</code> -- parentdir is family,
-    // then the directory above is the region name.
-    String parentRegionName = f.getPath().getParent().getParent().getName();
-    // Write reference with same file id only with the other region name as
-    // suffix and into the new region location (under same family).
-    Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName);
-    return r.write(fs, p);
-  }
-
  public Long getMinimumTimestamp() {
    return (getReader().timeRangeTracker == null) ?
      null :

@@ -330,7 +330,7 @@ public class RestoreSnapshotHelper {
          Path hfile = new Path(familyDir, hfileName);
          LOG.trace("Removing hfile=" + hfile +
            " from region=" + regionInfo.getEncodedName() + " table=" + tableName);
-          HFileArchiver.archiveStoreFile(fs, regionInfo, conf, tableDir, family, hfile);
+          HFileArchiver.archiveStoreFile(conf, fs, regionInfo, tableDir, family, hfile);
        }
      } else {
        // Family doesn't exists in the snapshot

@@ -555,7 +555,7 @@ public class HBaseFsck extends Configured implements Tool {
      HRegionInfo hri = new HRegionInfo(template.getName(), orphanRegionRange.getFirst(), orphanRegionRange.getSecond());
      LOG.info("Creating new region : " + hri);
      HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), hri, template);
-      Path target = region.getRegionDir();
+      Path target = region.getRegionFileSystem().getRegionDir();
 
      // rename all the data to new region
      mergeRegionDirs(target, hi);
@@ -2183,11 +2183,11 @@ public class HBaseFsck extends Configured implements Tool {
      HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd);
      LOG.info("Created new empty container region: " +
          newRegion + " to contain regions: " + Joiner.on(",").join(overlap));
-      debugLsr(region.getRegionDir());
+      debugLsr(region.getRegionFileSystem().getRegionDir());
 
      // all target regions are closed, should be able to safely cleanup.
      boolean didFix= false;
-      Path target = region.getRegionDir();
+      Path target = region.getRegionFileSystem().getRegionDir();
      for (HbckInfo contained : overlap) {
        LOG.info("Merging " + contained + " into " + target );
        int merges = mergeRegionDirs(target, contained);

@@ -52,21 +52,6 @@ public class HFileArchiveUtil {
    return HStore.getStoreHomedir(tableArchiveDir, regionName, Bytes.toBytes(familyName));
  }
 
-  /**
-   * Get the directory to archive a store directory
-   * @param conf {@link Configuration} to read for the archive directory name
-   * @param tableName table name under which the store currently lives
-   * @param region parent region information under which the store currently lives
-   * @param familyName name of the family in the store
-   * @return {@link Path} to the directory to archive the given store or
-   *         <tt>null</tt> if it should not be archived
-   */
-  public static Path getStoreArchivePath(final Configuration conf, final String tableName,
-      final HRegionInfo region, final String familyName) throws IOException {
-    Path tableArchiveDir = getTableArchivePath(conf, tableName);
-    return HStore.getStoreHomedir(tableArchiveDir, region, Bytes.toBytes(familyName));
-  }
-
  /**
   * Get the directory to archive a store directory
   * @param conf {@link Configuration} to read for the archive directory name. Can be null.
@@ -79,19 +64,17 @@ public class HFileArchiveUtil {
  public static Path getStoreArchivePath(Configuration conf, HRegionInfo region, Path tabledir,
      byte[] family) {
    Path tableArchiveDir = getTableArchivePath(tabledir);
-    return HStore.getStoreHomedir(tableArchiveDir,
-      HRegionInfo.encodeRegionName(region.getRegionName()), family);
+    return HStore.getStoreHomedir(tableArchiveDir, region, family);
  }
 
  /**
   * Get the archive directory for a given region under the specified table
-   * @param conf {@link Configuration} to read the archive directory from. Can be null
   * @param tabledir the original table directory. Cannot be null.
   * @param regiondir the path to the region directory. Cannot be null.
   * @return {@link Path} to the directory to archive the given region, or <tt>null</tt> if it
   *         should not be archived
   */
-  public static Path getRegionArchiveDir(Configuration conf, Path tabledir, Path regiondir) {
+  public static Path getRegionArchiveDir(Path tabledir, Path regiondir) {
    // get the archive directory for a table
    Path archiveDir = getTableArchivePath(tabledir);
 
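
The HFileArchiveUtil hunk above keeps only the table-dir based archive-path helpers, and the RestoreSnapshotHelper hunk earlier reorders HFileArchiver.archiveStoreFile so that the Configuration comes first. A hedged sketch of calling the surviving signatures; rootDir, tableNameBytes, regionInfo, regionDir, family, storeFilePath and fs are placeholders, not values taken from this patch:

    // Sketch of the post-patch helper signatures, under the assumptions noted above.
    Path tableDir = HTableDescriptor.getTableDir(rootDir, tableNameBytes);
    Path storeArchive = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family);
    Path regionArchive = HFileArchiveUtil.getRegionArchiveDir(tableDir, regionDir); // Configuration argument dropped
    HFileArchiver.archiveStoreFile(conf, fs, regionInfo, tableDir, family, storeFilePath);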

@@ -43,7 +43,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -57,8 +56,7 @@ import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.NoServerForRegionException;
-import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
@@ -123,7 +121,8 @@ import com.google.common.collect.Sets;
 * <p>
 * The more complicated answer is that this depends upon the largest storefile
 * in your region. With a growing data size, this will get larger over time. You
- * want the largest region to be just big enough that the {@link HStore} compact
+ * want the largest region to be just big enough that the
+ * {@link org.apache.hadoop.hbase.regionserver.HStore} compact
 * selection algorithm only compacts it due to a timed major. If you don't, your
 * cluster can be prone to compaction storms as the algorithm decides to run
 * major compactions on a large series of regions all at once. Note that
@@ -628,9 +627,10 @@ public class RegionSplitter {
    LinkedList<Pair<byte[], byte[]>> physicalSplitting = Lists.newLinkedList();
 
    // get table info
-    Path hbDir = new Path(table.getConfiguration().get(HConstants.HBASE_DIR));
-    Path tableDir = HTableDescriptor.getTableDir(hbDir, table.getTableName());
+    Path rootDir = FSUtils.getRootDir(table.getConfiguration());
+    Path tableDir = HTableDescriptor.getTableDir(rootDir, table.getTableName());
    FileSystem fs = tableDir.getFileSystem(table.getConfiguration());
+    HTableDescriptor htd = table.getTableDescriptor();
 
    // clear the cache to forcibly refresh region information
    table.clearRegionCache();
@@ -661,25 +661,22 @@ public class RegionSplitter {
        check.add(table.getRegionLocation(start).getRegionInfo());
        check.add(table.getRegionLocation(split).getRegionInfo());
        for (HRegionInfo hri : check.toArray(new HRegionInfo[] {})) {
-          boolean refFound = false;
          byte[] sk = hri.getStartKey();
          if (sk.length == 0)
            sk = splitAlgo.firstRow();
          String startKey = splitAlgo.rowToStr(sk);
-          HTableDescriptor htd = table.getTableDescriptor();
+          HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
+              table.getConfiguration(), fs, tableDir, hri, true);
 
          // check every Column Family for that region
+          boolean refFound = false;
          for (HColumnDescriptor c : htd.getFamilies()) {
-            Path cfDir = HStore.getStoreHomedir(tableDir, hri, c.getName());
-            if (fs.exists(cfDir)) {
-              for (FileStatus file : fs.listStatus(cfDir)) {
-                refFound |= StoreFileInfo.isReference(file.getPath());
-                if (refFound)
-                  break;
-              }
-            }
-            if (refFound)
+            if ((refFound = regionFs.hasReferences(htd.getNameAsString()))) {
              break;
+            }
          }
 
          // compaction is completed when all reference files are gone
          if (!refFound) {
            check.remove(hri);
@@ -691,8 +688,7 @@ public class RegionSplitter {
            physicalSplitting.add(region);
          }
        } catch (NoServerForRegionException nsfre) {
-          LOG.debug("No Server Exception thrown for: "
-              + splitAlgo.rowToStr(start));
+          LOG.debug("No Server Exception thrown for: " + splitAlgo.rowToStr(start));
          physicalSplitting.add(region);
          table.clearRegionCache();
        }
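
In the RegionSplitter hunk above, the hand-rolled scan of every column-family directory for reference files is replaced by a read-only HRegionFileSystem view of the region. A small sketch of the new check, assuming the trailing boolean to openRegionFromFileSystem is a read-only flag as the call site suggests:

    // Open the region's on-disk layout without taking ownership of it.
    HRegionFileSystem regionFs = HRegionFileSystem.openRegionFromFileSystem(
        table.getConfiguration(), fs, tableDir, hri, true);
    // A daughter region still carries reference files until compaction rewrites them.
    boolean stillHasReferences = regionFs.hasReferences(htd.getNameAsString());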

@@ -139,7 +139,8 @@ public class TestHFileArchiving {
    FileSystem fs = UTIL.getTestFileSystem();
 
    // now attempt to depose the region
-    Path regionDir = HRegion.getRegionDir(region.getTableDir().getParent(), region.getRegionInfo());
+    Path rootDir = region.getRegionFileSystem().getTableDir().getParent();
+    Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
 
    HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());
 
@@ -172,7 +173,7 @@ public class TestHFileArchiving {
    assertEquals(1, servingRegions.size());
    HRegion region = servingRegions.get(0);
 
-    FileSystem fs = region.getFilesystem();
+    FileSystem fs = region.getRegionFileSystem().getFileSystem();
 
    // make sure there are some files in the regiondir
    Path rootDir = FSUtils.getRootDir(fs.getConf());
@@ -238,8 +239,7 @@ public class TestHFileArchiving {
    clearArchiveDirectory();
 
    // then get the current store files
-    Path regionDir = region.getRegionDir();
-    List<String> storeFiles = getRegionStoreFiles(fs, regionDir);
+    List<String> storeFiles = getRegionStoreFiles(region);
 
    // then delete the table so the hfiles get archived
    UTIL.deleteTable(TABLE_NAME);
@@ -299,8 +299,7 @@ public class TestHFileArchiving {
    clearArchiveDirectory();
 
    // then get the current store files
-    Path regionDir = region.getRegionDir();
-    List<String> storeFiles = getRegionStoreFiles(fs, regionDir);
+    List<String> storeFiles = getRegionStoreFiles(region);
 
    // then delete the table so the hfiles get archived
    UTIL.getHBaseAdmin().deleteColumn(TABLE_NAME, TEST_FAM);
@@ -416,8 +415,9 @@ public class TestHFileArchiving {
    return fileNames;
  }
 
-  private List<String> getRegionStoreFiles(final FileSystem fs, final Path regionDir)
-      throws IOException {
+  private List<String> getRegionStoreFiles(final HRegion region) throws IOException {
+    Path regionDir = region.getRegionFileSystem().getRegionDir();
+    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    List<String> storeFiles = getAllFileNames(fs, regionDir);
    // remove all the non-storefile named files for the region
    for (int i = 0; i < storeFiles.size(); i++) {

@@ -70,9 +70,9 @@ public class TestHFileLinkCleaner {
 
    Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
    Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
-          tableName, hri, familyName);
+          tableName, hri.getEncodedName(), familyName);
    Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
-          tableLinkName, hriLink, familyName);
+          tableLinkName, hriLink.getEncodedName(), familyName);
 
    // Create hfile /hbase/table-link/region/cf/getEncodedName.HFILE(conf);
    Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);

@@ -245,8 +245,8 @@ public class TestHRegion extends HBaseTestCase {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, conf, family);
    try {
-      Path regiondir = region.getRegionDir();
-      FileSystem fs = region.getFilesystem();
+      Path regiondir = region.getRegionFileSystem().getRegionDir();
+      FileSystem fs = region.getRegionFileSystem().getFileSystem();
      byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
 
      Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
@@ -257,8 +257,7 @@ public class TestHRegion extends HBaseTestCase {
      for (long i = minSeqId; i <= maxSeqId; i += 10) {
        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
        fs.create(recoveredEdits);
-        HLog.Writer writer = HLogFactory.createWriter(fs,
-            recoveredEdits, conf);
+        HLog.Writer writer = HLogFactory.createWriter(fs, recoveredEdits, conf);
 
        long time = System.nanoTime();
        WALEdit edit = new WALEdit();
@@ -273,8 +272,7 @@ public class TestHRegion extends HBaseTestCase {
      Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(
          Bytes.BYTES_COMPARATOR);
      for (Store store : region.getStores().values()) {
-        maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(),
-            minSeqId - 1);
+        maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId - 1);
      }
      long seqId = region.replayRecoveredEditsIfAny(regiondir, maxSeqIdInStores, null, status);
      assertEquals(maxSeqId, seqId);
@@ -297,8 +295,8 @@ public class TestHRegion extends HBaseTestCase {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, conf, family);
    try {
-      Path regiondir = region.getRegionDir();
-      FileSystem fs = region.getFilesystem();
+      Path regiondir = region.getRegionFileSystem().getRegionDir();
+      FileSystem fs = region.getRegionFileSystem().getFileSystem();
      byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
 
      Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
@@ -309,8 +307,7 @@ public class TestHRegion extends HBaseTestCase {
      for (long i = minSeqId; i <= maxSeqId; i += 10) {
        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
        fs.create(recoveredEdits);
-        HLog.Writer writer = HLogFactory.createWriter(fs,
-            recoveredEdits, conf);
+        HLog.Writer writer = HLogFactory.createWriter(fs, recoveredEdits, conf);
 
        long time = System.nanoTime();
        WALEdit edit = new WALEdit();
@@ -354,13 +351,12 @@ public class TestHRegion extends HBaseTestCase {
    byte[] family = Bytes.toBytes("family");
    this.region = initHRegion(tableName, method, conf, family);
    try {
-      Path regiondir = region.getRegionDir();
-      FileSystem fs = region.getFilesystem();
+      Path regiondir = region.getRegionFileSystem().getRegionDir();
+      FileSystem fs = region.getRegionFileSystem().getFileSystem();
 
      Path recoveredEditsDir = HLogUtil.getRegionDirRecoveredEditsDir(regiondir);
      for (int i = 1000; i < 1050; i += 10) {
-        Path recoveredEdits = new Path(
-            recoveredEditsDir, String.format("%019d", i));
+        Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
        FSDataOutputStream dos= fs.create(recoveredEdits);
        dos.writeInt(i);
        dos.close();
@@ -1713,9 +1709,9 @@ public class TestHRegion extends HBaseTestCase {
      openClosedRegion(subregions[i]);
      subregions[i].compactStores();
    }
-    Path oldRegionPath = region.getRegionDir();
-    Path oldRegion1 = subregions[0].getRegionDir();
-    Path oldRegion2 = subregions[1].getRegionDir();
+    Path oldRegionPath = region.getRegionFileSystem().getRegionDir();
+    Path oldRegion1 = subregions[0].getRegionFileSystem().getRegionDir();
+    Path oldRegion2 = subregions[1].getRegionFileSystem().getRegionDir();
    long startTime = System.currentTimeMillis();
    region = HRegion.mergeAdjacent(subregions[0], subregions[1]);
    LOG.info("Merge regions elapsed time: " +
@@ -3491,8 +3487,8 @@ public class TestHRegion extends HBaseTestCase {
 
    // Create a region and skip the initialization (like CreateTableHandler)
    HRegion region = HRegion.createHRegion(hri, rootDir, conf, htd, null, false, true);
-    Path regionDir = region.getRegionDir();
-    FileSystem fs = region.getFilesystem();
+    Path regionDir = region.getRegionFileSystem().getRegionDir();
+    FileSystem fs = region.getRegionFileSystem().getFileSystem();
    HRegion.closeHRegion(region);
 
    Path regionInfoFile = new Path(regionDir, HRegionFileSystem.REGION_INFO_FILE);
@@ -3503,7 +3499,7 @@ public class TestHRegion extends HBaseTestCase {
 
    // Try to open the region
    region = HRegion.openHRegion(rootDir, hri, htd, null, conf);
-    assertEquals(regionDir, region.getRegionDir());
+    assertEquals(regionDir, region.getRegionFileSystem().getRegionDir());
    HRegion.closeHRegion(region);
 
    // Verify that the .regioninfo file is still there
@@ -3516,7 +3512,7 @@ public class TestHRegion extends HBaseTestCase {
        fs.exists(regionInfoFile));
 
    region = HRegion.openHRegion(rootDir, hri, htd, null, conf);
-    assertEquals(regionDir, region.getRegionDir());
+    assertEquals(regionDir, region.getRegionFileSystem().getRegionDir());
    HRegion.closeHRegion(region);
 
    // Verify that the .regioninfo file is still there
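
The TestHRegion edits above are largely one mechanical substitution: paths and filesystems that used to be read straight off HRegion are now reached through its HRegionFileSystem. Shown here as a generic sketch rather than any single test verbatim:

    // Old accessors on HRegion:
    //   Path regionDir = region.getRegionDir();
    //   FileSystem fs = region.getFilesystem();
    // After this patch the same information comes from the region's filesystem abstraction:
    Path regionDir = region.getRegionFileSystem().getRegionDir();
    FileSystem fs = region.getRegionFileSystem().getFileSystem();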

@@ -63,7 +63,7 @@ public class TestHRegionFileSystem {
    assertEquals(hri, hriVerify);
 
    // Open the region
-    regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, rootDir, hri);
+    regionFs = HRegionFileSystem.openRegionFromFileSystem(conf, fs, rootDir, hri, false);
    assertEquals(regionDir, regionFs.getRegionDir());
 
    // Delete the region

@@ -68,13 +68,13 @@ public class TestHRegionInfo {
    assertEquals(modtime, modtime2);
    // Now load the file.
    HRegionInfo deserializedHri = HRegionFileSystem.loadRegionInfoFileContent(
-        FileSystem.get(htu.getConfiguration()), r.getRegionDir());
+        r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir());
    assertTrue(hri.equals(deserializedHri));
  }
 
  long getModTime(final HRegion r) throws IOException {
-    FileStatus [] statuses =
-        r.getFilesystem().listStatus(new Path(r.getRegionDir(), HRegionFileSystem.REGION_INFO_FILE));
+    FileStatus[] statuses = r.getRegionFileSystem().getFileSystem().listStatus(
+        new Path(r.getRegionFileSystem().getRegionDir(), HRegionFileSystem.REGION_INFO_FILE));
    assertTrue(statuses != null && statuses.length == 1);
    return statuses[0].getModificationTime();
  }
|
|
|
@ -95,9 +95,9 @@ public class TestSplitTransaction {
|
||||||
|
|
||||||
@After public void teardown() throws IOException {
|
@After public void teardown() throws IOException {
|
||||||
if (this.parent != null && !this.parent.isClosed()) this.parent.close();
|
if (this.parent != null && !this.parent.isClosed()) this.parent.close();
|
||||||
if (this.fs.exists(this.parent.getRegionDir()) &&
|
Path regionDir = this.parent.getRegionFileSystem().getRegionDir();
|
||||||
!this.fs.delete(this.parent.getRegionDir(), true)) {
|
if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
|
||||||
throw new IOException("Failed delete of " + this.parent.getRegionDir());
|
throw new IOException("Failed delete of " + regionDir);
|
||||||
}
|
}
|
||||||
if (this.wal != null) this.wal.closeAndDelete();
|
if (this.wal != null) this.wal.closeAndDelete();
|
||||||
this.fs.delete(this.testdir, true);
|
this.fs.delete(this.testdir, true);
|
||||||
|
@ -136,11 +136,9 @@ public class TestSplitTransaction {
|
||||||
// Make sure that region a and region b are still in the filesystem, that
|
// Make sure that region a and region b are still in the filesystem, that
|
||||||
// they have not been removed; this is supposed to be the case if we go
|
// they have not been removed; this is supposed to be the case if we go
|
||||||
// past point of no return.
|
// past point of no return.
|
||||||
Path tableDir = this.parent.getRegionDir().getParent();
|
Path tableDir = this.parent.getRegionFileSystem().getTableDir();
|
||||||
Path daughterADir =
|
Path daughterADir = new Path(tableDir, spiedUponSt.getFirstDaughter().getEncodedName());
|
||||||
new Path(tableDir, spiedUponSt.getFirstDaughter().getEncodedName());
|
Path daughterBDir = new Path(tableDir, spiedUponSt.getSecondDaughter().getEncodedName());
|
||||||
Path daughterBDir =
|
|
||||||
new Path(tableDir, spiedUponSt.getSecondDaughter().getEncodedName());
|
|
||||||
assertTrue(TEST_UTIL.getTestFileSystem().exists(daughterADir));
|
assertTrue(TEST_UTIL.getTestFileSystem().exists(daughterADir));
|
||||||
assertTrue(TEST_UTIL.getTestFileSystem().exists(daughterBDir));
|
     assertTrue(TEST_UTIL.getTestFileSystem().exists(daughterBDir));
   }
@@ -154,7 +152,11 @@ public class TestSplitTransaction {
   }
 
   private SplitTransaction prepareGOOD_SPLIT_ROW() {
-    SplitTransaction st = new SplitTransaction(this.parent, GOOD_SPLIT_ROW);
+    return prepareGOOD_SPLIT_ROW(this.parent);
+  }
+
+  private SplitTransaction prepareGOOD_SPLIT_ROW(final HRegion parentRegion) {
+    SplitTransaction st = new SplitTransaction(parentRegion, GOOD_SPLIT_ROW);
     assertTrue(st.prepare());
     return st;
   }
@@ -165,6 +167,7 @@ public class TestSplitTransaction {
   @Test public void testPrepareWithRegionsWithReference() throws IOException {
     HStore storeMock = Mockito.mock(HStore.class);
     when(storeMock.hasReferences()).thenReturn(true);
+    when(storeMock.getFamily()).thenReturn(new HColumnDescriptor("cf"));
     when(storeMock.close()).thenReturn(ImmutableList.<StoreFile>of());
     this.parent.stores.put(Bytes.toBytes(""), storeMock);
 
@@ -214,13 +217,13 @@ public class TestSplitTransaction {
     when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
     PairOfSameType<HRegion> daughters = st.execute(mockServer, null);
     // Do some assertions about execution.
-    assertTrue(this.fs.exists(st.getSplitDir()));
+    assertTrue(this.fs.exists(this.parent.getRegionFileSystem().getSplitsDir()));
     // Assert the parent region is closed.
     assertTrue(this.parent.isClosed());
 
     // Assert splitdir is empty -- because its content will have been moved out
     // to be under the daughter region dirs.
-    assertEquals(0, this.fs.listStatus(st.getSplitDir()).length);
+    assertEquals(0, this.fs.listStatus(this.parent.getRegionFileSystem().getSplitsDir()).length);
     // Check daughters have correct key span.
     assertTrue(Bytes.equals(this.parent.getStartKey(), daughters.getFirst().getStartKey()));
     assertTrue(Bytes.equals(GOOD_SPLIT_ROW, daughters.getFirst().getEndKey()));
@@ -249,9 +252,10 @@ public class TestSplitTransaction {
     assertEquals(rowcount, parentRowCount);
 
     // Start transaction.
-    SplitTransaction st = prepareGOOD_SPLIT_ROW();
+    HRegion spiedRegion = spy(this.parent);
+    SplitTransaction st = prepareGOOD_SPLIT_ROW(spiedRegion);
     SplitTransaction spiedUponSt = spy(st);
-    when(spiedUponSt.createDaughterRegion(spiedUponSt.getSecondDaughter())).
+    when(spiedRegion.createDaughterRegionFromSplits(spiedUponSt.getSecondDaughter())).
       thenThrow(new MockedFailedDaughterCreation());
     // Run the execute.  Look at what it returns.
     boolean expectedException = false;
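
Note: the hunks above move the split working directory lookup from SplitTransaction to the parent region's HRegionFileSystem, and inject the daughter-creation failure through a spy of the parent region rather than of the transaction. A minimal sketch of that pattern, assuming the accessors shown in this diff (getRegionFileSystem(), getSplitsDir(), createDaughterRegionFromSplits()), Mockito statics on the classpath, and the test's open region named parent; the plain IOException is a stand-in for the test's MockedFailedDaughterCreation:

    // Sketch only: prepare a split against a spied region so daughter creation can be stubbed.
    HRegion spiedRegion = spy(parent);                          // 'parent' is the region under test
    SplitTransaction st = new SplitTransaction(spiedRegion, GOOD_SPLIT_ROW);
    assertTrue(st.prepare());

    // Force the second daughter to fail so the rollback path is exercised.
    when(spiedRegion.createDaughterRegionFromSplits(st.getSecondDaughter()))
        .thenThrow(new IOException("injected daughter creation failure"));

    // The split working directory is now answered by the region's filesystem abstraction.
    Path splitsDir = parent.getRegionFileSystem().getSplitsDir();
    assertTrue(fs.exists(splitsDir));
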

@@ -68,10 +68,10 @@ public class TestStoreFile extends HBaseTestCase {
   static final Log LOG = LogFactory.getLog(TestStoreFile.class);
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
-  private static String ROOT_DIR =
-    TEST_UTIL.getDataTestDir("TestStoreFile").toString();
+  private static String ROOT_DIR = TEST_UTIL.getDataTestDir("TestStoreFile").toString();
   private static final ChecksumType CKTYPE = ChecksumType.CRC32;
   private static final int CKBYTES = 512;
+  private static String TEST_FAMILY = "cf";
 
   @Override
   public void setUp() throws Exception {
@@ -89,16 +89,19 @@ public class TestStoreFile extends HBaseTestCase {
    * @throws Exception
    */
   public void testBasicHalfMapFile() throws Exception {
-    // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
-    Path outputDir = new Path(new Path(this.testDir, "7e0102"),
-      "familyname");
-    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
-        this.fs, 2 * 1024)
-            .withOutputDir(outputDir)
+    final HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testBasicHalfMapFileTb"));
+    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
+      conf, fs, new Path(this.testDir, hri.getTableNameAsString()), hri);
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs, 2 * 1024)
+            .withFilePath(regionFs.createTempName())
             .build();
     writeStoreFile(writer);
-    checkHalfHFile(new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
-        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE));
+    Path sfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
+    StoreFile sf = new StoreFile(this.fs, sfPath, conf, cacheConf,
+        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
+    checkHalfHFile(regionFs, sf);
   }
 
   private void writeStoreFile(final StoreFile.Writer writer) throws IOException {
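
Note: the test no longer writes store files straight into a hand-built "regiondir/family" path; it goes through the HRegionFileSystem temp/commit cycle. A sketch of that flow, using only calls that appear in this hunk (createRegionOnFileSystem, createTempName, commitStoreFile); the table and family names are placeholders:

    // Sketch: write into the region's .tmp area, then commit under the family directory.
    HRegionInfo hri = new HRegionInfo(Bytes.toBytes("someTable"));
    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
        conf, fs, new Path(testDir, hri.getTableNameAsString()), hri);

    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs, 2 * 1024)
        .withFilePath(regionFs.createTempName())   // temporary name under the region's .tmp dir
        .build();
    writeStoreFile(writer);                        // test helper: fills and closes the writer

    // The commit renames the temp file into <region>/<family>/ and returns its final path.
    Path committed = regionFs.commitStoreFile("cf", writer.getPath());
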
@@ -134,17 +137,19 @@ public class TestStoreFile extends HBaseTestCase {
    * store files in other regions works.
    * @throws IOException
    */
-  public void testReference()
-  throws IOException {
-    // Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
-    Path storedir = new Path(new Path(this.testDir, "7e0102"), "familyname");
+  public void testReference() throws IOException {
+    final HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testReferenceTb"));
+    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
+      conf, fs, new Path(this.testDir, hri.getTableNameAsString()), hri);
 
     // Make a store file and write data to it.
-    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
-        this.fs, 8 * 1024)
-            .withOutputDir(storedir)
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs, 8 * 1024)
+            .withFilePath(regionFs.createTempName())
             .build();
     writeStoreFile(writer);
-    StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
+    Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
+    StoreFile hsf = new StoreFile(this.fs, hsfPath, conf, cacheConf,
         BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
     StoreFile.Reader reader = hsf.createReader();
     // Split on a row, not in middle of row.  Midkey returned by reader
@@ -155,7 +160,8 @@ public class TestStoreFile extends HBaseTestCase {
     kv = KeyValue.createKeyValueFromKey(reader.getLastKey());
     byte [] finalRow = kv.getRow();
     // Make a reference
-    Path refPath = StoreFile.split(fs, storedir, hsf, midRow, true);
+    HRegionInfo splitHri = new HRegionInfo(hri.getTableName(), null, midRow);
+    Path refPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, true);
     StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf,
         BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
     // Now confirm that I can read from the reference and that it only gets
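
Note: reference files for split daughters are no longer produced with StoreFile.split(); the test routes them through HRegionFileSystem, which is also what the new splitStoreFile() helper added later in this diff does. A sketch of that sequence, assuming hri, regionFs, hsf, fs, conf and cacheConf as set up above and a mid-row split key:

    // Sketch: create a top-half reference to an existing store file for a daughter region.
    HRegionInfo daughter = new HRegionInfo(hri.getTableName(), null, midRow);   // top daughter
    Path ref = regionFs.splitStoreFile(daughter, "cf", hsf, midRow, true);      // true = top half
    Path daughterDir = regionFs.commitDaughterRegion(daughter);                 // move out of .splits
    Path refInDaughter = new Path(new Path(daughterDir, "cf"), ref.getName());
    StoreFile half = new StoreFile(fs, refInDaughter, conf, cacheConf,
        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
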
@@ -173,26 +179,21 @@ public class TestStoreFile extends HBaseTestCase {
   }
 
   public void testHFileLink() throws IOException {
-    final String columnFamily = "f";
-    // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
-    Configuration testConf = new Configuration(this.conf);
-    FSUtils.setRootDir(testConf, this.testDir);
-    HRegionInfo hri = new HRegionInfo(Bytes.toBytes("table-link"));
-    Path storedir = new Path(new Path(this.testDir,
-      new Path(hri.getTableNameAsString(), hri.getEncodedName())), columnFamily);
+    final HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testHFileLinkTb"));
+    // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
+    Configuration testConf = new Configuration(this.conf);
+    FSUtils.setRootDir(testConf, this.testDir);
+    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
+      testConf, fs, new Path(this.testDir, hri.getTableNameAsString()), hri);
 
     // Make a store file and write data to it.
-    StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf,
-        this.fs, 8 * 1024)
-            .withOutputDir(storedir)
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs, 8 * 1024)
+            .withFilePath(regionFs.createTempName())
             .build();
-    Path storeFilePath = writer.getPath();
     writeStoreFile(writer);
-    writer.close();
 
-    Path dstPath = new Path(this.testDir, new Path("test-region", columnFamily));
+    Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
+    Path dstPath = new Path(regionFs.getTableDir(), new Path("test-region", TEST_FAMILY));
     HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
     Path linkFilePath = new Path(dstPath,
         HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
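
Note: testHFileLink still exercises HFileLink.create(); only the source file now comes from a committed HRegionFileSystem store. A sketch of the link round trip under the same assumptions (storeFilePath obtained from commitStoreFile as in the hunk above, family name a placeholder):

    // Sketch: link an existing store file into another region's family directory, then read it.
    Path dstPath = new Path(regionFs.getTableDir(), new Path("test-region", "cf"));
    HFileLink.create(testConf, fs, dstPath, hri, storeFilePath.getName());

    Path linkFilePath = new Path(dstPath,
        HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
    StoreFile linked = new StoreFile(fs, linkFilePath, testConf, cacheConf,
        BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
    StoreFile.Reader reader = linked.createReader();   // reads through the link
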
@@ -218,53 +219,49 @@ public class TestStoreFile extends HBaseTestCase {
    * to hfilelinks (created by snapshot clones) can be properly interpreted.
    */
   public void testReferenceToHFileLink() throws IOException {
-    final String columnFamily = "f";
+    // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
+    Configuration testConf = new Configuration(this.conf);
+    FSUtils.setRootDir(testConf, this.testDir);
 
-    Path rootDir = FSUtils.getRootDir(conf);
-    String tablename = "_original-evil-name"; // adding legal table name chars to verify regex handles it.
-    HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tablename));
-    // store dir = <root>/<tablename>/<rgn>/<cf>
-    Path storedir = new Path(new Path(rootDir,
-      new Path(hri.getTableNameAsString(), hri.getEncodedName())), columnFamily);
+    // adding legal table name chars to verify regex handles it.
+    HRegionInfo hri = new HRegionInfo(Bytes.toBytes("_original-evil-name"));
+    HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
+      testConf, fs, new Path(this.testDir, hri.getTableNameAsString()), hri);
 
     // Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
-    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
-        this.fs, 8 * 1024)
-            .withOutputDir(storedir)
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs, 8 * 1024)
+            .withFilePath(regionFs.createTempName())
             .build();
-    Path storeFilePath = writer.getPath();
     writeStoreFile(writer);
-    writer.close();
+    Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
 
     // create link to store file. <root>/clone/region/<cf>/<hfile>-<region>-<table>
-    String target = "clone";
-    Path dstPath = new Path(rootDir, new Path(new Path(target, "7e0102"), columnFamily));
-    HFileLink.create(conf, this.fs, dstPath, hri, storeFilePath.getName());
+    HRegionInfo hriClone = new HRegionInfo(Bytes.toBytes("clone"));
+    HRegionFileSystem cloneRegionFs = HRegionFileSystem.createRegionOnFileSystem(
+      testConf, fs, new Path(this.testDir, hri.getTableNameAsString()), hriClone);
+    Path dstPath = cloneRegionFs.getStoreDir(TEST_FAMILY);
+    HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
     Path linkFilePath = new Path(dstPath,
         HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
 
     // create splits of the link.
     // <root>/clone/splitA/<cf>/<reftohfilelink>,
     // <root>/clone/splitB/<cf>/<reftohfilelink>
-    Path splitDirA = new Path(new Path(rootDir,
-      new Path(target, "571A")), columnFamily);
-    Path splitDirB = new Path(new Path(rootDir,
-      new Path(target, "571B")), columnFamily);
-    StoreFile f = new StoreFile(fs, linkFilePath, conf, cacheConf, BloomType.NONE,
+    HRegionInfo splitHriA = new HRegionInfo(hri.getTableName(), null, SPLITKEY);
+    HRegionInfo splitHriB = new HRegionInfo(hri.getTableName(), SPLITKEY, null);
+    StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE,
         NoOpDataBlockEncoder.INSTANCE);
-    byte[] splitRow = SPLITKEY;
-    Path pathA = StoreFile.split(fs, splitDirA, f, splitRow, true); // top
-    Path pathB = StoreFile.split(fs, splitDirB, f, splitRow, false); // bottom
+    Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true); // top
+    Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false);// bottom
 
     // OK test the thing
-    FSUtils.logFileSystemState(fs, rootDir, LOG);
+    FSUtils.logFileSystemState(fs, this.testDir, LOG);
 
     // There is a case where a file with the hfilelink pattern is actually a daughter
     // reference to a hfile link.  This code in StoreFile that handles this case.
 
     // Try to open store file from link
-    StoreFile hsfA = new StoreFile(this.fs, pathA, conf, cacheConf,
+    StoreFile hsfA = new StoreFile(this.fs, pathA, testConf, cacheConf,
         BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
 
     // Now confirm that I can read from the ref to link
@@ -275,9 +272,9 @@ public class TestStoreFile extends HBaseTestCase {
       count++;
     }
     assertTrue(count > 0); // read some rows here
 
     // Try to open store file from link
-    StoreFile hsfB = new StoreFile(this.fs, pathB, conf, cacheConf,
+    StoreFile hsfB = new StoreFile(this.fs, pathB, testConf, cacheConf,
         BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
 
     // Now confirm that I can read from the ref to link
@@ -291,31 +288,21 @@ public class TestStoreFile extends HBaseTestCase {
     assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
   }
 
-  private void checkHalfHFile(final StoreFile f)
+  private void checkHalfHFile(final HRegionFileSystem regionFs, final StoreFile f)
       throws IOException {
     byte [] midkey = f.createReader().midkey();
     KeyValue midKV = KeyValue.createKeyValueFromKey(midkey);
     byte [] midRow = midKV.getRow();
     // Create top split.
-    Path topDir = HStore.getStoreHomedir(this.testDir, "1",
-      Bytes.toBytes(f.getPath().getParent().getName()));
-    if (this.fs.exists(topDir)) {
-      this.fs.delete(topDir, true);
-    }
-    Path topPath = StoreFile.split(this.fs, topDir, f, midRow, true);
+    HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTableName(), null, midRow);
+    Path topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, midRow, true);
     // Create bottom split.
-    Path bottomDir = HStore.getStoreHomedir(this.testDir, "2",
-      Bytes.toBytes(f.getPath().getParent().getName()));
-    if (this.fs.exists(bottomDir)) {
-      this.fs.delete(bottomDir, true);
-    }
-    Path bottomPath = StoreFile.split(this.fs, bottomDir, f, midRow, false);
+    HRegionInfo bottomHri = new HRegionInfo(regionFs.getRegionInfo().getTableName(), midRow, null);
+    Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false);
     // Make readers on top and bottom.
-    StoreFile.Reader top =
-        new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
-          NoOpDataBlockEncoder.INSTANCE).createReader();
-    StoreFile.Reader bottom = new StoreFile(this.fs, bottomPath,
-        conf, cacheConf, BloomType.NONE,
+    StoreFile.Reader top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
+        NoOpDataBlockEncoder.INSTANCE).createReader();
+    StoreFile.Reader bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE,
         NoOpDataBlockEncoder.INSTANCE).createReader();
     ByteBuffer previous = null;
     LOG.info("Midkey: " + midKV.toString());
@@ -329,7 +316,7 @@ public class TestStoreFile extends HBaseTestCase {
     ByteBuffer key = null;
     HFileScanner topScanner = top.getScanner(false, false);
     while ((!topScanner.isSeeked() && topScanner.seekTo()) ||
         (topScanner.isSeeked() && topScanner.next())) {
       key = topScanner.getKey();
 
       if (topScanner.getReader().getComparator().compare(key.array(),
@@ -361,20 +348,19 @@ public class TestStoreFile extends HBaseTestCase {
       LOG.info("Last in bottom: " + Bytes.toString(Bytes.toBytes(previous)));
     }
     // Remove references.
-    this.fs.delete(topPath, false);
-    this.fs.delete(bottomPath, false);
+    regionFs.cleanupDaughterRegion(topHri);
+    regionFs.cleanupDaughterRegion(bottomHri);
 
     // Next test using a midkey that does not exist in the file.
     // First, do a key that is < than first key. Ensure splits behave
     // properly.
     byte [] badmidkey = Bytes.toBytes("  .");
-    topPath = StoreFile.split(this.fs, topDir, f, badmidkey, true);
-    bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey, false);
-    top = new StoreFile(this.fs, topPath, conf, cacheConf,
-        BloomType.NONE,
+    assertTrue(fs.exists(f.getPath()));
+    topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, badmidkey, true);
+    bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
+    top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
         NoOpDataBlockEncoder.INSTANCE).createReader();
-    bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf,
-        BloomType.NONE,
+    bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE,
         NoOpDataBlockEncoder.INSTANCE).createReader();
     bottomScanner = bottom.getScanner(false, false);
     int count = 0;
@@ -409,18 +395,16 @@ public class TestStoreFile extends HBaseTestCase {
       assertTrue(tmp.charAt(i) == 'z');
     }
     // Remove references.
-    this.fs.delete(topPath, false);
-    this.fs.delete(bottomPath, false);
+    regionFs.cleanupDaughterRegion(topHri);
+    regionFs.cleanupDaughterRegion(bottomHri);
 
     // Test when badkey is > than last key in file ('||' > 'zz').
     badmidkey = Bytes.toBytes("|||");
-    topPath = StoreFile.split(this.fs, topDir, f, badmidkey, true);
-    bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey, false);
-    top = new StoreFile(this.fs, topPath, conf, cacheConf,
-        BloomType.NONE,
+    topPath = splitStoreFile(regionFs,topHri, TEST_FAMILY, f, badmidkey, true);
+    bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
+    top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE,
        NoOpDataBlockEncoder.INSTANCE).createReader();
-    bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf,
-        BloomType.NONE,
+    bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf, BloomType.NONE,
        NoOpDataBlockEncoder.INSTANCE).createReader();
     first = true;
     bottomScanner = bottom.getScanner(false, false);
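
Note: between split attempts checkHalfHFile() used to delete the reference paths directly; it now asks HRegionFileSystem to drop the whole daughter region directory. A sketch of that cleanup step, reusing the topHri/bottomHri daughters and the StoreFile f from the hunks above:

    // Sketch: discard daughter regions that exist only to host split references.
    regionFs.cleanupDaughterRegion(topHri);
    regionFs.cleanupDaughterRegion(bottomHri);
    // The parent store file the references pointed at is left untouched.
    assertTrue(fs.exists(f.getPath()));
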
@@ -463,10 +447,8 @@ public class TestStoreFile extends HBaseTestCase {
 
   private static final String localFormatter = "%010d";
 
-  private void bloomWriteRead(StoreFile.Writer writer, FileSystem fs)
-  throws Exception {
-    float err = conf.getFloat(
-        BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0);
+  private void bloomWriteRead(StoreFile.Writer writer, FileSystem fs) throws Exception {
+    float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0);
     Path f = writer.getPath();
     long now = System.currentTimeMillis();
     for (int i = 0; i < 2000; i += 2) {
@@ -511,8 +493,7 @@ public class TestStoreFile extends HBaseTestCase {
 
   public void testBloomFilter() throws Exception {
     FileSystem fs = FileSystem.getLocal(conf);
-    conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
-        (float) 0.01);
+    conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, (float) 0.01);
     conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);
 
     // write the file
@@ -530,11 +511,9 @@ public class TestStoreFile extends HBaseTestCase {
 
   public void testDeleteFamilyBloomFilter() throws Exception {
     FileSystem fs = FileSystem.getLocal(conf);
-    conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
-        (float) 0.01);
+    conf.setFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, (float) 0.01);
     conf.setBoolean(BloomFilterFactory.IO_STOREFILE_BLOOM_ENABLED, true);
-    float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE,
-        0);
+    float err = conf.getFloat(BloomFilterFactory.IO_STOREFILE_BLOOM_ERROR_RATE, 0);
 
     // write the file
     Path f = new Path(ROOT_DIR, getName());
@@ -624,9 +603,8 @@ public class TestStoreFile extends HBaseTestCase {
     int versions = 2;
 
     // run once using columns and once using rows
-    BloomType[] bt =
-        {BloomType.ROWCOL, BloomType.ROW};
+    BloomType[] bt = {BloomType.ROWCOL, BloomType.ROW};
     int[] expKeys = {rowCount*colCount, rowCount};
     // below line deserves commentary.  it is expected bloom false positives
     // column = rowCount*2*colCount inserts
     // row-level = only rowCount*2 inserts, but failures will be magnified by
@@ -946,6 +924,15 @@ public class TestStoreFile extends HBaseTestCase {
     assertEquals(startEvicted, cs.getEvictedCount());
   }
 
+  private Path splitStoreFile(final HRegionFileSystem regionFs, final HRegionInfo hri,
+      final String family, final StoreFile sf, final byte[] splitKey, boolean isTopRef)
+      throws IOException {
+    FileSystem fs = regionFs.getFileSystem();
+    Path path = regionFs.splitStoreFile(hri, family, sf, splitKey, isTopRef);
+    Path regionDir = regionFs.commitDaughterRegion(hri);
+    return new Path(new Path(regionDir, family), path.getName());
+  }
+
   private StoreFile.Writer writeStoreFile(Configuration conf,
       CacheConfig cacheConf, Path path, int numBlocks)
       throws IOException {
@@ -961,8 +948,7 @@ public class TestStoreFile extends HBaseTestCase {
       totalSize += kv.getLength() + 1;
     }
     int blockSize = totalSize / numBlocks;
-    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs,
-        blockSize)
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, fs, blockSize)
             .withFilePath(path)
             .withMaxKeyCount(2000)
             .withChecksumType(CKTYPE)
@@ -1003,16 +989,14 @@ public class TestStoreFile extends HBaseTestCase {
             .withBytesPerChecksum(CKBYTES)
             .build();
     writer.close();
 
     StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf,
         cacheConf, BloomType.NONE, dataBlockEncoder);
     StoreFile.Reader reader = storeFile.createReader();
 
     Map<byte[], byte[]> fileInfo = reader.loadFileInfo();
     byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
 
     assertEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
   }
 
 }
 
@@ -530,8 +530,7 @@ public class TestWALReplay {
     for (HColumnDescriptor hcd: htd.getFamilies()) {
       cf_count++;
       if (cf_count == 2) {
-        this.fs.delete(new Path(region.getRegionDir(), Bytes.toString(hcd.getName()))
-          , true);
+        region.getRegionFileSystem().deleteFamily(hcd.getNameAsString());
       }
     }
 
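
Note: TestWALReplay no longer assembles the family directory path by hand before deleting it; HRegionFileSystem owns that layout. A sketch of the replacement call, assuming an open region and its table descriptor as in the test:

    // Sketch: remove one column family's directory through the region's filesystem abstraction.
    int cf_count = 0;
    for (HColumnDescriptor hcd : htd.getFamilies()) {
      cf_count++;
      if (cf_count == 2) {   // drop the second family, as the test does
        region.getRegionFileSystem().deleteFamily(hcd.getNameAsString());
      }
    }
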

@@ -46,7 +46,7 @@ import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
@@ -156,19 +156,21 @@ public class TestRestoreSnapshotHelper {
   private void createSnapshot(final Path rootDir, final Path snapshotDir, final HTableDescriptor htd)
       throws IOException {
     // First region, simple with one plain hfile.
-    HRegion r0 = HRegion.createHRegion(new HRegionInfo(htd.getName()), archiveDir,
-        conf, htd, null, true, true);
-    Path storeFile = new Path(new Path(r0.getRegionDir(), TEST_FAMILY), TEST_HFILE);
+    HRegionInfo hri = new HRegionInfo(htd.getName());
+    HRegionFileSystem r0fs = HRegionFileSystem.createRegionOnFileSystem(conf,
+      fs, new Path(archiveDir, hri.getTableNameAsString()), hri);
+    Path storeFile = new Path(rootDir, TEST_HFILE);
     fs.createNewFile(storeFile);
-    r0.close();
+    r0fs.commitStoreFile(TEST_FAMILY, storeFile);
 
     // Second region, used to test the split case.
     // This region contains a reference to the hfile in the first region.
-    HRegion r1 = HRegion.createHRegion(new HRegionInfo(htd.getName()), archiveDir,
-        conf, htd, null, true, true);
-    fs.createNewFile(new Path(new Path(r1.getRegionDir(), TEST_FAMILY),
-        storeFile.getName() + '.' + r0.getRegionInfo().getEncodedName()));
-    r1.close();
+    hri = new HRegionInfo(htd.getName());
+    HRegionFileSystem r1fs = HRegionFileSystem.createRegionOnFileSystem(conf,
+      fs, new Path(archiveDir, hri.getTableNameAsString()), hri);
+    storeFile = new Path(rootDir, TEST_HFILE + '.' + r0fs.getRegionInfo().getEncodedName());
+    fs.createNewFile(storeFile);
+    r1fs.commitStoreFile(TEST_FAMILY, storeFile);
 
     Path tableDir = HTableDescriptor.getTableDir(archiveDir, htd.getName());
     FileUtil.copy(fs, tableDir, fs, snapshotDir, false, conf);
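
Note: createSnapshot() now lays regions down with HRegionFileSystem.createRegionOnFileSystem() and commitStoreFile() instead of opening and closing full HRegion instances. A sketch of the second-region setup, which plants a reference-style file name pointing at the first region's hfile (TEST_FAMILY, TEST_HFILE, archiveDir, rootDir and r0fs are the test's own fixtures):

    // Sketch: fabricate a region whose store holds a reference-named file.
    HRegionInfo hri = new HRegionInfo(htd.getName());
    HRegionFileSystem r1fs = HRegionFileSystem.createRegionOnFileSystem(
        conf, fs, new Path(archiveDir, hri.getTableNameAsString()), hri);

    // "<hfile>.<encoded parent region name>" is how daughter references are named on disk.
    Path refFile = new Path(rootDir, TEST_HFILE + '.' + r0fs.getRegionInfo().getEncodedName());
    fs.createNewFile(refFile);
    r1fs.commitStoreFile(TEST_FAMILY, refFile);
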

@@ -213,7 +213,8 @@ public class HFileArchiveTestingUtil {
    * @return {@link Path} to the archive directory for the given region
    */
   public static Path getRegionArchiveDir(Configuration conf, HRegion region) {
-    return HFileArchiveUtil.getRegionArchiveDir(conf, region.getTableDir(), region.getRegionDir());
+    return HFileArchiveUtil.getRegionArchiveDir(region.getRegionFileSystem().getTableDir(),
+      region.getRegionFileSystem().getRegionDir());
   }
 
   /**
@@ -225,9 +226,8 @@ public class HFileArchiveTestingUtil {
    */
   public static Path getStoreArchivePath(Configuration conf, HRegion region, Store store)
       throws IOException {
-    HRegionInfo hri = region.getRegionInfo();
-    return HFileArchiveUtil.getStoreArchivePath(conf, hri.getTableNameAsString(), hri,
-      store.getFamily().getNameAsString());
+    return HFileArchiveUtil.getStoreArchivePath(conf, region.getRegionInfo(),
+      region.getRegionFileSystem().getTableDir(), store.getFamily().getName());
   }
 
   public static Path getStoreArchivePath(HBaseTestingUtility util, String tableName,
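
Note: HFileArchiveUtil.getRegionArchiveDir() loses its Configuration parameter; the archive location is derived from the table and region directories alone, which the helper now reads off the region's HRegionFileSystem. A sketch of the updated call:

    // Sketch: archive directory for a region, computed from paths only.
    Path tableDir = region.getRegionFileSystem().getTableDir();
    Path regionDir = region.getRegionFileSystem().getRegionDir();
    Path archiveDir = HFileArchiveUtil.getRegionArchiveDir(tableDir, regionDir);
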

@@ -50,10 +50,9 @@ public class TestHFileArchiveUtil {
 
   @Test
   public void testRegionArchiveDir() {
-    Configuration conf = null;
     Path tableDir = new Path("table");
     Path regionDir = new Path("region");
-    assertNotNull(HFileArchiveUtil.getRegionArchiveDir(conf, tableDir, regionDir));
+    assertNotNull(HFileArchiveUtil.getRegionArchiveDir(tableDir, regionDir));
   }
 
   @Test