HBASE-1719 hold a reference to the region in stores instead of only the region info
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@799074 13f79535-47bb-0310-9956-ffa450edef68
parent ea183fe272
commit 21d54c0c8c
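In brief: a Store now keeps a reference to its parent HRegion and derives the HRegionInfo from it, so region-level state such as the forced-split flag moves from HRegionInfo onto HRegion. A minimal sketch of the resulting shape, following the field and accessor names in the diff below; the simplified signatures and the elided ("...") bodies are illustrative only, not the literal patch:

    // Sketch only -- simplified from the diff, not the real class.
    public class Store {
      private final HRegion region;           // was: private final HRegionInfo regioninfo

      protected Store(Path basedir, HRegion region, HColumnDescriptor family /* ... */) {
        HRegionInfo info = region.regionInfo;  // region info is now reached through the region
        this.region = region;
        // ...
      }

      StoreSize compact(final boolean mc) throws IOException {
        // the split request is region-level state, so the store asks its region
        boolean forceSplit = this.region.shouldSplit(false);
        // ...
        return null; // elided
      }

      HRegion getHRegion() { return this.region; }
      HRegionInfo getHRegionInfo() { return this.region.regionInfo; }
    }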
@@ -533,6 +533,8 @@ Release 0.20.0 - Unreleased
    HBASE-1683  OOME on master splitting logs; stuck, won't go down
    HBASE-1704  Better zk error when failed connect
    HBASE-1714  Thrift server: prefix scan API
+   HBASE-1719  hold a reference to the region in stores instead of only the
+               region info
 
  OPTIMIZATIONS
    HBASE-1412  Change values for delete column and column family in KeyValue
@@ -69,7 +69,6 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
   //TODO: Move NO_HASH to HStoreFile which is really the only place it is used.
   public static final int NO_HASH = -1;
   private volatile int encodedName = NO_HASH;
-  private boolean splitRequest = false;
 
   private void setHashCode() {
     int result = Arrays.hashCode(this.regionName);
@@ -456,17 +455,6 @@ public class HRegionInfo extends VersionedWritable implements WritableComparable
     return Bytes.compareTo(this.endKey, o.endKey);
   }
 
-  /**
-   * For internal use in forcing splits ahead of file size limit.
-   * @param b
-   * @return previous value
-   */
-  public boolean shouldSplit(boolean b) {
-    boolean old = this.splitRequest;
-    this.splitRequest = b;
-    return old;
-  }
-
   /**
    * @return Comparator to use comparing {@link KeyValue}s.
    */
@@ -199,6 +199,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
       new ReentrantReadWriteLock();
   private final Object splitLock = new Object();
   private long minSequenceId;
+  private boolean splitRequest;
 
   /**
    * Name of the region info file that resides just under the region directory.
@@ -1507,7 +1508,7 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
   protected Store instantiateHStore(Path baseDir,
     HColumnDescriptor c, Path oldLogFile, Progressable reporter)
   throws IOException {
-    return new Store(baseDir, this.regionInfo, c, this.fs, oldLogFile,
+    return new Store(baseDir, this, c, this.fs, oldLogFile,
       this.conf, reporter);
   }
 
@@ -2449,6 +2450,17 @@ public class HRegion implements HConstants, HeapSize { // , Writable{
     }
   }
 
+  /**
+   * For internal use in forcing splits ahead of file size limit.
+   * @param b
+   * @return previous value
+   */
+  public boolean shouldSplit(boolean b) {
+    boolean old = this.splitRequest;
+    this.splitRequest = b;
+    return old;
+  }
+
   /**
    * Facility for dumping and compacting catalog tables.
    * Only does catalog tables since these are only tables we for sure know
@@ -1482,7 +1482,7 @@ public class HRegionServer implements HConstants, HRegionInterface,
       case MSG_REGION_SPLIT:
         region = getRegion(info.getRegionName());
         region.flushcache();
-        region.regionInfo.shouldSplit(true);
+        region.shouldSplit(true);
         // force a compaction; split will be side-effect.
         compactSplitThread.compactionRequested(region,
           e.msg.getType().name());
@@ -95,7 +95,7 @@ public class Store implements HConstants, HeapSize {
   protected final MemStore memstore;
   // This stores directory in the filesystem.
   private final Path homedir;
-  private final HRegionInfo regioninfo;
+  private final HRegion region;
   private final HColumnDescriptor family;
   final FileSystem fs;
   private final HBaseConfiguration conf;
@@ -154,13 +154,14 @@ public class Store implements HConstants, HeapSize {
    * failed. Can be null.
    * @throws IOException
    */
-  protected Store(Path basedir, HRegionInfo info, HColumnDescriptor family,
+  protected Store(Path basedir, HRegion region, HColumnDescriptor family,
     FileSystem fs, Path reconstructionLog, HBaseConfiguration conf,
     final Progressable reporter)
   throws IOException {
+    HRegionInfo info = region.regionInfo;
     this.homedir = getStoreHomedir(basedir, info.getEncodedName(),
       family.getName());
-    this.regioninfo = info;
+    this.region = region;
     this.family = family;
     this.fs = fs;
     this.conf = conf;
@@ -317,7 +318,7 @@ public class Store implements HConstants, HeapSize {
         // METACOLUMN info such as HBASE::CACHEFLUSH entries
         if (/* commented out for now - stack via jgray key.isTransactionEntry() || */
             val.matchingFamily(HLog.METAFAMILY) ||
-            !Bytes.equals(key.getRegionName(), regioninfo.getRegionName()) ||
+            !Bytes.equals(key.getRegionName(), region.regionInfo.getRegionName()) ||
             !val.matchingFamily(family.getName())) {
           continue;
         }
@@ -536,7 +537,7 @@ public class Store implements HConstants, HeapSize {
         ", sequenceid=" + logCacheFlushId +
         ", memsize=" + StringUtils.humanReadableInt(flushed) +
         ", filesize=" + StringUtils.humanReadableInt(r.length()) +
-        " to " + this.regioninfo.getRegionNameAsString());
+        " to " + this.region.regionInfo.getRegionNameAsString());
     }
     return sf;
   }
@@ -637,7 +638,7 @@ public class Store implements HConstants, HeapSize {
    * @throws IOException
    */
   StoreSize compact(final boolean mc) throws IOException {
-    boolean forceSplit = this.regioninfo.shouldSplit(false);
+    boolean forceSplit = this.region.shouldSplit(false);
     boolean majorcompaction = mc;
     synchronized (compactLock) {
       // filesToCompact are sorted oldest to newest.
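Taken with the HRegionServer and HRegion hunks above, the forced-split request now flows entirely through the region object rather than through HRegionInfo. A condensed, illustrative trace (assembled from the hunks in this commit, not verbatim code):

    // HRegionServer, handling MSG_REGION_SPLIT:
    region.shouldSplit(true);                             // raise the flag on the HRegion
    compactSplitThread.compactionRequested(region, ...);  // the compaction will notice it

    // Store.compact(), when that compaction runs:
    boolean forceSplit = this.region.shouldSplit(false);  // read the flag and clear it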
@@ -868,13 +869,17 @@ public class Store implements HConstants, HeapSize {
           more = scanner.next(kvs);
           // output to writer:
           for (KeyValue kv : kvs) {
-            if (writer == null) writer = getWriter(this.regionCompactionDir);
+            if (writer == null) {
+              writer = getWriter(this.regionCompactionDir);
+            }
             writer.append(kv);
           }
           kvs.clear();
         }
       } finally {
-        if (scanner != null) scanner.close();
+        if (scanner != null) {
+          scanner.close();
+        }
       }
     } else {
       MinorCompactingStoreScanner scanner = null;
@@ -1473,8 +1478,12 @@ public class Store implements HConstants, HeapSize {
     }
   }
 
+  HRegion getHRegion() {
+    return this.region;
+  }
+
   HRegionInfo getHRegionInfo() {
-    return this.regioninfo;
+    return this.region.regionInfo;
   }
 
   /**
@@ -70,17 +70,22 @@ public class TestStore extends TestCase {
   private void init(String methodName) throws IOException {
     //Setting up a Store
     Path basedir = new Path(DIR+methodName);
     Path logdir = new Path(DIR+methodName+"/logs");
     HColumnDescriptor hcd = new HColumnDescriptor(family);
     HBaseConfiguration conf = new HBaseConfiguration();
     FileSystem fs = FileSystem.get(conf);
     Path reconstructionLog = null;
     Progressable reporter = null;
 
     fs.delete(logdir, true);
 
-    store = new Store(basedir, info, hcd, fs, reconstructionLog, conf,
+    HTableDescriptor htd = new HTableDescriptor(table);
+    htd.addFamily(hcd);
+    HRegionInfo info = new HRegionInfo(htd, null, null, false);
+    HLog hlog = new HLog(fs, logdir, conf, null);
+    HRegion region = new HRegion(basedir, hlog, fs, conf, info, null);
+
+    store = new Store(basedir, region, hcd, fs, reconstructionLog, conf,
       reporter);
   }
 
@@ -112,7 +117,7 @@ public class TestStore extends TestCase {
     this.store.close();
     // Reopen it... should pick up two files
     this.store = new Store(storedir.getParent().getParent(),
-        this.store.getHRegionInfo(),
+        this.store.getHRegion(),
         this.store.getFamily(), fs, null, c, null);
     System.out.println(this.store.getHRegionInfo().getEncodedName());
     assertEquals(2, this.store.getStorefilesCount());