HADOOP-2029 TestLogRolling fails too often in patch and nightlies

3rd commit against this issue.


git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk/src/contrib/hbase@584286 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2007-10-12 21:58:12 +00:00
parent 8faa2ea9c6
commit 920c6b2021
3 changed files with 38 additions and 25 deletions

HLog.java

@@ -223,7 +223,8 @@ public class HLog implements HConstants {
* sequence numbers are always greater than the latest sequence number of the
* region being brought on-line.
*
- * @param newvalue
+ * @param newvalue We'll set log edit/sequence number to this value if it
+ * is greater than the current value.
*/
void setSequenceNumber(long newvalue) {
synchronized (sequenceLock) {
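
The rest of the method body falls outside this hunk. A minimal sketch of the forward-only update the new javadoc describes, using an assumed logSeqNum field for the log's current sequence counter (not the committed body), could look like:

    // Sketch only; 'logSeqNum' is an assumed field name for the current counter.
    void setSequenceNumber(long newvalue) {
      synchronized (sequenceLock) {
        if (newvalue > this.logSeqNum) {
          // Only ever move the counter forward, per the javadoc above.
          this.logSeqNum = newvalue;
        }
      }
    }
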
@@ -317,8 +318,19 @@ public class HLog implements HConstants {
sequenceNumbers.addAll(this.outputfiles.headMap(
Long.valueOf(oldestOutstandingSeqNum)).keySet());
// Now remove old log files (if any)
- LOG.debug("Found " + sequenceNumbers.size() + " logs to remove " +
- "using oldest outstanding seqnum of " + oldestOutstandingSeqNum);
+ if (LOG.isDebugEnabled()) {
+ // Find region associated with oldest key -- helps debugging.
+ Text oldestRegion = null;
+ for (Map.Entry<Text, Long> e: this.lastSeqWritten.entrySet()) {
+ if (e.getValue().longValue() == oldestOutstandingSeqNum) {
+ oldestRegion = e.getKey();
+ break;
+ }
+ }
+ LOG.debug("Found " + sequenceNumbers.size() + " logs to remove " +
+ "using oldest outstanding seqnum of " + oldestOutstandingSeqNum +
+ " from region " + oldestRegion);
+ }
if (sequenceNumbers.size() > 0) {
for (Long seq : sequenceNumbers) {
deleteLogFile(this.outputfiles.remove(seq), seq);
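
For context on what the new debug block reports, here is a self-contained sketch of the log-cleaning rule it instruments, assuming lastSeqWritten maps a region name to the last sequence number written for that region and outputfiles maps a rolled log's highest sequence number to its file; types are simplified (String in place of Text and Path) and this is not the HLog code itself:

    import java.util.Collections;
    import java.util.Map;
    import java.util.SortedMap;

    // Illustration: a rolled log is removable once every region has flushed
    // edits newer than anything that log contains.
    public class OldLogRule {
      static SortedMap<Long, String> removableLogs(Map<String, Long> lastSeqWritten,
          SortedMap<Long, String> outputfiles) {
        if (lastSeqWritten.isEmpty()) {
          // Assumption: with no outstanding edits, every rolled log is removable.
          return outputfiles;
        }
        long oldestOutstandingSeqNum = Collections.min(lastSeqWritten.values());
        // headMap is exclusive, so the log holding the oldest outstanding edit is kept.
        return outputfiles.headMap(oldestOutstandingSeqNum);
      }
    }
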

HRegion.java

@@ -213,7 +213,7 @@ public class HRegion implements HConstants {
protected final int optionalFlushCount;
private final HLocking lock = new HLocking();
private long desiredMaxFileSize;
- private final long maxSequenceId;
+ private final long minSequenceId;
//////////////////////////////////////////////////////////////////////////////
// Constructor
@@ -278,10 +278,10 @@ public class HRegion implements HConstants {
maxSeqId = storeSeqId;
}
}
- this.maxSequenceId = maxSeqId;
+ this.minSequenceId = maxSeqId;
if (LOG.isDebugEnabled()) {
- LOG.debug("maximum sequence id for region " + regionInfo.getRegionName() +
- " is " + this.maxSequenceId);
+ LOG.debug("Next sequence id for region " + regionInfo.getRegionName() +
+ " is " + this.minSequenceId);
}
// Get rid of any splits or merges that were lost in-progress
@@ -309,8 +309,12 @@ public class HRegion implements HConstants {
LOG.info("region " + this.regionInfo.regionName + " available");
}
- long getMaxSequenceId() {
- return this.maxSequenceId;
+ /**
+ * @return Updates to this region need to have a sequence id that is >=
+ * this number.
+ */
+ long getMinSequenceId() {
+ return this.minSequenceId;
}
/** Returns a HRegionInfo object for this region */
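
The renamed accessor is consumed when a region comes online: the server seeds the shared log's counter with it (see the HRegionServer hunk below). A runnable sketch of that seeding contract, with Wal, logSeqNum and obtainSeqNum as invented stand-ins for the real HLog pieces:

    // Demo of the seeding contract, not HBase code.
    public class SequenceSeedingDemo {
      static class Wal {
        private long logSeqNum = 0;
        // Mirrors the forward-only behaviour described by HLog.setSequenceNumber's javadoc.
        synchronized void setSequenceNumber(long newvalue) {
          if (newvalue > this.logSeqNum) {
            this.logSeqNum = newvalue;
          }
        }
        synchronized long obtainSeqNum() {
          return ++this.logSeqNum;
        }
      }

      public static void main(String[] args) {
        Wal log = new Wal();
        long minSequenceId = 42L; // e.g. largest sequence id found in the region's stores
        log.setSequenceNumber(minSequenceId);
        // Edits made after the seed are numbered above anything already persisted.
        System.out.println(log.obtainSeqNum()); // prints 43
      }
    }
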
@@ -407,8 +411,8 @@ public class HRegion implements HConstants {
* @throws IOException
*/
HRegion[] closeAndSplit(final Text midKey,
- final RegionUnavailableListener listener) throws IOException {
+ final RegionUnavailableListener listener)
+ throws IOException {
checkMidKey(midKey);
long startTime = System.currentTimeMillis();
Path splits = getSplitsDir();
@@ -432,7 +436,6 @@ public class HRegion implements HConstants {
listener.closing(getRegionName());
}
// Now close the HRegion. Close returns all store files or null if not
// supposed to close (? What to do in this case? Implement abort of close?)
// Close also does wait on outstanding rows and calls a flush just-in-case.
@@ -736,7 +739,6 @@ public class HRegion implements HConstants {
LOG.info("Optional flush called " + this.noFlushCount +
" times when data present without flushing. Forcing one.");
flushcache(false);
} else {
// Only increment if something in the cache.
// Gets zero'd when a flushcache is called.
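
A small sketch of the optional-flush bookkeeping these lines describe; the counter and threshold names mirror the diff, but the class, the method and the threshold value are invented for illustration:

    // Illustration: force a flush when data has sat in the cache across
    // optionalFlushCount consecutive optional-flush checks.
    class OptionalFlushBookkeeping {
      private final int optionalFlushCount = 10; // assumed threshold
      private int noFlushCount = 0;

      boolean shouldForceFlush(boolean cacheHasData) {
        if (!cacheHasData) {
          return false; // only increment if something is in the cache
        }
        if (++this.noFlushCount >= this.optionalFlushCount) {
          this.noFlushCount = 0; // zero'd when a flushcache happens
          return true;
        }
        return false;
      }
    }
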
@@ -1127,15 +1129,14 @@ public class HRegion implements HConstants {
// will be extremely rare; we'll deal with it when it happens.
checkResources();
- if (this.closed.get()) {
- throw new IOException("Region " + this.getRegionName().toString() +
- " closed");
- }
// Get a read lock. We will not be able to get one if we are closing or
// if this region is being split. In neither case should we be allowing
// updates.
this.lock.obtainReadLock();
+ if (this.closed.get()) {
+ throw new IOException("Region " + this.getRegionName().toString() +
+ " closed");
+ }
try {
// We obtain a per-row lock, so other clients will block while one client
// performs an update. The read lock is released by the client calling
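
A compact sketch of checking the closed flag only after the read lock is held, with java.util.concurrent locking and a plain IOException standing in for the region's own lock and exception types:

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Illustration: a close or split takes the write lock, so re-checking the
    // flag once the read lock is held cannot race with an in-flight close.
    public class UpdateGate {
      private final AtomicBoolean closed = new AtomicBoolean(false);
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      void update() throws IOException {
        this.lock.readLock().lock();
        try {
          if (this.closed.get()) {
            throw new IOException("Region closed");
          }
          // ... apply the edit while the read lock is held ...
        } finally {
          this.lock.readLock().unlock();
        }
      }
    }
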
@@ -1179,8 +1180,8 @@ public class HRegion implements HConstants {
}
}
if (blocked) {
- LOG.info("Unblocking updates for '" + Thread.currentThread().getName() +
- "'");
+ LOG.info("Unblocking updates for region " + getRegionName() + " '" +
+ Thread.currentThread().getName() + "'");
}
}

HRegionServer.java

@@ -926,14 +926,14 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
}
}
- void openRegion(HRegionInfo regionInfo) throws IOException {
+ void openRegion(final HRegionInfo regionInfo) throws IOException {
HRegion region = onlineRegions.get(regionInfo.regionName);
if(region == null) {
region = new HRegion(new Path(this.conf.get(HConstants.HBASE_DIR)),
this.log, FileSystem.get(conf), conf, regionInfo, null);
this.lock.writeLock().lock();
try {
- this.log.setSequenceNumber(region.getMaxSequenceId());
+ this.log.setSequenceNumber(region.getMinSequenceId());
this.onlineRegions.put(region.getRegionName(), region);
} finally {
this.lock.writeLock().unlock();
@@ -1238,7 +1238,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
//
protected long startUpdate(Text regionName, Text row) throws IOException {
- HRegion region = getRegion(regionName, false);
+ HRegion region = getRegion(regionName);
return region.startUpdate(row);
}