HBASE-2869 Regularize how we log sequenceids -- sometimes its myseqid, other times its sequence id, etc.

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@966907 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2010-07-22 23:51:27 +00:00
parent 65b09b7cd5
commit 6dddd674cf
4 changed files with 15 additions and 16 deletions

@@ -793,6 +793,8 @@ Release 0.21.0 - Unreleased
              and hadoop/avro 1.4.x
   HBASE-2865 Cleanup of LRU logging; its hard to read, uses custom MB'maker,
              repeats info, too many numbers after the point, etc.
+  HBASE-2869 Regularize how we log sequenceids -- sometimes its myseqid,
+             other times its sequence id, etc.
 NEW FEATURES
   HBASE-1961 HBase EC2 scripts

@@ -957,7 +957,7 @@ public class HRegion implements HeapSize { // , Writable{
       LOG.debug("Started memstore flush for region " + this +
         ". Current region memstore size " +
         StringUtils.humanReadableInt(this.memstoreSize.get()) +
-        ((wal != null)? "": "; wal is null, using passed myseqid=" + myseqid));
+        ((wal != null)? "": "; wal is null, using passed sequenceid=" + myseqid));
     }
     // Stop updates while we snapshot the memstore of all stores. We only have
@@ -1080,7 +1080,7 @@ public class HRegion implements HeapSize { // , Writable{
     long now = EnvironmentEdgeManager.currentTimeMillis();
     LOG.info("Finished memstore flush of ~" +
       StringUtils.humanReadableInt(currentMemStoreSize) + " for region " +
-      this + " in " + (now - startTime) + "ms, sequence id=" + sequenceId +
+      this + " in " + (now - startTime) + "ms, sequenceid=" + sequenceId +
       ", compaction requested=" + compactionRequested +
       ((wal == null)? "; wal=null": ""));
   }
@@ -1950,7 +1950,7 @@ public class HRegion implements HeapSize { // , Writable{
   private long replayRecoveredEdits(final Path edits,
     final long minSeqId, final Progressable reporter)
   throws IOException {
-    LOG.info("Replaying edits from " + edits + "; minSeqId=" + minSeqId);
+    LOG.info("Replaying edits from " + edits + "; minSequenceid=" + minSeqId);
     HLog.Reader reader = HLog.getReader(this.fs, edits, conf);
     try {
       return replayRecoveredEdits(reader, minSeqId, reporter);
@@ -2026,8 +2026,8 @@ public class HRegion implements HeapSize { // , Writable{
     }
     if (LOG.isDebugEnabled()) {
       LOG.debug("Applied " + editsCount + ", skipped " + skippedEdits +
-        ", firstSeqIdInLog=" + firstSeqIdInLog +
-        ", maxSeqIdInLog=" + currentEditSeqId);
+        ", firstSequenceidInLog=" + firstSeqIdInLog +
+        ", maxSequenceidInLog=" + currentEditSeqId);
     }
     return currentEditSeqId;
   }
@@ -2573,9 +2573,8 @@ public class HRegion implements HeapSize { // , Writable{
       HTableDescriptor.getTableDir(rootDir, info.getTableDesc().getName()),
       log, FileSystem.get(conf), conf, info, null);
     long seqid = r.initialize();
-    if (log != null) {
-      log.setSequenceNumber(seqid);
-    }
+    // If seqid > current wal seqid, the wal seqid is updated.
+    if (log != null) log.setSequenceNumber(seqid);
     return r;
   }

@@ -1492,10 +1492,8 @@ public class HRegionServer implements HRegionInterface,
           addProcessingMessage(regionInfo);
         }
       });
-    // If a wal and its seqid is < that of new region, use new regions seqid.
-    if (wal != null) {
-      if (seqid > wal.getSequenceNumber()) wal.setSequenceNumber(seqid);
-    }
+    // If seqid > current wal seqid, the wal seqid is updated.
+    if (wal != null) wal.setSequenceNumber(seqid);
    return r;
  }
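The simplification above (and the matching one in HRegion) leans on setSequenceNumber itself being monotonic: as the new comment states and the HLog hunk below shows, the wal's sequenceid is only swapped in when the passed value is larger than the current one, so callers no longer need their own comparison. Below is a minimal, stand-alone sketch of that compare-and-set pattern; the class name MonotonicSequenceId and the main() driver are illustrative and not part of HBase.

import java.util.concurrent.atomic.AtomicLong;

// Hypothetical stand-alone class, not part of HBase; names are illustrative only.
public class MonotonicSequenceId {
  private final AtomicLong logSeqNum = new AtomicLong(0);

  /** Advance the stored sequenceid to newvalue, but only if newvalue is larger. */
  public void setSequenceNumber(final long newvalue) {
    for (long id = logSeqNum.get(); id < newvalue
        && !logSeqNum.compareAndSet(id, newvalue); id = logSeqNum.get()) {
      // Another thread won the race; re-read and retry rather than take a lock.
    }
  }

  public long getSequenceNumber() {
    return logSeqNum.get();
  }

  public static void main(String[] args) {
    MonotonicSequenceId wal = new MonotonicSequenceId();
    wal.setSequenceNumber(42);  // advances 0 -> 42
    wal.setSequenceNumber(7);   // no-op: 7 is not larger than 42
    System.out.println(wal.getSequenceNumber());  // prints 42
  }
}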

@@ -397,7 +397,7 @@ public class HLog implements Syncable {
         !this.logSeqNum.compareAndSet(id, newvalue); id = this.logSeqNum.get()) {
       // This could spin on occasion but better the occasional spin than locking
       // every increment of sequence number.
-      LOG.debug("Change sequence number from " + logSeqNum + " to " + newvalue);
+      LOG.debug("Changed sequenceid from " + logSeqNum + " to " + newvalue);
     }
   }
@@ -475,7 +475,7 @@ public class HLog implements Syncable {
     // Can we delete any of the old log files?
     if (this.outputfiles.size() > 0) {
       if (this.lastSeqWritten.size() <= 0) {
-        LOG.debug("Last sequence written is empty. Deleting all old hlogs");
+        LOG.debug("Last sequenceid written is empty. Deleting all old hlogs");
         // If so, then no new writes have come in since all regions were
         // flushed (and removed from the lastSeqWritten map). Means can
         // remove all but currently open log file.
@@ -569,7 +569,7 @@ public class HLog implements Syncable {
       byte [] oldestRegion = getOldestRegion(oldestOutstandingSeqNum);
       LOG.debug("Found " + logsToRemove + " hlogs to remove " +
         " out of total " + this.outputfiles.size() + "; " +
-        "oldest outstanding seqnum is " + oldestOutstandingSeqNum +
+        "oldest outstanding sequenceid is " + oldestOutstandingSeqNum +
         " from region " + Bytes.toString(oldestRegion));
     }
     for (Long seq : sequenceNumbers) {
@@ -669,7 +669,7 @@ public class HLog implements Syncable {
   private void archiveLogFile(final Path p, final Long seqno) throws IOException {
     Path newPath = getHLogArchivePath(this.oldLogDir, p);
     LOG.info("moving old hlog file " + FSUtils.getPath(p) +
-      " whose highest sequence/edit id is " + seqno + " to " +
+      " whose highest sequenceid is " + seqno + " to " +
       FSUtils.getPath(newPath));
     this.fs.rename(p, newPath);
   }