HBASE-1795 log recovery doesnt reset the max sequence id, new logfiles can
get tossed as 'duplicates'
HBASE-1794 recovered log files are not inserted into the storefile map

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@814003 13f79535-47bb-0310-9956-ffa450edef68

commit cfef20aa18
parent 91805d7928
CHANGES.txt
@@ -14,6 +14,9 @@ Release 0.21.0 - Unreleased
    HBASE-1779 ThriftServer logged error if getVer() result is empty
    HBASE-1778 Improve PerformanceEvaluation (Schubert Zhang via Stack)
    HBASE-1751 Fix KeyValue javadoc on getValue for client-side
+   HBASE-1795 log recovery doesnt reset the max sequence id, new logfiles can
+              get tossed as 'duplicates'
+   HBASE-1794 recovered log files are not inserted into the storefile map
    HBASE-1824 [stargate] default timestamp should be LATEST_TIMESTAMP

   IMPROVEMENTS
@@ -35,12 +38,8 @@ Release 0.21.0 - Unreleased
    HBASE-1800 Too many ZK connections
    HBASE-1819 Update to 0.20.1 hadoop and zk 3.2.1
    HBASE-1820 Update jruby from 1.2 to 1.3.1
-   HBASE-1722 Add support for exporting HBase metrics via JMX
-   HBASE-1825 code cleanup, hmaster split debug logs

   OPTIMIZATIONS
-   HBASE-1765 Delay Result deserialization until asked for and permit
-              access to the raw binary to prevent forced deserialization


Release 0.20.0 - Tue Sep 8 12:53:05 PDT 2009
Store.java
@@ -217,7 +217,10 @@ public class Store implements HConstants, HeapSize {
     this.storefiles.putAll(loadStoreFiles());

     // Do reconstruction log.
-    runReconstructionLog(reconstructionLog, this.maxSeqId, reporter);
+    long newId = runReconstructionLog(reconstructionLog, this.maxSeqId, reporter);
+    if (newId != -1) {
+      this.maxSeqId = newId; // start with the log id we just recovered.
+    }
   }

   HColumnDescriptor getFamily() {
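In effect, the Store constructor now adopts whatever sequence id the reconstruction log produced before it accepts new writes. A minimal, self-contained sketch of that pattern (hypothetical class, field, and helper names, not HBase's actual API):

import java.util.NavigableMap;
import java.util.TreeMap;

// Hypothetical illustration of the HBASE-1795 fix: adopt the sequence id
// recovered from the reconstruction log instead of keeping the stale one.
public class SequenceIdRecoveryExample {

  private long maxSeqId = 5;                         // highest id known from existing store files
  private final NavigableMap<Long, String> storefiles = new TreeMap<>();

  // Stand-in for runReconstructionLog(): returns the id of the recovered file,
  // or -1 if there was nothing to recover.
  private long runRecovery(long knownMaxSeqId) {
    long maxSeqIdInLog = 9;                          // pretend the log contained edits up to id 9
    return maxSeqIdInLog > knownMaxSeqId ? maxSeqIdInLog + 1 : -1;
  }

  public void open() {
    long newId = runRecovery(this.maxSeqId);
    if (newId != -1) {
      this.maxSeqId = newId;                         // start with the id we just recovered
    }
    // A later flush now uses maxSeqId + 1 and cannot be mistaken for a duplicate.
    storefiles.put(this.maxSeqId + 1, "storefile-" + (this.maxSeqId + 1));
  }

  public static void main(String[] args) {
    SequenceIdRecoveryExample store = new SequenceIdRecoveryExample();
    store.open();
    System.out.println("maxSeqId after recovery: " + store.maxSeqId);  // 10, not 5
  }
}

Without the `if (newId != -1)` update, the next flush would reuse a sequence id at or below the recovered edits, which is exactly the "new logfiles get tossed as 'duplicates'" symptom described by HBASE-1795.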
@@ -245,13 +248,14 @@ public class Store implements HConstants, HeapSize {
    * @param reconstructionLog
    * @param msid
    * @param reporter
+   * @return the new max sequence id as per the log
    * @throws IOException
    */
-  private void runReconstructionLog(final Path reconstructionLog,
+  private long runReconstructionLog(final Path reconstructionLog,
     final long msid, final Progressable reporter)
   throws IOException {
     try {
-      doReconstructionLog(reconstructionLog, msid, reporter);
+      return doReconstructionLog(reconstructionLog, msid, reporter);
     } catch (EOFException e) {
       // Presume we got here because of lack of HADOOP-1700; for now keep going
       // but this is probably not what we want long term. If we got here there
@@ -268,6 +272,7 @@ public class Store implements HConstants, HeapSize {
         " opening " + Bytes.toString(this.storeName), e);
       throw e;
     }
+    return -1;
   }

   /*
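Combined with the hunk above, runReconstructionLog now reports the recovered sequence id and uses -1 as a "nothing recovered" sentinel: a truncated log (the HADOOP-1700 caveat) is tolerated, while any other IOException is rethrown. A condensed, hypothetical sketch of that control flow (simplified signatures, String in place of Path, no logging):

import java.io.EOFException;
import java.io.IOException;

// Condensed illustration of the control flow after this change: delegate to
// doReconstructionLog(), swallow an EOFException from a truncated log, rethrow
// everything else, and fall through to -1 when nothing usable was recovered.
public class ReconstructionLogFlow {

  static long runReconstructionLog(String reconstructionLog) throws IOException {
    try {
      return doReconstructionLog(reconstructionLog);
    } catch (EOFException e) {
      // Truncated log: keep going, but report that nothing was recovered.
    } catch (IOException e) {
      throw e;                       // anything else is fatal for opening the store
    }
    return -1;
  }

  // Stand-in for doReconstructionLog(): -1 means "no log" or "empty log".
  static long doReconstructionLog(String reconstructionLog) throws IOException {
    if (reconstructionLog == null) {
      return -1;
    }
    return 42;                       // pretend the log replayed edits up to id 41
  }

  public static void main(String[] args) throws IOException {
    System.out.println(runReconstructionLog(null));            // -1
    System.out.println(runReconstructionLog("recovered.log"));  // 42
  }
}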
@@ -277,20 +282,22 @@ public class Store implements HConstants, HeapSize {
    * We can ignore any log message that has a sequence ID that's equal to or
    * lower than maxSeqID.  (Because we know such log messages are already
    * reflected in the MapFiles.)
+   *
+   * @return the new max sequence id as per the log, or -1 if no log recovered
    */
-  private void doReconstructionLog(final Path reconstructionLog,
+  private long doReconstructionLog(final Path reconstructionLog,
     final long maxSeqID, final Progressable reporter)
   throws UnsupportedEncodingException, IOException {
     if (reconstructionLog == null || !this.fs.exists(reconstructionLog)) {
       // Nothing to do.
-      return;
+      return -1;
     }
     // Check its not empty.
     FileStatus [] stats = this.fs.listStatus(reconstructionLog);
     if (stats == null || stats.length == 0) {
       LOG.warn("Passed reconstruction log " + reconstructionLog +
         " is zero-length");
-      return;
+      return -1;
     }
     // TODO: This could grow large and blow heap out.  Need to get it into
     // general memory usage accounting.
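The two early returns in doReconstructionLog now also use the -1 sentinel: a missing reconstruction log and a zero-length one both mean nothing was replayed. A rough standalone analogue of those guard clauses, using java.nio.file instead of the Hadoop FileSystem API:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

// Rough analogue of the guard clauses: a null/missing log and an empty log
// both report -1 ("nothing recovered") instead of returning silently.
public class ReconstructionLogGuards {

  static long replay(Path reconstructionLog) throws IOException {
    if (reconstructionLog == null || !Files.exists(reconstructionLog)) {
      return -1;                              // nothing to do
    }
    if (Files.size(reconstructionLog) == 0) {
      System.err.println("Passed reconstruction log " + reconstructionLog + " is zero-length");
      return -1;                              // empty log, nothing recovered
    }
    return 7;                                 // pretend edits up to id 6 were replayed
  }

  public static void main(String[] args) throws IOException {
    System.out.println(replay(null));                       // -1
    System.out.println(replay(Paths.get("missing.log")));   // -1
  }
}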
@@ -352,8 +359,21 @@ public class Store implements HConstants, HeapSize {
        if (LOG.isDebugEnabled()) {
          LOG.debug("flushing reconstructionCache");
        }
-       internalFlushCache(reconstructedCache, maxSeqIdInLog + 1);
+
+       long newFileSeqNo = maxSeqIdInLog + 1;
+       StoreFile sf = internalFlushCache(reconstructedCache, newFileSeqNo);
+       // add it to the list of store files with maxSeqIdInLog+1
+       if (sf == null) {
+         throw new IOException("Flush failed with a null store file");
+       }
+       // Add new file to store files.  Clear snapshot too while we have the
+       // Store write lock.
+       this.storefiles.put(newFileSeqNo, sf);
+       notifyChangedReadersObservers();
+
+       return newFileSeqNo;
      }
+     return -1; // the reconstructed cache was 0 sized
    }

    /*
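This last hunk is the HBASE-1794 half of the fix: the store file produced by flushing the reconstructed cache is now registered in the store's file map under maxSeqIdInLog + 1 (and readers are notified) instead of being written and then forgotten. A hypothetical sketch of that bookkeeping, with a plain String standing in for StoreFile and a TreeMap for the storefiles map:

import java.io.IOException;
import java.util.NavigableMap;
import java.util.SortedMap;
import java.util.TreeMap;

// Hypothetical illustration of the HBASE-1794 fix: the file flushed from the
// reconstructed cache must be inserted into the storefile map, keyed by
// maxSeqIdInLog + 1, or readers never see the recovered edits.
public class RecoveredFlushExample {

  private final NavigableMap<Long, String> storefiles = new TreeMap<>();

  // Stand-in for internalFlushCache(): writes the cache and returns a handle.
  private String internalFlushCache(SortedMap<Long, String> cache, long seqNo) {
    return cache.isEmpty() ? null : "storefile-" + seqNo;
  }

  long flushRecoveredCache(SortedMap<Long, String> reconstructedCache, long maxSeqIdInLog)
      throws IOException {
    if (reconstructedCache.isEmpty()) {
      return -1;                                   // the reconstructed cache was 0 sized
    }
    long newFileSeqNo = maxSeqIdInLog + 1;
    String sf = internalFlushCache(reconstructedCache, newFileSeqNo);
    if (sf == null) {
      throw new IOException("Flush failed with a null store file");
    }
    this.storefiles.put(newFileSeqNo, sf);         // the step HBASE-1794 was missing
    return newFileSeqNo;
  }

  public static void main(String[] args) throws IOException {
    RecoveredFlushExample store = new RecoveredFlushExample();
    TreeMap<Long, String> cache = new TreeMap<>();
    cache.put(8L, "edit-8");
    cache.put(9L, "edit-9");
    long id = store.flushRecoveredCache(cache, 9L);
    System.out.println("registered " + store.storefiles.get(id) + " at seq " + id);  // seq 10
  }
}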