HBASE-550 EOF trying to read reconstruction log stops region deployment

M  src/java/org/apache/hadoop/hbase/HStore.java
    (Constructor) If an exception comes out of the doReconstructionLog method,
    log it and keep going.  The presumption is that it is the result of a lack
    of HADOOP-1700.
    (doReconstructionLog): Check for an empty log file.


git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@643110 13f79535-47bb-0310-9956-ffa450edef68
Michael Stack 2008-03-31 19:26:47 +00:00
parent 7f0818ede2
commit 53a6abfe6e
2 changed files with 20 additions and 7 deletions
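
The gist of the change, before reading the diff: guard against a zero-length
reconstruction log before replaying it, and treat an IOException during replay
as survivable so the region still deploys. Below is a minimal sketch of that
shape, not the HStore code itself; the class name, the replayLog() helper, and
the use of System.err instead of the store's logger are illustrative stand-ins.

    // Illustrative sketch only: skip an empty reconstruction log and keep
    // going if replaying it fails, at the cost of losing the edits it held.
    import java.io.IOException;

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ReconstructionLogGuard {

      public static void openStore(FileSystem fs, Path reconstructionLog) {
        try {
          if (reconstructionLog == null || !fs.exists(reconstructionLog)) {
            return; // no log to replay
          }
          // The same zero-length check the patch adds to doReconstructionLog.
          FileStatus[] stats = fs.listStatus(reconstructionLog);
          if (stats == null || stats.length == 0) {
            return; // empty log, nothing to replay
          }
          replayLog(fs, reconstructionLog);
        } catch (IOException e) {
          // Warn and continue so the store still opens; the edits in the
          // unreadable log are lost, the data loss the patch warns about.
          System.err.println("Failed replaying " + reconstructionLog + ": " + e);
        }
      }

      // Stand-in for HStore#doReconstructionLog.
      private static void replayLog(FileSystem fs, Path log) throws IOException {
        // Apply edits from the log to the store's in-memory cache here.
      }
    }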


@@ -105,6 +105,7 @@ Hbase Change Log
 HBASE-529 RegionServer needs to recover if datanode goes down
 HBASE-456 Clearly state which ports need to be opened in order to run HBase
 HBASE-536 Remove MiniDFS startup from MiniHBaseCluster
+HBASE-521 Improve client scanner interface
 Branch 0.1

src/java/org/apache/hadoop/hbase/HStore.java

@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.filter.RowFilterInterface;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -206,10 +207,7 @@ public class HStore implements HConstants {
     }
     if(LOG.isDebugEnabled()) {
-      LOG.debug("starting " + storeName +
-        ((reconstructionLog == null || !fs.exists(reconstructionLog)) ?
-        " (no reconstruction log)" :
-        " with reconstruction log: " + reconstructionLog.toString()));
+      LOG.debug("starting " + storeName);
     }
     // Go through the 'mapdir' and 'infodir' together, make sure that all
@@ -236,7 +234,16 @@ public class HStore implements HConstants {
         this.maxSeqId);
     }
-    doReconstructionLog(reconstructionLog, maxSeqId);
+    try {
+      doReconstructionLog(reconstructionLog, maxSeqId);
+    } catch (IOException e) {
+      // Presume we got here because of some HDFS issue or because of a lack of
+      // HADOOP-1700; for now keep going but this is probably not what we want
+      // long term. If we got here there has been data-loss
+      LOG.warn("Exception processing reconstruction log " + reconstructionLog +
+        " opening " + this.storeName +
+        " -- continuing. Probably DATA LOSS!", e);
+    }
     // By default, we compact if an HStore has more than
     // MIN_COMMITS_FOR_COMPACTION map files
@@ -303,11 +310,16 @@ public class HStore implements HConstants {
   private void doReconstructionLog(final Path reconstructionLog,
     final long maxSeqID)
   throws UnsupportedEncodingException, IOException {
     if (reconstructionLog == null || !fs.exists(reconstructionLog)) {
       // Nothing to do.
       return;
     }
+    // Check its not empty.
+    FileStatus[] stats = fs.listStatus(reconstructionLog);
+    if (stats == null || stats.length == 0) {
+      LOG.warn("Passed reconstruction log " + reconstructionLog + " is zero-length");
+      return;
+    }
     long maxSeqIdInLog = -1;
     TreeMap<HStoreKey, byte []> reconstructedCache =
       new TreeMap<HStoreKey, byte []>();
@@ -1691,4 +1703,4 @@ public class HStore implements HConstants {
       }
     }
   }
 }