HBASE-3198 Log rolling archives files prematurely

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1031861 13f79535-47bb-0310-9956-ffa450edef68
Jean-Daniel Cryans 2010-11-05 22:28:59 +00:00
parent ed119ae20d
commit 7b1950ac75
3 changed files with 66 additions and 7 deletions


@@ -652,6 +652,7 @@ Release 0.21.0 - Unreleased
(Alex Newman via Stack)
HBASE-3202 Closing a region, if we get a ConnectException, handle
it rather than abort
+ HBASE-3198 Log rolling archives files prematurely
IMPROVEMENTS


@@ -614,20 +614,20 @@ public class HLog implements Syncable {
*/
private byte [][] cleanOldLogs() throws IOException {
Long oldestOutstandingSeqNum = getOldestOutstandingSeqNum();
- // Get the set of all log files whose final ID is older than or
- // equal to the oldest pending region operation
+ // Get the set of all log files whose last sequence number is smaller than
+ // the oldest edit's sequence number.
TreeSet<Long> sequenceNumbers =
new TreeSet<Long>(this.outputfiles.headMap(
- (Long.valueOf(oldestOutstandingSeqNum.longValue() + 1L))).keySet());
+ (Long.valueOf(oldestOutstandingSeqNum.longValue()))).keySet());
// Now remove old log files (if any)
int logsToRemove = sequenceNumbers.size();
if (logsToRemove > 0) {
if (LOG.isDebugEnabled()) {
// Find associated region; helps debugging.
byte [] oldestRegion = getOldestRegion(oldestOutstandingSeqNum);
LOG.debug("Found " + logsToRemove + " hlogs to remove " +
" out of total " + this.outputfiles.size() + "; " +
"oldest outstanding sequenceid is " + oldestOutstandingSeqNum +
LOG.debug("Found " + logsToRemove + " hlogs to remove" +
" out of total " + this.outputfiles.size() + ";" +
" oldest outstanding sequenceid is " + oldestOutstandingSeqNum +
" from region " + Bytes.toString(oldestRegion));
}
for (Long seq : sequenceNumbers) {
@@ -724,7 +724,7 @@ public class HLog implements Syncable {
}
if (currentfilenum >= 0) {
oldFile = computeFilename(currentfilenum);
- this.outputfiles.put(Long.valueOf(this.logSeqNum.get() - 1), oldFile);
+ this.outputfiles.put(Long.valueOf(this.logSeqNum.get()), oldFile);
}
}
return oldFile;
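
Side note (not part of the patch): SortedMap.headMap(toKey) is exclusive of
toKey, which is what the fixed bound relies on. The sketch below uses invented
sequence numbers and names (HeadMapBoundDemo, hlog.1, hlog.2 are hypothetical)
to show why the old headMap(oldestOutstandingSeqNum + 1) bound could select the
log that still holds the oldest unflushed edit, while the new exclusive bound
keeps it.

import java.util.TreeMap;

// Stand-in for HLog.outputfiles: highest sequence number written to a rolled
// log file -> that log file. Contents are made up for illustration.
public class HeadMapBoundDemo {
  public static void main(String[] args) {
    TreeMap<Long, String> outputfiles = new TreeMap<Long, String>();
    outputfiles.put(10L, "hlog.1");   // holds edits up to sequence 10
    outputfiles.put(20L, "hlog.2");   // holds edits up to sequence 20

    long oldestOutstandingSeqNum = 20L; // edit 20 has not been flushed yet

    // Old bound: keys <= 20, so hlog.2, which still contains the unflushed
    // edit 20, would be archived prematurely.
    System.out.println(
      outputfiles.headMap(oldestOutstandingSeqNum + 1).keySet()); // [10, 20]

    // Fixed bound: keys strictly < 20, so only the fully flushed hlog.1 is
    // selected for archiving.
    System.out.println(
      outputfiles.headMap(oldestOutstandingSeqNum).keySet());     // [10]
  }
}

The change in the second hunk complements this: the key recorded at roll time
becomes logSeqNum.get() instead of logSeqNum.get() - 1, so the stored key is
the last sequence number that can actually appear in the rolled file.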


@@ -591,6 +591,64 @@ public class TestHLog {
assertEquals(COL_COUNT, visitor.increments);
}
@Test
public void testLogCleaning() throws Exception {
LOG.info("testLogCleaning");
final byte [] tableName = Bytes.toBytes("testLogCleaning");
final byte [] tableName2 = Bytes.toBytes("testLogCleaning2");
HLog log = new HLog(fs, dir, oldLogDir, conf);
HRegionInfo hri = new HRegionInfo(new HTableDescriptor(tableName),
HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
HRegionInfo hri2 = new HRegionInfo(new HTableDescriptor(tableName2),
HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
// Add a single edit and make sure that rolling won't remove the file
// Before HBASE-3198 it used to delete it
addEdits(log, hri, tableName, 1);
log.rollWriter();
assertEquals(1, log.getNumLogFiles());
// See if there's anything wrong with more than 1 edit
addEdits(log, hri, tableName, 2);
log.rollWriter();
assertEquals(2, log.getNumLogFiles());
// Now mix edits from 2 regions, still no flushing
addEdits(log, hri, tableName, 1);
addEdits(log, hri2, tableName2, 1);
addEdits(log, hri, tableName, 1);
addEdits(log, hri2, tableName2, 1);
log.rollWriter();
assertEquals(3, log.getNumLogFiles());
// Flush the first region, we expect to see the first two files getting
// archived
long seqId = log.startCacheFlush();
log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, seqId, false);
log.rollWriter();
assertEquals(2, log.getNumLogFiles());
// Flush the second region, which removes all the remaining output files
// since the oldest was completely flushed and the two others only contain
// flush information
seqId = log.startCacheFlush();
log.completeCacheFlush(hri2.getEncodedNameAsBytes(), tableName2, seqId, false);
log.rollWriter();
assertEquals(0, log.getNumLogFiles());
}
private void addEdits(HLog log, HRegionInfo hri, byte [] tableName,
int times) throws IOException {
final byte [] row = Bytes.toBytes("row");
for (int i = 0; i < times; i++) {
long timestamp = System.currentTimeMillis();
WALEdit cols = new WALEdit();
cols.add(new KeyValue(row, row, row, timestamp, row));
log.append(hri, tableName, cols, timestamp);
}
}
static class DumbWALObserver implements WALObserver {
int increments = 0;