HBASE-1411 Remove HLogEdit

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@774595 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2009-05-13 23:27:07 +00:00
parent f88ad07fe9
commit e738b7f4a2
8 changed files with 74 additions and 44 deletions

View File

@ -18,6 +18,7 @@ Release 0.20.0 - Unreleased
HBASE-1361 Disable bloom filters HBASE-1361 Disable bloom filters
HBASE-1367 Get rid of Thrift exception 'NotFound' HBASE-1367 Get rid of Thrift exception 'NotFound'
HBASE-1381 Remove onelab and bloom filters files from hbase HBASE-1381 Remove onelab and bloom filters files from hbase
HBASE-1411 Remove HLogEdit.
BUG FIXES BUG FIXES
HBASE-1140 "ant clean test" fails (Nitay Joffe via Stack) HBASE-1140 "ant clean test" fails (Nitay Joffe via Stack)

View File

@ -33,7 +33,6 @@ import java.util.Map.Entry;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.HLogEdit;
import org.apache.hadoop.hbase.io.Cell; import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;

View File

@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.regionserver;
import java.io.EOFException; import java.io.EOFException;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.Collections; import java.util.Collections;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
@ -147,6 +148,15 @@ public class HLog implements HConstants, Syncable {
*/ */
private final int maxLogs; private final int maxLogs;
static byte [] COMPLETE_CACHE_FLUSH;
static {
try {
COMPLETE_CACHE_FLUSH = "HBASE::CACHEFLUSH".getBytes(UTF8_ENCODING);
} catch (UnsupportedEncodingException e) {
assert(false);
}
}
/** /**
* Create an edit log at the given <code>dir</code> location. * Create an edit log at the given <code>dir</code> location.
* *
@ -199,6 +209,10 @@ public class HLog implements HConstants, Syncable {
/** /**
* Get the compression type for the hlog files. * Get the compression type for the hlog files.
* Commit logs SHOULD NOT be compressed. You'll lose edits if the compression
* record is not complete. In gzip, record is 32k so you could lose up to
* 32k of edits (All of this is moot till we have sync/flush in hdfs but
* still...).
* @param c Configuration to use. * @param c Configuration to use.
* @return the kind of compression to use * @return the kind of compression to use
*/ */
@ -266,7 +280,7 @@ public class HLog implements HConstants, Syncable {
Path newPath = computeFilename(this.filenum); Path newPath = computeFilename(this.filenum);
this.writer = SequenceFile.createWriter(this.fs, this.conf, newPath, this.writer = SequenceFile.createWriter(this.fs, this.conf, newPath,
HLogKey.class, HLogEdit.class, HLogKey.class, KeyValue.class,
fs.getConf().getInt("io.file.buffer.size", 4096), fs.getConf().getInt("io.file.buffer.size", 4096),
fs.getDefaultReplication(), this.blocksize, fs.getDefaultReplication(), this.blocksize,
SequenceFile.CompressionType.NONE, new DefaultCodec(), null, SequenceFile.CompressionType.NONE, new DefaultCodec(), null,
@ -441,7 +455,7 @@ public class HLog implements HConstants, Syncable {
* @param logEdit * @param logEdit
* @throws IOException * @throws IOException
*/ */
public void append(HRegionInfo regionInfo, HLogEdit logEdit) public void append(HRegionInfo regionInfo, KeyValue logEdit)
throws IOException { throws IOException {
this.append(regionInfo, new byte[0], logEdit); this.append(regionInfo, new byte[0], logEdit);
} }
@ -453,7 +467,7 @@ public class HLog implements HConstants, Syncable {
* @param logEdit * @param logEdit
* @throws IOException * @throws IOException
*/ */
public void append(HRegionInfo regionInfo, byte [] row, HLogEdit logEdit) public void append(HRegionInfo regionInfo, byte [] row, KeyValue logEdit)
throws IOException { throws IOException {
if (this.closed) { if (this.closed) {
throw new IOException("Cannot append; log is closed"); throw new IOException("Cannot append; log is closed");
@ -520,7 +534,7 @@ public class HLog implements HConstants, Syncable {
int counter = 0; int counter = 0;
for (KeyValue kv: edits) { for (KeyValue kv: edits) {
HLogKey logKey = new HLogKey(regionName, tableName, seqNum[counter++]); HLogKey logKey = new HLogKey(regionName, tableName, seqNum[counter++]);
doWrite(logKey, new HLogEdit(kv), sync); doWrite(logKey, kv, sync);
this.numEntries.incrementAndGet(); this.numEntries.incrementAndGet();
} }
updateLock.notifyAll(); updateLock.notifyAll();
@ -563,7 +577,7 @@ public class HLog implements HConstants, Syncable {
} }
} }
private void doWrite(HLogKey logKey, HLogEdit logEdit, boolean sync) private void doWrite(HLogKey logKey, KeyValue logEdit, boolean sync)
throws IOException { throws IOException {
try { try {
long now = System.currentTimeMillis(); long now = System.currentTimeMillis();
@ -663,9 +677,9 @@ public class HLog implements HConstants, Syncable {
} }
} }
private HLogEdit completeCacheFlushLogEdit() { private KeyValue completeCacheFlushLogEdit() {
return new HLogEdit(new KeyValue(METAROW, METACOLUMN, return new KeyValue(METAROW, METACOLUMN, System.currentTimeMillis(),
System.currentTimeMillis(), HLogEdit.COMPLETE_CACHE_FLUSH)); COMPLETE_CACHE_FLUSH);
} }
/** /**
@ -747,7 +761,7 @@ public class HLog implements HConstants, Syncable {
// HADOOP-4751 is committed. // HADOOP-4751 is committed.
long length = logfiles[i].getLen(); long length = logfiles[i].getLen();
HLogKey key = new HLogKey(); HLogKey key = new HLogKey();
HLogEdit val = new HLogEdit(); KeyValue val = new KeyValue();
try { try {
SequenceFile.Reader in = SequenceFile.Reader in =
new SequenceFile.Reader(fs, logfiles[i].getPath(), conf); new SequenceFile.Reader(fs, logfiles[i].getPath(), conf);
@ -773,7 +787,7 @@ public class HLog implements HConstants, Syncable {
old = new SequenceFile.Reader(fs, oldlogfile, conf); old = new SequenceFile.Reader(fs, oldlogfile, conf);
} }
w = SequenceFile.createWriter(fs, conf, logfile, HLogKey.class, w = SequenceFile.createWriter(fs, conf, logfile, HLogKey.class,
HLogEdit.class, getCompressionType(conf)); KeyValue.class, getCompressionType(conf));
// Use copy of regionName; regionName object is reused inside in // Use copy of regionName; regionName object is reused inside in
// HStoreKey.getRegionName so its content changes as we iterate. // HStoreKey.getRegionName so its content changes as we iterate.
logWriters.put(regionName, w); logWriters.put(regionName, w);
@ -785,7 +799,7 @@ public class HLog implements HConstants, Syncable {
if (old != null) { if (old != null) {
// Copy from existing log file // Copy from existing log file
HLogKey oldkey = new HLogKey(); HLogKey oldkey = new HLogKey();
HLogEdit oldval = new HLogEdit(); KeyValue oldval = new KeyValue();
for (; old.next(oldkey, oldval); count++) { for (; old.next(oldkey, oldval); count++) {
if (LOG.isDebugEnabled() && count > 0 && count % 10000 == 0) { if (LOG.isDebugEnabled() && count > 0 && count % 10000 == 0) {
LOG.debug("Copied " + count + " edits"); LOG.debug("Copied " + count + " edits");
@ -918,7 +932,7 @@ public class HLog implements HConstants, Syncable {
Reader log = new SequenceFile.Reader(fs, logPath, conf); Reader log = new SequenceFile.Reader(fs, logPath, conf);
try { try {
HLogKey key = new HLogKey(); HLogKey key = new HLogKey();
HLogEdit val = new HLogEdit(); KeyValue val = new KeyValue();
while (log.next(key, val)) { while (log.next(key, val)) {
System.out.println(key.toString() + " " + val.toString()); System.out.println(key.toString() + " " + val.toString());
} }

View File

@ -290,7 +290,7 @@ public class Store implements HConstants {
reconstructionLog, this.conf); reconstructionLog, this.conf);
try { try {
HLogKey key = new HLogKey(); HLogKey key = new HLogKey();
HLogEdit val = new HLogEdit(); KeyValue val = new KeyValue();
long skippedEdits = 0; long skippedEdits = 0;
long editsCount = 0; long editsCount = 0;
// How many edits to apply before we send a progress report. // How many edits to apply before we send a progress report.
@ -304,15 +304,14 @@ public class Store implements HConstants {
} }
// Check this edit is for me. Also, guard against writing the special // Check this edit is for me. Also, guard against writing the special
// METACOLUMN info such as HBASE::CACHEFLUSH entries // METACOLUMN info such as HBASE::CACHEFLUSH entries
KeyValue kv = val.getKeyValue(); if (/* Commented out for now -- St.Ack val.isTransactionEntry() ||*/
if (val.isTransactionEntry() || val.matchingColumnNoDelimiter(HLog.METACOLUMN,
kv.matchingColumnNoDelimiter(HLog.METACOLUMN,
HLog.METACOLUMN.length - 1) || HLog.METACOLUMN.length - 1) ||
!Bytes.equals(key.getRegionName(), regioninfo.getRegionName()) || !Bytes.equals(key.getRegionName(), regioninfo.getRegionName()) ||
!kv.matchingFamily(family.getName())) { !val.matchingFamily(family.getName())) {
continue; continue;
} }
reconstructedCache.add(kv); reconstructedCache.add(val);
editsCount++; editsCount++;
// Every 2k edits, tell the reporter we're making progress. // Every 2k edits, tell the reporter we're making progress.
// Have seen 60k edits taking 3minutes to complete. // Have seen 60k edits taking 3minutes to complete.

View File

@ -37,10 +37,10 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.BatchOperation; import org.apache.hadoop.hbase.io.BatchOperation;
import org.apache.hadoop.hbase.io.BatchUpdate; import org.apache.hadoop.hbase.io.BatchUpdate;
import org.apache.hadoop.hbase.regionserver.HLog; import org.apache.hadoop.hbase.regionserver.HLog;
import org.apache.hadoop.hbase.regionserver.HLogEdit;
import org.apache.hadoop.hbase.regionserver.HLogKey; import org.apache.hadoop.hbase.regionserver.HLogKey;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.SequenceFile;
@ -51,6 +51,18 @@ import org.apache.hadoop.util.Progressable;
* to/from the HLog. * to/from the HLog.
*/ */
class TransactionalHLogManager { class TransactionalHLogManager {
/** If transactional log entry, these are the op codes */
// TODO: Make these into types on the KeyValue!!! -- St.Ack
public enum TransactionalOperation {
/** start transaction */
START,
/** Equivalent to append in non-transactional environment */
WRITE,
/** Transaction commit entry */
COMMIT,
/** Abort transaction entry */
ABORT
}
private static final Log LOG = LogFactory private static final Log LOG = LogFactory
.getLog(TransactionalHLogManager.class); .getLog(TransactionalHLogManager.class);
@ -84,10 +96,11 @@ class TransactionalHLogManager {
* @throws IOException * @throws IOException
*/ */
public void writeStartToLog(final long transactionId) throws IOException { public void writeStartToLog(final long transactionId) throws IOException {
/*
HLogEdit logEdit; HLogEdit logEdit;
logEdit = new HLogEdit(transactionId, HLogEdit.TransactionalOperation.START); logEdit = new HLogEdit(transactionId, TransactionalOperation.START);
*/
hlog.append(regionInfo, logEdit); hlog.append(regionInfo, null/*logEdit*/);
} }
/** /**
@ -103,8 +116,8 @@ class TransactionalHLogManager {
: update.getTimestamp(); : update.getTimestamp();
for (BatchOperation op : update) { for (BatchOperation op : update) {
HLogEdit logEdit = new HLogEdit(transactionId, update.getRow(), op, commitTime); // COMMENTED OUT HLogEdit logEdit = new HLogEdit(transactionId, update.getRow(), op, commitTime);
hlog.append(regionInfo, update.getRow(), logEdit); hlog.append(regionInfo, update.getRow(), null /*logEdit*/);
} }
} }
@ -113,11 +126,11 @@ class TransactionalHLogManager {
* @throws IOException * @throws IOException
*/ */
public void writeCommitToLog(final long transactionId) throws IOException { public void writeCommitToLog(final long transactionId) throws IOException {
HLogEdit logEdit; /*HLogEdit logEdit;
logEdit = new HLogEdit(transactionId, logEdit = new HLogEdit(transactionId,
HLogEdit.TransactionalOperation.COMMIT); HLogEdit.TransactionalOperation.COMMIT);
*/
hlog.append(regionInfo, logEdit); hlog.append(regionInfo, null /*logEdit*/);
} }
/** /**
@ -125,10 +138,10 @@ class TransactionalHLogManager {
* @throws IOException * @throws IOException
*/ */
public void writeAbortToLog(final long transactionId) throws IOException { public void writeAbortToLog(final long transactionId) throws IOException {
HLogEdit logEdit; /*HLogEdit logEdit;
logEdit = new HLogEdit(transactionId, HLogEdit.TransactionalOperation.ABORT); logEdit = new HLogEdit(transactionId, HLogEdit.TransactionalOperation.ABORT);
*/
hlog.append(regionInfo, logEdit); hlog.append(regionInfo, null /*logEdit*/);
} }
/** /**
@ -161,10 +174,10 @@ class TransactionalHLogManager {
SequenceFile.Reader logReader = new SequenceFile.Reader(fileSystem, SequenceFile.Reader logReader = new SequenceFile.Reader(fileSystem,
reconstructionLog, conf); reconstructionLog, conf);
/*
try { try {
HLogKey key = new HLogKey(); HLogKey key = new HLogKey();
HLogEdit val = new HLogEdit(); KeyValue val = new KeyValue();
long skippedEdits = 0; long skippedEdits = 0;
long totalEdits = 0; long totalEdits = 0;
long startCount = 0; long startCount = 0;
@ -174,6 +187,7 @@ class TransactionalHLogManager {
// How many edits to apply before we send a progress report. // How many edits to apply before we send a progress report.
int reportInterval = conf.getInt("hbase.hstore.report.interval.edits", int reportInterval = conf.getInt("hbase.hstore.report.interval.edits",
2000); 2000);
while (logReader.next(key, val)) { while (logReader.next(key, val)) {
LOG.debug("Processing edit: key: " + key.toString() + " val: " LOG.debug("Processing edit: key: " + key.toString() + " val: "
+ val.toString()); + val.toString());
@ -185,6 +199,7 @@ class TransactionalHLogManager {
// against a KeyValue. Each invocation creates a new instance. St.Ack. // against a KeyValue. Each invocation creates a new instance. St.Ack.
// Check this edit is for me. // Check this edit is for me.
byte[] column = val.getKeyValue().getColumn(); byte[] column = val.getKeyValue().getColumn();
Long transactionId = val.getTransactionId(); Long transactionId = val.getTransactionId();
if (!val.isTransactionEntry() || HLog.isMetaColumn(column) if (!val.isTransactionEntry() || HLog.isMetaColumn(column)
@ -194,6 +209,7 @@ class TransactionalHLogManager {
List<BatchUpdate> updates = pendingTransactionsById.get(transactionId); List<BatchUpdate> updates = pendingTransactionsById.get(transactionId);
switch (val.getOperation()) { switch (val.getOperation()) {
case START: case START:
if (updates != null || abortedTransactions.contains(transactionId) if (updates != null || abortedTransactions.contains(transactionId)
|| commitedTransactionsById.containsKey(transactionId)) { || commitedTransactionsById.containsKey(transactionId)) {
@ -259,6 +275,7 @@ class TransactionalHLogManager {
pendingTransactionsById.remove(transactionId); pendingTransactionsById.remove(transactionId);
commitedTransactionsById.put(transactionId, updates); commitedTransactionsById.put(transactionId, updates);
commitCount++; commitCount++;
} }
totalEdits++; totalEdits++;
@ -283,6 +300,7 @@ class TransactionalHLogManager {
+ " unfinished transactions. Going to the transaction log to resolve"); + " unfinished transactions. Going to the transaction log to resolve");
throw new RuntimeException("Transaction log not yet implemented"); throw new RuntimeException("Transaction log not yet implemented");
} }
*/
return commitedTransactionsById; return commitedTransactionsById;
} }

View File

@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.util.Bytes;
* Test the transaction functionality. This requires to run an * Test the transaction functionality. This requires to run an
* {@link TransactionalRegionServer}. * {@link TransactionalRegionServer}.
*/ */
public class TestTransactions extends HBaseClusterTestCase { public class DisabledTestTransactions extends HBaseClusterTestCase {
private static final String TABLE_NAME = "table1"; private static final String TABLE_NAME = "table1";
@ -52,7 +52,7 @@ public class TestTransactions extends HBaseClusterTestCase {
private TransactionManager transactionManager; private TransactionManager transactionManager;
/** constructor */ /** constructor */
public TestTransactions() { public DisabledTestTransactions() {
conf.set(HConstants.REGION_SERVER_CLASS, TransactionalRegionInterface.class conf.set(HConstants.REGION_SERVER_CLASS, TransactionalRegionInterface.class
.getName()); .getName());
conf.set(HConstants.REGION_SERVER_IMPL, TransactionalRegionServer.class conf.set(HConstants.REGION_SERVER_IMPL, TransactionalRegionServer.class
@ -94,7 +94,7 @@ public class TestTransactions extends HBaseClusterTestCase {
transactionManager.tryCommit(transactionState2); transactionManager.tryCommit(transactionState2);
} }
public void testTwoTransactionsWithConflict() throws IOException, public void TestTwoTransactionsWithConflict() throws IOException,
CommitUnsuccessfulException { CommitUnsuccessfulException {
TransactionState transactionState1 = makeTransaction1(); TransactionState transactionState1 = makeTransaction1();
TransactionState transactionState2 = makeTransaction2(); TransactionState transactionState2 = makeTransaction2();

View File

@ -31,7 +31,6 @@ import junit.framework.TestCase;
import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.Cell; import org.apache.hadoop.hbase.io.Cell;
import org.apache.hadoop.hbase.regionserver.HLogEdit;
import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Bytes;
/** /**

View File

@ -119,23 +119,23 @@ public class TestHLog extends HBaseTestCase implements HConstants {
// Now open a reader on the log and assert append worked. // Now open a reader on the log and assert append worked.
reader = new SequenceFile.Reader(fs, filename, conf); reader = new SequenceFile.Reader(fs, filename, conf);
HLogKey key = new HLogKey(); HLogKey key = new HLogKey();
HLogEdit val = new HLogEdit(); KeyValue val = new KeyValue();
for (int i = 0; i < COL_COUNT; i++) { for (int i = 0; i < COL_COUNT; i++) {
reader.next(key, val); reader.next(key, val);
assertTrue(Bytes.equals(regionName, key.getRegionName())); assertTrue(Bytes.equals(regionName, key.getRegionName()));
assertTrue(Bytes.equals(tableName, key.getTablename())); assertTrue(Bytes.equals(tableName, key.getTablename()));
assertTrue(Bytes.equals(row, val.getKeyValue().getRow())); assertTrue(Bytes.equals(row, val.getRow()));
assertEquals((byte)(i + '0'), val.getKeyValue().getValue()[0]); assertEquals((byte)(i + '0'), val.getValue()[0]);
System.out.println(key + " " + val); System.out.println(key + " " + val);
} }
while (reader.next(key, val)) { while (reader.next(key, val)) {
// Assert only one more row... the meta flushed row. // Assert only one more row... the meta flushed row.
assertTrue(Bytes.equals(regionName, key.getRegionName())); assertTrue(Bytes.equals(regionName, key.getRegionName()));
assertTrue(Bytes.equals(tableName, key.getTablename())); assertTrue(Bytes.equals(tableName, key.getTablename()));
assertTrue(Bytes.equals(HLog.METAROW, val.getKeyValue().getRow())); assertTrue(Bytes.equals(HLog.METAROW, val.getRow()));
assertTrue(Bytes.equals(HLog.METACOLUMN, val.getKeyValue().getColumn())); assertTrue(Bytes.equals(HLog.METACOLUMN, val.getColumn()));
assertEquals(0, Bytes.compareTo(HLogEdit.COMPLETE_CACHE_FLUSH, assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
val.getKeyValue().getValue())); val.getValue()));
System.out.println(key + " " + val); System.out.println(key + " " + val);
} }
} finally { } finally {