From b94e6738d920cb5bf297214883e0cf29522597b9 Mon Sep 17 00:00:00 2001
From: stack
Date: Tue, 18 Nov 2014 12:12:35 -0800
Subject: [PATCH] HBASE-10378 Refactor write-ahead-log implementation --
ADDEDNUM
---
bin/hbase | 7 +-
bin/hbase.cmd | 11 +-
.../hbase/DroppedSnapshotException.java | 2 +-
.../org/apache/hadoop/hbase/client/Admin.java | 15 +-
.../client/replication/ReplicationAdmin.java | 2 +-
.../RegionServerCoprocessorRpcChannel.java | 2 +-
.../hbase/protobuf/ResponseConverter.java | 16 -
.../hbase/replication/ReplicationQueues.java | 24 +-
.../replication/ReplicationQueuesClient.java | 6 +-
.../ReplicationQueuesClientZKImpl.java | 2 +-
.../replication/ReplicationQueuesZKImpl.java | 70 +-
.../apache/hadoop/hbase/zookeeper/ZKUtil.java | 12 +-
.../org/apache/hadoop/hbase/HConstants.java | 4 +-
.../org/apache/hadoop/hbase/KeyValue.java | 2 +-
.../src/main/resources/hbase-default.xml | 10 +-
.../master/MetricsMasterFileSystemSource.java | 6 +-
.../MetricsRegionServerSource.java | 8 +-
.../MetricsRegionServerWrapper.java | 8 +-
.../wal/MetricsEditsReplaySource.java | 2 +-
.../regionserver/wal/MetricsWALSource.java | 12 +-
.../wal/TestMetricsHLogSource.java | 32 -
.../MetricsRegionServerSourceImpl.java | 4 +-
.../wal/MetricsWALSourceImpl.java | 3 +-
.../IntegrationTestIngestWithEncryption.java | 7 +-
.../hbase/mttr/IntegrationTestMTTR.java | 4 +-
.../hbase/protobuf/generated/AdminProtos.java | 12 +
.../generated/RegionServerStatusProtos.java | 16 +-
.../hbase/protobuf/generated/WALProtos.java | 16 +-
.../protobuf/generated/ZooKeeperProtos.java | 4 +-
hbase-protocol/src/main/protobuf/Admin.proto | 4 +
.../main/protobuf/RegionServerStatus.proto | 2 +-
hbase-protocol/src/main/protobuf/WAL.proto | 9 +-
.../src/main/protobuf/ZooKeeper.proto | 2 +-
.../tmpl/regionserver/ServerMetricsTmpl.jamon | 16 +-
.../apache/hadoop/hbase/SplitLogCounters.java | 2 +-
.../org/apache/hadoop/hbase/SplitLogTask.java | 1 -
.../SplitLogWorkerCoordination.java | 8 +-
.../ZKSplitLogManagerCoordination.java | 8 +-
.../ZkSplitLogWorkerCoordination.java | 12 +-
.../hbase/coprocessor/BaseRegionObserver.java | 19 +
.../hbase/coprocessor/BaseWALObserver.java | 25 +-
.../hbase/coprocessor/CoprocessorHost.java | 74 +
.../hbase/coprocessor/RegionObserver.java | 64 +-
.../WALCoprocessorEnvironment.java | 6 +-
.../hadoop/hbase/coprocessor/WALObserver.java | 63 +-
.../apache/hadoop/hbase/fs/HFileSystem.java | 16 +-
.../org/apache/hadoop/hbase/io/HLogLink.java | 69 -
.../hbase/mapreduce/HFileOutputFormat2.java | 4 +-
.../hbase/mapreduce/HLogInputFormat.java | 235 +--
.../mapreduce/MultiTableOutputFormat.java | 4 +-
.../mapreduce/TableSnapshotInputFormat.java | 2 +-
.../hadoop/hbase/mapreduce/WALPlayer.java | 52 +-
.../hbase/master/AssignmentManager.java | 14 +-
.../hadoop/hbase/master/MasterFileSystem.java | 45 +-
.../hbase/master/MetricsMasterFileSystem.java | 4 +-
.../hadoop/hbase/master/RegionStates.java | 4 +-
.../hadoop/hbase/master/ServerManager.java | 8 +-
.../hadoop/hbase/master/SplitLogManager.java | 40 +-
.../hbase/master/cleaner/LogCleaner.java | 6 +-
.../master/cleaner/TimeToLiveLogCleaner.java | 2 +-
.../handler/MetaServerShutdownHandler.java | 4 +-
.../master/handler/ServerShutdownHandler.java | 20 +-
.../master/snapshot/SnapshotLogCleaner.java | 8 +-
.../hbase/migration/NamespaceUpgrade.java | 17 +-
.../protobuf/ReplicationProtbufUtil.java | 26 +-
.../hbase/regionserver/HRegionServer.java | 234 ++-
.../hadoop/hbase/regionserver/HStore.java | 6 +-
.../hadoop/hbase/regionserver/LogRoller.java | 109 +-
.../hbase/regionserver/MemStoreFlusher.java | 4 +-
.../hbase/regionserver/MetaLogRoller.java | 38 -
.../MetricsRegionServerWrapperImpl.java | 29 +-
.../hbase/regionserver/RSRpcServices.java | 37 +-
.../regionserver/RegionCoprocessorHost.java | 60 +-
.../regionserver/RegionServerAccounting.java | 2 +-
.../regionserver/RegionServerServices.java | 6 +-
.../hbase/regionserver/SplitLogWorker.java | 12 +-
.../handler/HLogSplitterHandler.java | 106 --
.../regionserver/wal/CompressionContext.java | 11 +-
.../hbase/regionserver/wal/Compressor.java | 18 +-
.../hadoop/hbase/regionserver/wal/FSHLog.java | 681 ++++----
.../hbase/regionserver/wal/FSWALEntry.java | 11 +-
.../hadoop/hbase/regionserver/wal/HLog.java | 445 -----
.../hbase/regionserver/wal/HLogFactory.java | 207 ---
.../hbase/regionserver/wal/HLogKey.java | 430 +----
.../regionserver/wal/HLogPrettyPrinter.java | 315 +---
.../hbase/regionserver/wal/HLogUtil.java | 367 -----
.../hbase/regionserver/wal/MetricsWAL.java | 13 +-
.../regionserver/wal/ProtobufLogReader.java | 40 +-
.../regionserver/wal/ProtobufLogWriter.java | 14 +-
.../hbase/regionserver/wal/ReaderBase.java | 29 +-
.../wal/SecureProtobufLogWriter.java | 6 +-
.../wal/SequenceFileLogReader.java | 21 +-
.../regionserver/wal/WALActionsListener.java | 76 +-
.../hbase/regionserver/wal/WALCellCodec.java | 2 +-
.../regionserver/wal/WALCoprocessorHost.java | 67 +-
.../hbase/regionserver/wal/WALEdit.java | 8 +-
.../regionserver/wal/WALEditsReplaySink.java | 31 +-
.../hbase/regionserver/wal/WriterBase.java | 4 +-
.../replication/ChainWALEntryFilter.java | 2 +-
.../replication/ReplicationEndpoint.java | 8 +-
.../replication/ScopeWALEntryFilter.java | 2 +-
.../SystemTableWALEntryFilter.java | 2 +-
.../replication/TableCfWALEntryFilter.java | 2 +-
.../hbase/replication/WALEntryFilter.java | 10 +-
.../master/ReplicationLogCleaner.java | 26 +-
.../HBaseInterClusterReplicationEndpoint.java | 6 +-
.../RegionReplicaReplicationEndpoint.java | 26 +-
.../replication/regionserver/Replication.java | 36 +-
.../ReplicationHLogReaderManager.java | 144 --
.../regionserver/ReplicationSink.java | 2 +-
.../regionserver/ReplicationSource.java | 38 +-
.../ReplicationSourceManager.java | 104 +-
.../security/access/AccessController.java | 2 +-
.../HbaseObjectWritableFor96Migration.java | 38 +-
.../hadoop/hbase/snapshot/ExportSnapshot.java | 14 +-
.../hadoop/hbase/snapshot/SnapshotInfo.java | 6 +-
.../hbase/snapshot/SnapshotReferenceUtil.java | 8 +-
.../apache/hadoop/hbase/util/FSHDFSUtils.java | 2 +-
.../apache/hadoop/hbase/util/FSVisitor.java | 14 +-
.../apache/hadoop/hbase/util/HBaseFsck.java | 6 +-
.../org/apache/hadoop/hbase/util/HMerge.java | 18 +-
.../org/apache/hadoop/hbase/util/Merge.java | 7 +-
.../apache/hadoop/hbase/util/MetaUtils.java | 35 +-
.../hadoop/hbase/util/RegionSplitter.java | 2 +-
.../hadoop/hbase/zookeeper/ZKSplitLog.java | 2 +-
.../apache/hadoop/hbase/HBaseTestCase.java | 2 +-
.../hadoop/hbase/HBaseTestingUtility.java | 22 +-
.../hbase/MockRegionServerServices.java | 4 +-
.../apache/hadoop/hbase/TestIOFencing.java | 13 +-
.../hbase/backup/TestHFileArchiving.java | 2 +-
.../hadoop/hbase/client/TestAdmin2.java | 10 +-
.../coprocessor/SampleRegionWALObserver.java | 77 +-
.../coprocessor/SimpleRegionObserver.java | 49 +-
.../TestRegionObserverInterface.java | 69 +-
.../TestRegionObserverScannerOpenHook.java | 4 +-
.../hbase/coprocessor/TestWALObserver.java | 199 ++-
.../hadoop/hbase/filter/TestFilter.java | 14 +-
.../filter/TestInvocationRecordFilter.java | 8 +-
.../hadoop/hbase/fs/TestBlockReorder.java | 108 +-
.../apache/hadoop/hbase/io/TestHeapSize.java | 2 +-
.../hbase/mapreduce/TestHLogRecordReader.java | 230 +--
.../hbase/mapreduce/TestImportExport.java | 55 +-
.../mapreduce/TestTableMapReduceUtil.java | 8 +-
.../hadoop/hbase/mapreduce/TestWALPlayer.java | 31 +-
.../hadoop/hbase/master/MockRegionServer.java | 4 +-
.../master/TestDistributedLogSplitting.java | 131 +-
.../hbase/master/TestMasterFailover.java | 2 +-
.../snapshot/TestSnapshotFileCache.java | 17 +-
.../MetricsRegionServerWrapperStub.java | 6 +-
.../regionserver/TestAtomicOperation.java | 4 +-
.../TestCacheOnWriteInSchema.java | 20 +-
.../hbase/regionserver/TestCompaction.java | 6 +-
.../TestDefaultCompactSelection.java | 19 +-
.../TestGetClosestAtOrBefore.java | 6 +-
.../hbase/regionserver/TestHRegion.java | 233 +--
.../regionserver/TestMajorCompaction.java | 6 +-
.../regionserver/TestMinorCompaction.java | 6 +-
.../TestRegionMergeTransaction.java | 29 +-
.../regionserver/TestSplitTransaction.java | 22 +-
.../hadoop/hbase/regionserver/TestStore.java | 16 +-
.../TestStoreFileRefresherChore.java | 12 +-
.../hbase/regionserver/wal/FaultyHLog.java | 70 -
.../wal/FaultySequenceFileLogReader.java | 12 +-
.../wal/HLogPerformanceEvaluation.java | 566 -------
.../regionserver/wal/HLogUtilsForTests.java | 43 -
.../InstrumentedSequenceFileLogWriter.java | 40 -
.../wal/SequenceFileLogWriter.java | 19 +-
.../regionserver/wal/TestDurability.java | 55 +-
.../hbase/regionserver/wal/TestHLog.java | 1343 ---------------
.../regionserver/wal/TestHLogFiltering.java | 154 --
.../regionserver/wal/TestHLogMethods.java | 171 --
.../wal/TestHLogReaderOnSecureHLog.java | 198 ---
.../hbase/regionserver/wal/TestHLogSplit.java | 1454 -----------------
.../wal/TestHLogSplitCompressed.java | 36 -
.../regionserver/wal/TestLogRollAbort.java | 37 +-
.../regionserver/wal/TestLogRollPeriod.java | 23 +-
.../regionserver/wal/TestLogRolling.java | 151 +-
.../wal/TestLogRollingNoCluster.java | 30 +-
.../wal/TestReadOldRootAndMetaEdits.java | 39 +-
.../regionserver/wal/TestSecureHLog.java | 130 --
.../regionserver/wal/TestSecureWALReplay.java | 6 +-
.../wal/TestWALActionsListener.java | 63 +-
.../hbase/regionserver/wal/TestWALReplay.java | 133 +-
.../TestMultiSlaveReplication.java | 63 +-
...tReplicationChangingPeerRegionservers.java | 2 +-
.../replication/TestReplicationEndpoint.java | 7 +-
...TestReplicationKillMasterRSCompressed.java | 2 +-
.../TestReplicationSmallTests.java | 8 +-
.../replication/TestReplicationSource.java | 24 +-
.../TestReplicationWALEntryFilters.java | 39 +-
.../TestRegionReplicaReplicationEndpoint.java | 8 +-
...ionReplicaReplicationEndpointNoMaster.java | 16 +-
.../TestReplicationHLogReaderManager.java | 239 ---
.../TestReplicationSourceManager.java | 47 +-
.../hadoop/hbase/util/TestFSVisitor.java | 19 +-
.../hadoop/hbase/util/TestHBaseFsck.java | 16 +-
.../hadoop/hbase/util/TestMergeTool.java | 56 +-
hbase-shell/src/main/ruby/hbase/admin.rb | 8 +-
hbase-shell/src/main/ruby/shell.rb | 8 +-
.../src/main/ruby/shell/commands/hlog_roll.rb | 39 -
src/main/docbkx/book.xml | 29 +-
src/main/docbkx/ops_mgt.xml | 19 +-
src/main/docbkx/performance.xml | 4 +-
src/main/docbkx/troubleshooting.xml | 10 +-
204 files changed, 2936 insertions(+), 9161 deletions(-)
delete mode 100644 hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsHLogSource.java
delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/io/HLogLink.java
delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetaLogRoller.java
delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HLogSplitterHandler.java
delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogFactory.java
delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtil.java
delete mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationHLogReaderManager.java
delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/FaultyHLog.java
delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogPerformanceEvaluation.java
delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/HLogUtilsForTests.java
delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/InstrumentedSequenceFileLogWriter.java
delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogFiltering.java
delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogMethods.java
delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogReaderOnSecureHLog.java
delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java
delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplitCompressed.java
delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestSecureHLog.java
delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationHLogReaderManager.java
delete mode 100644 hbase-shell/src/main/ruby/shell/commands/hlog_roll.rb
diff --git a/bin/hbase b/bin/hbase
index 8678eb8fcc9..4573c570b9e 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -83,7 +83,7 @@ if [ $# = 0 ]; then
echo "Some commands take arguments. Pass no args or -h for usage."
echo " shell Run the HBase shell"
echo " hbck Run the hbase 'fsck' tool"
- echo " hlog Write-ahead-log analyzer"
+ echo " wal Write-ahead-log analyzer"
echo " hfile Store file analyzer"
echo " zkcli Run the ZooKeeper shell"
echo " upgrade Upgrade hbase"
@@ -288,8 +288,9 @@ if [ "$COMMAND" = "shell" ] ; then
CLASS="org.jruby.Main -X+O ${JRUBY_OPTS} ${HBASE_HOME}/bin/hirb.rb"
elif [ "$COMMAND" = "hbck" ] ; then
CLASS='org.apache.hadoop.hbase.util.HBaseFsck'
-elif [ "$COMMAND" = "hlog" ] ; then
- CLASS='org.apache.hadoop.hbase.regionserver.wal.HLogPrettyPrinter'
+# TODO remove old 'hlog' version
+elif [ "$COMMAND" = "hlog" -o "$COMMAND" = "wal" ] ; then
+ CLASS='org.apache.hadoop.hbase.wal.WALPrettyPrinter'
elif [ "$COMMAND" = "hfile" ] ; then
CLASS='org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter'
elif [ "$COMMAND" = "zkcli" ] ; then
diff --git a/bin/hbase.cmd b/bin/hbase.cmd
index 1b2227b9644..68f97c32f48 100644
--- a/bin/hbase.cmd
+++ b/bin/hbase.cmd
@@ -200,7 +200,7 @@ goto :MakeCmdArgsLoop
set hbase-command-arguments=%_hbasearguments%
@rem figure out which class to run
-set corecommands=shell master regionserver thrift thrift2 rest avro hlog hbck hfile zookeeper zkcli upgrade mapredcp
+set corecommands=shell master regionserver thrift thrift2 rest avro hlog wal hbck hfile zookeeper zkcli upgrade mapredcp
for %%i in ( %corecommands% ) do (
if "%hbase-command%"=="%%i" set corecommand=true
)
@@ -364,8 +364,13 @@ goto :eof
set CLASS=org.apache.hadoop.hbase.util.HBaseFsck
goto :eof
+@rem TODO remove older 'hlog' command
:hlog
- set CLASS=org.apache.hadoop.hbase.regionserver.wal.HLogPrettyPrinter
+ set CLASS=org.apache.hadoop.hbase.wal.WALPrettyPrinter
+ goto :eof
+
+:wal
+ set CLASS=org.apache.hadoop.hbase.wal.WALPrettyPrinter
goto :eof
:hfile
@@ -405,7 +410,7 @@ goto :eof
echo Some commands take arguments. Pass no args or -h for usage."
echo shell Run the HBase shell
echo hbck Run the hbase 'fsck' tool
- echo hlog Write-ahead-log analyzer
+ echo wal Write-ahead-log analyzer
echo hfile Store file analyzer
echo zkcli Run the ZooKeeper shell
echo upgrade Upgrade hbase
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java
index 6ff1b1f7ca1..bdb7f531dd8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
/**
* Thrown during flush if the possibility snapshot content was not properly
- * persisted into store files. Response should include replay of hlog content.
+ * persisted into store files. Response should include replay of wal content.
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 69b33d092ac..a0ce1795b58 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -890,18 +890,17 @@ public interface Admin extends Abortable, Closeable {
HTableDescriptor[] getTableDescriptors(List names) throws IOException;
/**
- * Roll the log writer. That is, start writing log messages to a new file.
+ * Roll the log writer. I.e. for filesystem based write ahead logs, start writing to a new file.
*
- * @param serverName The servername of the regionserver. A server name is made of host, port and
- * startcode. This is mandatory. Here is an example:
- * host187.example.com,60020,1289493121758
- * @return If lots of logs, flush the returned regions so next time through we can clean logs.
- * Returns null if nothing to flush. Names are actual region names as returned by {@link
- * HRegionInfo#getEncodedName()}
+ * Note that the actual rolling of the log writer is asynchronous and may not be complete when
+ * this method returns. As a side effect of this call, the named region server may schedule
+ * store flushes at the request of the wal.
+ *
+ * @param serverName The servername of the regionserver.
* @throws IOException if a remote or network exception occurs
* @throws org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException
*/
- byte[][] rollHLogWriter(String serverName) throws IOException, FailedLogCloseException;
+ void rollWALWriter(ServerName serverName) throws IOException, FailedLogCloseException;
/**
* Helper delegage to getClusterStatus().getMasterCoprocessors().
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index 3a337602a12..73551af8b8d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -64,7 +64,7 @@ import com.google.common.collect.Lists;
* Adding a new peer results in creating new outbound connections from every
* region server to a subset of region servers on the slave cluster. Each
* new stream of replication will start replicating from the beginning of the
- * current HLog, meaning that edits from that past will be replicated.
+ * current WAL, meaning that edits from the past will be replicated.
*
*
* Removing a peer is a destructive and irreversible operation that stops
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionServerCoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionServerCoprocessorRpcChannel.java
index 122bfdc03c4..027fde4a3af 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionServerCoprocessorRpcChannel.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RegionServerCoprocessorRpcChannel.java
@@ -15,7 +15,7 @@ import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.ClusterConnection;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
index ced458a504b..70da40c0389 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRespons
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetServerInfoResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.RollWALWriterResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest;
@@ -183,21 +182,6 @@ public final class ResponseConverter {
// End utilities for Client
// Start utilities for Admin
- /**
- * Get the list of regions to flush from a RollLogWriterResponse
- *
- * @param proto the RollLogWriterResponse
- * @return the the list of regions to flush
- */
- public static byte[][] getRegions(final RollWALWriterResponse proto) {
- if (proto == null || proto.getRegionToFlushCount() == 0) return null;
- List regions = new ArrayList();
- for (ByteString region: proto.getRegionToFlushList()) {
- regions.add(region.toByteArray());
- }
- return (byte[][])regions.toArray();
- }
-
/**
* Get the list of region info from a GetOnlineRegionResponse
*
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
index 611f6631c8a..3dbbc336540 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
/**
* This provides an interface for maintaining a region server's replication queues. These queues
- * keep track of the HLogs that still need to be replicated to remote clusters.
+ * keep track of the WALs that still need to be replicated to remote clusters.
*/
@InterfaceAudience.Private
public interface ReplicationQueues {
@@ -45,31 +45,31 @@ public interface ReplicationQueues {
void removeQueue(String queueId);
/**
- * Add a new HLog file to the given queue. If the queue does not exist it is created.
+ * Add a new WAL file to the given queue. If the queue does not exist it is created.
* @param queueId a String that identifies the queue.
- * @param filename name of the HLog
+ * @param filename name of the WAL
*/
void addLog(String queueId, String filename) throws ReplicationException;
/**
- * Remove an HLog file from the given queue.
+ * Remove a WAL file from the given queue.
* @param queueId a String that identifies the queue.
- * @param filename name of the HLog
+ * @param filename name of the WAL
*/
void removeLog(String queueId, String filename);
/**
- * Set the current position for a specific HLog in a given queue.
+ * Set the current position for a specific WAL in a given queue.
* @param queueId a String that identifies the queue
- * @param filename name of the HLog
+ * @param filename name of the WAL
* @param position the current position in the file
*/
void setLogPosition(String queueId, String filename, long position);
/**
- * Get the current position for a specific HLog in a given queue.
+ * Get the current position for a specific WAL in a given queue.
* @param queueId a String that identifies the queue
- * @param filename name of the HLog
+ * @param filename name of the WAL
* @return the current position in the file
*/
long getLogPosition(String queueId, String filename) throws ReplicationException;
@@ -80,9 +80,9 @@ public interface ReplicationQueues {
void removeAllQueues();
/**
- * Get a list of all HLogs in the given queue.
+ * Get a list of all WALs in the given queue.
* @param queueId a String that identifies the queue
- * @return a list of HLogs, null if this region server is dead and has no outstanding queues
+ * @return a list of WALs, null if this region server is dead and has no outstanding queues
*/
List getLogsInQueue(String queueId);
@@ -95,7 +95,7 @@ public interface ReplicationQueues {
/**
* Take ownership for the set of queues belonging to a dead region server.
* @param regionserver the id of the dead region server
- * @return A SortedMap of the queues that have been claimed, including a SortedSet of HLogs in
+ * @return A SortedMap of the queues that have been claimed, including a SortedSet of WALs in
* each queue. Returns an empty map if no queues were failed-over.
*/
SortedMap> claimQueues(String regionserver);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java
index 689afba98be..e8fa4df24ba 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClient.java
@@ -23,7 +23,7 @@ import java.util.List;
/**
* This provides an interface for clients of replication to view replication queues. These queues
- * keep track of the HLogs that still need to be replicated to remote clusters.
+ * keep track of the WALs that still need to be replicated to remote clusters.
*/
public interface ReplicationQueuesClient {
@@ -40,10 +40,10 @@ public interface ReplicationQueuesClient {
List getListOfReplicators();
/**
- * Get a list of all HLogs in the given queue on the given region server.
+ * Get a list of all WALs in the given queue on the given region server.
* @param serverName the server name of the region server that owns the queue
* @param queueId a String that identifies the queue
- * @return a list of HLogs, null if this region server is dead and has no outstanding queues
+ * @return a list of WALs, null if this region server is dead and has no outstanding queues
*/
List getLogsInQueue(String serverName, String queueId);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
index a7d36c1903b..fba1fef018a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
@@ -51,7 +51,7 @@ public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implem
try {
result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode);
} catch (KeeperException e) {
- this.abortable.abort("Failed to get list of hlogs for queueId=" + queueId
+ this.abortable.abort("Failed to get list of wals for queueId=" + queueId
+ " and serverName=" + serverName, e);
}
return result;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
index 28285467387..3ed51c73a71 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
@@ -42,21 +42,21 @@ import org.apache.zookeeper.KeeperException;
/**
* This class provides an implementation of the ReplicationQueues interface using Zookeeper. The
* base znode that this class works at is the myQueuesZnode. The myQueuesZnode contains a list of
- * all outstanding HLog files on this region server that need to be replicated. The myQueuesZnode is
+ * all outstanding WAL files on this region server that need to be replicated. The myQueuesZnode is
* the regionserver name (a concatenation of the region server’s hostname, client port and start
* code). For example:
*
* /hbase/replication/rs/hostname.example.org,6020,1234
*
- * Within this znode, the region server maintains a set of HLog replication queues. These queues are
+ * Within this znode, the region server maintains a set of WAL replication queues. These queues are
* represented by child znodes named using there give queue id. For example:
*
* /hbase/replication/rs/hostname.example.org,6020,1234/1
* /hbase/replication/rs/hostname.example.org,6020,1234/2
*
- * Each queue has one child znode for every HLog that still needs to be replicated. The value of
- * these HLog child znodes is the latest position that has been replicated. This position is updated
- * every time a HLog entry is replicated. For example:
+ * Each queue has one child znode for every WAL that still needs to be replicated. The value of
+ * these WAL child znodes is the latest position that has been replicated. This position is updated
+ * every time a WAL entry is replicated. For example:
*
* /hbase/replication/rs/hostname.example.org,6020,1234/1/23522342.23422 [VALUE: 254]
*/
@@ -113,7 +113,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
znode = ZKUtil.joinZNode(znode, filename);
ZKUtil.deleteNode(this.zookeeper, znode);
} catch (KeeperException e) {
- this.abortable.abort("Failed to remove hlog from queue (queueId=" + queueId + ", filename="
+ this.abortable.abort("Failed to remove wal from queue (queueId=" + queueId + ", filename="
+ filename + ")", e);
}
}
@@ -126,7 +126,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
// Why serialize String of Long and not Long as bytes?
ZKUtil.setData(this.zookeeper, znode, ZKUtil.positionToByteArray(position));
} catch (KeeperException e) {
- this.abortable.abort("Failed to write replication hlog position (filename=" + filename
+ this.abortable.abort("Failed to write replication wal position (filename=" + filename
+ ", position=" + position + ")", e);
}
}
@@ -146,12 +146,12 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
return 0;
}
try {
- return ZKUtil.parseHLogPositionFrom(bytes);
+ return ZKUtil.parseWALPositionFrom(bytes);
} catch (DeserializationException de) {
- LOG.warn("Failed to parse HLogPosition for queueId=" + queueId + " and hlog=" + filename
+ LOG.warn("Failed to parse WALPosition for queueId=" + queueId + " and wal=" + filename
+ "znode content, continuing.");
}
- // if we can not parse the position, start at the beginning of the hlog file
+ // if we can not parse the position, start at the beginning of the wal file
// again
return 0;
}
@@ -166,10 +166,10 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
SortedMap> newQueues = new TreeMap>();
// check whether there is multi support. If yes, use it.
if (conf.getBoolean(HConstants.ZOOKEEPER_USEMULTI, true)) {
- LOG.info("Atomically moving " + regionserverZnode + "'s hlogs to my queue");
+ LOG.info("Atomically moving " + regionserverZnode + "'s wals to my queue");
newQueues = copyQueuesFromRSUsingMulti(regionserverZnode);
} else {
- LOG.info("Moving " + regionserverZnode + "'s hlogs to my queue");
+ LOG.info("Moving " + regionserverZnode + "'s wals to my queue");
if (!lockOtherRS(regionserverZnode)) {
return newQueues;
}
@@ -200,7 +200,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
try {
result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode);
} catch (KeeperException e) {
- this.abortable.abort("Failed to get list of hlogs for queueId=" + queueId, e);
+ this.abortable.abort("Failed to get list of wals for queueId=" + queueId, e);
}
return result;
}
@@ -283,10 +283,10 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
}
/**
- * It "atomically" copies all the hlogs queues from another region server and returns them all
+ * It "atomically" copies all the wal queues from another region server and returns them all
* sorted per peer cluster (appended with the dead server's znode).
* @param znode pertaining to the region server to copy the queues from
- * @return HLog queues sorted per peer cluster
+ * @return WAL queues sorted per peer cluster
*/
private SortedMap> copyQueuesFromRSUsingMulti(String znode) {
SortedMap> queues = new TreeMap>();
@@ -308,8 +308,8 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
String newPeerZnode = ZKUtil.joinZNode(this.myQueuesZnode, newPeerId);
// check the logs queue for the old peer cluster
String oldClusterZnode = ZKUtil.joinZNode(deadRSZnodePath, peerId);
- List hlogs = ZKUtil.listChildrenNoWatch(this.zookeeper, oldClusterZnode);
- if (hlogs == null || hlogs.size() == 0) {
+ List wals = ZKUtil.listChildrenNoWatch(this.zookeeper, oldClusterZnode);
+ if (wals == null || wals.size() == 0) {
listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldClusterZnode));
continue; // empty log queue.
}
@@ -319,15 +319,15 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
ZKUtilOp op = ZKUtilOp.createAndFailSilent(newPeerZnode, HConstants.EMPTY_BYTE_ARRAY);
listOfOps.add(op);
// get the offset of the logs and set it to new znodes
- for (String hlog : hlogs) {
- String oldHlogZnode = ZKUtil.joinZNode(oldClusterZnode, hlog);
- byte[] logOffset = ZKUtil.getData(this.zookeeper, oldHlogZnode);
- LOG.debug("Creating " + hlog + " with data " + Bytes.toString(logOffset));
- String newLogZnode = ZKUtil.joinZNode(newPeerZnode, hlog);
+ for (String wal : wals) {
+ String oldWalZnode = ZKUtil.joinZNode(oldClusterZnode, wal);
+ byte[] logOffset = ZKUtil.getData(this.zookeeper, oldWalZnode);
+ LOG.debug("Creating " + wal + " with data " + Bytes.toString(logOffset));
+ String newLogZnode = ZKUtil.joinZNode(newPeerZnode, wal);
listOfOps.add(ZKUtilOp.createAndFailSilent(newLogZnode, logOffset));
// add ops for deleting
- listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldHlogZnode));
- logQueue.add(hlog);
+ listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldWalZnode));
+ logQueue.add(wal);
}
// add delete op for peer
listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldClusterZnode));
@@ -350,10 +350,10 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
}
/**
- * This methods copies all the hlogs queues from another region server and returns them all sorted
+ * This method copies all the WAL queues from another region server and returns them all sorted
* per peer cluster (appended with the dead server's znode)
* @param znode server names to copy
- * @return all hlogs for all peers of that cluster, null if an error occurred
+ * @return all wals for all peers of that cluster, null if an error occurred
*/
private SortedMap> copyQueuesFromRS(String znode) {
// TODO this method isn't atomic enough, we could start copying and then
@@ -381,31 +381,31 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
String newCluster = cluster + "-" + znode;
String newClusterZnode = ZKUtil.joinZNode(this.myQueuesZnode, newCluster);
String clusterPath = ZKUtil.joinZNode(nodePath, cluster);
- List hlogs = ZKUtil.listChildrenNoWatch(this.zookeeper, clusterPath);
+ List wals = ZKUtil.listChildrenNoWatch(this.zookeeper, clusterPath);
// That region server didn't have anything to replicate for this cluster
- if (hlogs == null || hlogs.size() == 0) {
+ if (wals == null || wals.size() == 0) {
continue;
}
ZKUtil.createNodeIfNotExistsAndWatch(this.zookeeper, newClusterZnode,
HConstants.EMPTY_BYTE_ARRAY);
SortedSet logQueue = new TreeSet();
queues.put(newCluster, logQueue);
- for (String hlog : hlogs) {
- String z = ZKUtil.joinZNode(clusterPath, hlog);
+ for (String wal : wals) {
+ String z = ZKUtil.joinZNode(clusterPath, wal);
byte[] positionBytes = ZKUtil.getData(this.zookeeper, z);
long position = 0;
try {
- position = ZKUtil.parseHLogPositionFrom(positionBytes);
+ position = ZKUtil.parseWALPositionFrom(positionBytes);
} catch (DeserializationException e) {
- LOG.warn("Failed parse of hlog position from the following znode: " + z
+ LOG.warn("Failed parse of wal position from the following znode: " + z
+ ", Exception: " + e);
}
- LOG.debug("Creating " + hlog + " with data " + position);
- String child = ZKUtil.joinZNode(newClusterZnode, hlog);
+ LOG.debug("Creating " + wal + " with data " + position);
+ String child = ZKUtil.joinZNode(newClusterZnode, wal);
// Position doesn't actually change, we are just deserializing it for
// logging, so just use the already serialized version
ZKUtil.createAndWatch(this.zookeeper, child, positionBytes);
- logQueue.add(hlog);
+ logQueue.add(wal);
}
}
} catch (KeeperException e) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
index 31f273e59c9..da0d8b28af1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
@@ -1670,7 +1670,7 @@ public class ZKUtil {
if (data != null && data.length > 0) { // log position
long position = 0;
try {
- position = ZKUtil.parseHLogPositionFrom(ZKUtil.getData(zkw, znodeToProcess));
+ position = ZKUtil.parseWALPositionFrom(ZKUtil.getData(zkw, znodeToProcess));
sb.append(position);
} catch (DeserializationException ignored) {
} catch (InterruptedException e) {
@@ -1884,7 +1884,7 @@ public class ZKUtil {
/**
* @param position
* @return Serialized protobuf of position with pb magic prefix prepended suitable
- * for use as content of an hlog position in a replication queue.
+ * for use as content of a WAL position in a replication queue.
*/
public static byte[] positionToByteArray(final long position) {
byte[] bytes = ZooKeeperProtos.ReplicationHLogPosition.newBuilder().setPosition(position)
@@ -1893,13 +1893,13 @@ public class ZKUtil {
}
/**
- * @param bytes - Content of a HLog position znode.
- * @return long - The current HLog position.
+ * @param bytes - Content of a WAL position znode.
+ * @return long - The current WAL position.
* @throws DeserializationException
*/
- public static long parseHLogPositionFrom(final byte[] bytes) throws DeserializationException {
+ public static long parseWALPositionFrom(final byte[] bytes) throws DeserializationException {
if (bytes == null) {
- throw new DeserializationException("Unable to parse null HLog position.");
+ throw new DeserializationException("Unable to parse null WAL position.");
}
if (ProtobufUtil.isPBMagicPrefix(bytes)) {
int pblen = ProtobufUtil.lengthOfPBMagic();
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index d4d17c9690b..595325acc2e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -899,7 +899,7 @@ public final class HConstants {
/** File permission umask to use when creating hbase data files */
public static final String DATA_FILE_UMASK_KEY = "hbase.data.umask";
- /** Configuration name of HLog Compression */
+ /** Configuration name of WAL Compression */
public static final String ENABLE_WAL_COMPRESSION =
"hbase.regionserver.wal.enablecompression";
@@ -1031,7 +1031,7 @@ public final class HConstants {
/** Configuration key for the name of the master WAL encryption key for the cluster, a string */
public static final String CRYPTO_WAL_KEY_NAME_CONF_KEY = "hbase.crypto.wal.key.name";
- /** Configuration key for enabling HLog encryption, a boolean */
+ /** Configuration key for enabling WAL encryption, a boolean */
public static final String ENABLE_WAL_ENCRYPTION = "hbase.regionserver.wal.encryption";
/** Configuration key for setting RPC codec class name */
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 22b40ec223c..516fd811c7e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -1135,7 +1135,7 @@ public class KeyValue implements Cell, HeapSize, Cloneable, SettableSequenceId,
/**
* Produces a string map for this key/value pair. Useful for programmatic use
- * and manipulation of the data stored in an HLogKey, for example, printing
+ * and manipulation of the data stored in a WALKey, for example, printing
* as JSON. Values are left out due to their tendency to be large. If needed,
* they can be added manually.
*
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index 90b2c0fae31..b9e96db8b54 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -112,8 +112,8 @@ possible configurations would overwhelm and obscure the important.
hbase.master.logcleaner.pluginsorg.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleanerA comma-separated list of BaseLogCleanerDelegate invoked by
- the LogsCleaner service. These WAL/HLog cleaners are called in order,
- so put the HLog cleaner that prunes the most HLog files in front. To
+ the LogsCleaner service. These WAL cleaners are called in order,
+ so put the cleaner that prunes the most files in front. To
implement your own BaseLogCleanerDelegate, just put it in HBase's classpath
and add the fully qualified class name here. Always add the above
default log cleaners in the list.
@@ -121,7 +121,7 @@ possible configurations would overwhelm and obscure the important.
hbase.master.logcleaner.ttl600000
- Maximum time a HLog can stay in the .oldlogdir directory,
+ Maximum time a WAL can stay in the .oldlogdir directory,
after which it will be cleaned by a Master thread.
@@ -265,12 +265,12 @@ possible configurations would overwhelm and obscure the important.
hbase.regionserver.hlog.reader.implorg.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader
- The HLog file reader implementation.
+ The WAL file reader implementation.hbase.regionserver.hlog.writer.implorg.apache.hadoop.hbase.regionserver.wal.ProtobufLogWriter
- The HLog file writer implementation.
+ The WAL file writer implementation.hbase.master.distributed.log.replay
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java
index 2307599c22d..6cf942b2bd1 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java
@@ -48,9 +48,9 @@ public interface MetricsMasterFileSystemSource extends BaseSource {
String SPLIT_SIZE_NAME = "hlogSplitSize";
String META_SPLIT_TIME_DESC = "Time it takes to finish splitMetaLog()";
- String META_SPLIT_SIZE_DESC = "Size of hbase:meta HLog files being split";
- String SPLIT_TIME_DESC = "Time it takes to finish HLog.splitLog()";
- String SPLIT_SIZE_DESC = "Size of HLog files being split";
+ String META_SPLIT_SIZE_DESC = "Size of hbase:meta WAL files being split";
+ String SPLIT_TIME_DESC = "Time it takes to finish WAL.splitLog()";
+ String SPLIT_SIZE_DESC = "Size of WAL files being split";
void updateMetaWALSplitTime(long time);
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index 458ed01910e..7ac2501607f 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -129,10 +129,10 @@ public interface MetricsRegionServerSource extends BaseSource {
String REGION_COUNT_DESC = "Number of regions";
String STORE_COUNT = "storeCount";
String STORE_COUNT_DESC = "Number of Stores";
- String HLOGFILE_COUNT = "hlogFileCount";
- String HLOGFILE_COUNT_DESC = "Number of HLog Files";
- String HLOGFILE_SIZE = "hlogFileSize";
- String HLOGFILE_SIZE_DESC = "Size of all HLog Files";
+ String WALFILE_COUNT = "hlogFileCount";
+ String WALFILE_COUNT_DESC = "Number of WAL Files";
+ String WALFILE_SIZE = "hlogFileSize";
+ String WALFILE_SIZE_DESC = "Size of all WAL Files";
String STOREFILE_COUNT = "storeFileCount";
String STOREFILE_COUNT_DESC = "Number of Store Files";
String MEMSTORE_SIZE = "memStoreSize";
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
index 513a0db8351..e0b5e1b0d24 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
@@ -68,14 +68,14 @@ public interface MetricsRegionServerWrapper {
long getNumStores();
/**
- * Get the number of HLog files of this region server.
+ * Get the number of WAL files of this region server.
*/
- public long getNumHLogFiles();
+ public long getNumWALFiles();
/**
- * Get the size of HLog files of this region server.
+ * Get the size of WAL files of this region server.
*/
- public long getHLogFileSize();
+ public long getWALFileSize();
/**
* Get the number of store files hosted on this region server.
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySource.java
index 793429d7a42..4f8cb36d3d1 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsEditsReplaySource.java
@@ -39,7 +39,7 @@ public interface MetricsEditsReplaySource extends BaseSource {
/**
* Description
*/
- String METRICS_DESCRIPTION = "Metrics about HBase RegionServer HLog Edits Replay";
+ String METRICS_DESCRIPTION = "Metrics about HBase RegionServer WAL Edits Replay";
/**
* The name of the metrics context that metrics will be under in jmx
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
index 1c59f657139..ba0df80e1d0 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.hbase.regionserver.wal;
import org.apache.hadoop.hbase.metrics.BaseSource;
/**
- * Interface of the source that will export metrics about the region server's HLog.
+ * Interface of the source that will export metrics about the region server's WAL.
*/
public interface MetricsWALSource extends BaseSource {
@@ -39,7 +39,7 @@ public interface MetricsWALSource extends BaseSource {
/**
* Description
*/
- String METRICS_DESCRIPTION = "Metrics about HBase RegionServer HLog";
+ String METRICS_DESCRIPTION = "Metrics about HBase RegionServer WAL";
/**
* The name of the metrics context that metrics will be under in jmx
@@ -52,11 +52,11 @@ public interface MetricsWALSource extends BaseSource {
String APPEND_COUNT = "appendCount";
String APPEND_COUNT_DESC = "Number of appends to the write ahead log.";
String APPEND_SIZE = "appendSize";
- String APPEND_SIZE_DESC = "Size (in bytes) of the data appended to the HLog.";
+ String APPEND_SIZE_DESC = "Size (in bytes) of the data appended to the WAL.";
String SLOW_APPEND_COUNT = "slowAppendCount";
String SLOW_APPEND_COUNT_DESC = "Number of appends that were slow.";
String SYNC_TIME = "syncTime";
- String SYNC_TIME_DESC = "The time it took to sync the HLog to HDFS.";
+ String SYNC_TIME_DESC = "The time it took to sync the WAL to HDFS.";
/**
* Add the append size.
@@ -69,7 +69,7 @@ public interface MetricsWALSource extends BaseSource {
void incrementAppendTime(long time);
/**
- * Increment the count of hlog appends
+ * Increment the count of wal appends
*/
void incrementAppendCount();
@@ -79,7 +79,7 @@ public interface MetricsWALSource extends BaseSource {
void incrementSlowAppendCount();
/**
- * Add the time it took to sync the hlog.
+ * Add the time it took to sync the wal.
*/
void incrementSyncTime(long time);
diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsHLogSource.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsHLogSource.java
deleted file mode 100644
index b2bf1f208f2..00000000000
--- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsHLogSource.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.regionserver.wal;
-
-import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
-import org.junit.Test;
-
-public class TestMetricsHLogSource {
-
- @Test(expected=RuntimeException.class)
- public void testGetInstanceNoHadoopCompat() throws Exception {
- //This should throw an exception because there is no compat lib on the class path.
- CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
-
- }
-}
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
index bc1aa079f26..d3414e20cb8 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -168,8 +168,8 @@ public class MetricsRegionServerSourceImpl
if (rsWrap != null) {
mrb.addGauge(Interns.info(REGION_COUNT, REGION_COUNT_DESC), rsWrap.getNumOnlineRegions())
.addGauge(Interns.info(STORE_COUNT, STORE_COUNT_DESC), rsWrap.getNumStores())
- .addGauge(Interns.info(HLOGFILE_COUNT, HLOGFILE_COUNT_DESC), rsWrap.getNumHLogFiles())
- .addGauge(Interns.info(HLOGFILE_SIZE, HLOGFILE_SIZE_DESC), rsWrap.getHLogFileSize())
+ .addGauge(Interns.info(WALFILE_COUNT, WALFILE_COUNT_DESC), rsWrap.getNumWALFiles())
+ .addGauge(Interns.info(WALFILE_SIZE, WALFILE_SIZE_DESC), rsWrap.getWALFileSize())
.addGauge(Interns.info(STOREFILE_COUNT, STOREFILE_COUNT_DESC), rsWrap.getNumStoreFiles())
.addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemstoreSize())
.addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize())
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
index ad8f24cf263..d602d2f917e 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java
@@ -25,9 +25,10 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong;
/**
- * Class that transitions metrics from HLog's MetricsWAL into the metrics subsystem.
+ * Class that transitions metrics from MetricsWAL into the metrics subsystem.
*
* Implements BaseSource through BaseSourceImpl, following the pattern.
+ * @see org.apache.hadoop.hbase.regionserver.wal.MetricsWAL
*/
@InterfaceAudience.Private
public class MetricsWALSourceImpl extends BaseSourceImpl implements MetricsWALSource {
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithEncryption.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithEncryption.java
index 9212095b882..66ac62fcc94 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithEncryption.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithEncryption.java
@@ -26,7 +26,8 @@ import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileReaderV3;
import org.apache.hadoop.hbase.io.hfile.HFileWriterV3;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.wal.WAL.Reader;
+import org.apache.hadoop.hbase.wal.WALProvider.Writer;
import org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogReader;
import org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogWriter;
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
@@ -61,9 +62,9 @@ public class IntegrationTestIngestWithEncryption extends IntegrationTestIngest {
conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
conf.setClass("hbase.regionserver.hlog.reader.impl", SecureProtobufLogReader.class,
- HLog.Reader.class);
+ Reader.class);
conf.setClass("hbase.regionserver.hlog.writer.impl", SecureProtobufLogWriter.class,
- HLog.Writer.class);
+ Writer.class);
conf.setBoolean(HConstants.ENABLE_WAL_ENCRYPTION, true);
}
super.setUpCluster();
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
index 12adc80dc04..f64528b02d8 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
@@ -82,7 +82,7 @@ import com.google.common.base.Objects;
*
*
* Load Test Tool.
- * This runs so that all RegionServers will have some load and HLogs will be full.
+ * This runs so that all RegionServers will have some load and WALs will be full.
*
*
* Scan thread.
@@ -151,7 +151,7 @@ public class IntegrationTestMTTR {
private static Action restartMasterAction;
/**
- * The load test tool used to create load and make sure that HLogs aren't empty.
+ * The load test tool used to create load and make sure that WALs aren't empty.
*/
private static LoadTestTool loadTool;
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
index 9ffe0fdce66..382874289df 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
@@ -16956,6 +16956,12 @@ public final class AdminProtos {
}
/**
* Protobuf type {@code RollWALWriterResponse}
+ *
+ *
+ *
+ * Roll request responses no longer include regions to flush
+ * this list will always be empty when talking to a 1.0 server
+ *
*/
public static final class RollWALWriterResponse extends
com.google.protobuf.GeneratedMessage
@@ -17241,6 +17247,12 @@ public final class AdminProtos {
}
/**
* Protobuf type {@code RollWALWriterResponse}
+ *
+ *
+ *
+ * Roll request responses no longer include regions to flush
+ * this list will always be empty when talking to a 1.0 server
+ *
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java
index 19a8d74a0bb..ec169d54f2f 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java
@@ -4496,7 +4496,7 @@ public final class RegionServerStatusProtos {
* required uint64 last_flushed_sequence_id = 1;
*
*
- ** the last HLog sequence id flushed from MemStore to HFile for the region
+ * the last WAL sequence id flushed from MemStore to HFile for the region
*
*/
boolean hasLastFlushedSequenceId();
@@ -4504,7 +4504,7 @@ public final class RegionServerStatusProtos {
* required uint64 last_flushed_sequence_id = 1;
*
*
- ** the last HLog sequence id flushed from MemStore to HFile for the region
+ * the last WAL sequence id flushed from MemStore to HFile for the region
*
*/
long getLastFlushedSequenceId();
@@ -4612,7 +4612,7 @@ public final class RegionServerStatusProtos {
* required uint64 last_flushed_sequence_id = 1;
*
*
- ** the last HLog sequence id flushed from MemStore to HFile for the region
+ * the last WAL sequence id flushed from MemStore to HFile for the region
*
*/
public boolean hasLastFlushedSequenceId() {
@@ -4622,7 +4622,7 @@ public final class RegionServerStatusProtos {
* required uint64 last_flushed_sequence_id = 1;
*
*
- ** the last HLog sequence id flushed from MemStore to HFile for the region
+ * the last WAL sequence id flushed from MemStore to HFile for the region
*
*/
public long getLastFlushedSequenceId() {
@@ -4908,7 +4908,7 @@ public final class RegionServerStatusProtos {
* required uint64 last_flushed_sequence_id = 1;
*
*
- ** the last HLog sequence id flushed from MemStore to HFile for the region
+ * the last WAL sequence id flushed from MemStore to HFile for the region
*
*/
public boolean hasLastFlushedSequenceId() {
@@ -4918,7 +4918,7 @@ public final class RegionServerStatusProtos {
* required uint64 last_flushed_sequence_id = 1;
*
*
- ** the last HLog sequence id flushed from MemStore to HFile for the region
+ * the last WAL sequence id flushed from MemStore to HFile for the region
*
*/
public long getLastFlushedSequenceId() {
@@ -4928,7 +4928,7 @@ public final class RegionServerStatusProtos {
* required uint64 last_flushed_sequence_id = 1;
*
*
- ** the last HLog sequence id flushed from MemStore to HFile for the region
+ * the last WAL sequence id flushed from MemStore to HFile for the region
*
*/
public Builder setLastFlushedSequenceId(long value) {
@@ -4941,7 +4941,7 @@ public final class RegionServerStatusProtos {
* required uint64 last_flushed_sequence_id = 1;
*
*
- ** the last HLog sequence id flushed from MemStore to HFile for the region
+ * the last WAL sequence id flushed from MemStore to HFile for the region
*
*/
public Builder clearLastFlushedSequenceId() {
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java
index af61d47d6b5..977db42bbd0 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java
@@ -1242,7 +1242,9 @@ public final class WALProtos {
* Protobuf type {@code WALKey}
*
*
- * Protocol buffer version of HLogKey; see HLogKey comment, not really a key but WALEdit header for some KVs
+ *
+ * Protocol buffer version of WALKey; see WALKey comment, not really a key but WALEdit header
+ * for some KVs
*
*/
public static final class WALKey extends
@@ -2033,7 +2035,9 @@ public final class WALProtos {
* Protobuf type {@code WALKey}
*
*
- * Protocol buffer version of HLogKey; see HLogKey comment, not really a key but WALEdit header for some KVs
+ *
+ * Protocol buffer version of WALKey; see WALKey comment, not really a key but WALEdit header
+ * for some KVs
*
*/
public static final class Builder extends
@@ -10021,8 +10025,10 @@ public final class WALProtos {
*
*
**
- * A trailer that is appended to the end of a properly closed HLog WAL file.
+ * A trailer that is appended to the end of a properly closed WAL file.
* If missing, this is either a legacy or a corrupted WAL file.
+ * N.B. This trailer currently doesn't contain any information and we
+ * purposefully don't expose it in the WAL APIs. It's for future growth.
*
*/
public static final class WALTrailer extends
@@ -10246,8 +10252,10 @@ public final class WALProtos {
*
*
**
- * A trailer that is appended to the end of a properly closed HLog WAL file.
+ * A trailer that is appended to the end of a properly closed WAL file.
* If missing, this is either a legacy or a corrupted WAL file.
+ * N.B. This trailer currently doesn't contain any information and we
+ * purposefully don't expose it in the WAL APIs. It's for future growth.
*
*/
public static final class Builder extends
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index d46bc1b09c0..5a1fbf13bed 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -6020,7 +6020,7 @@ public final class ZooKeeperProtos {
*
*
**
- * Used by replication. Holds the current position in an HLog file.
+ * Used by replication. Holds the current position in a WAL file.
*
*/
public static final class ReplicationHLogPosition extends
@@ -6288,7 +6288,7 @@ public final class ZooKeeperProtos {
*
*
**
- * Used by replication. Holds the current position in an HLog file.
+ * Used by replication. Holds the current position in a WAL file.
*
*/
public static final class Builder extends
diff --git a/hbase-protocol/src/main/protobuf/Admin.proto b/hbase-protocol/src/main/protobuf/Admin.proto
index ec34e72546b..fcc4e1d0248 100644
--- a/hbase-protocol/src/main/protobuf/Admin.proto
+++ b/hbase-protocol/src/main/protobuf/Admin.proto
@@ -204,6 +204,10 @@ message ReplicateWALEntryResponse {
message RollWALWriterRequest {
}
+/*
+ * Roll request responses no longer include regions to flush
+ * this list will always be empty when talking to a 1.0 server
+ */
message RollWALWriterResponse {
// A list of encoded name of regions to flush
repeated bytes region_to_flush = 1;
diff --git a/hbase-protocol/src/main/protobuf/RegionServerStatus.proto b/hbase-protocol/src/main/protobuf/RegionServerStatus.proto
index df9a521121c..75e5ae4903b 100644
--- a/hbase-protocol/src/main/protobuf/RegionServerStatus.proto
+++ b/hbase-protocol/src/main/protobuf/RegionServerStatus.proto
@@ -74,7 +74,7 @@ message GetLastFlushedSequenceIdRequest {
}
message GetLastFlushedSequenceIdResponse {
- /** the last HLog sequence id flushed from MemStore to HFile for the region */
+ /* the last WAL sequence id flushed from MemStore to HFile for the region */
required uint64 last_flushed_sequence_id = 1;
}
diff --git a/hbase-protocol/src/main/protobuf/WAL.proto b/hbase-protocol/src/main/protobuf/WAL.proto
index dae92d2eabd..f8a15344dca 100644
--- a/hbase-protocol/src/main/protobuf/WAL.proto
+++ b/hbase-protocol/src/main/protobuf/WAL.proto
@@ -31,7 +31,10 @@ message WALHeader {
optional string cell_codec_cls_name = 5;
}
-// Protocol buffer version of HLogKey; see HLogKey comment, not really a key but WALEdit header for some KVs
+/*
+ * Protocol buffer version of WALKey; see WALKey comment, not really a key but WALEdit header
+ * for some KVs
+ */
message WALKey {
required bytes encoded_region_name = 1;
required bytes table_name = 2;
@@ -144,8 +147,10 @@ message RegionEventDescriptor {
}
/**
- * A trailer that is appended to the end of a properly closed HLog WAL file.
+ * A trailer that is appended to the end of a properly closed WAL file.
* If missing, this is either a legacy or a corrupted WAL file.
+ * N.B. This trailer currently doesn't contain any information and we
+ * purposefully don't expose it in the WAL APIs. It's for future growth.
*/
message WALTrailer {
}
diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
index 9512bd69a28..bac881bc1f2 100644
--- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto
+++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
@@ -128,7 +128,7 @@ message ReplicationState {
}
/**
- * Used by replication. Holds the current position in an HLog file.
+ * Used by replication. Holds the current position in a WAL file.
*/
message ReplicationHLogPosition {
required int64 position = 1;
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
index d09c4bf4b64..c496bf66584 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
@@ -40,7 +40,7 @@ java.lang.management.ManagementFactory;
%def>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java
index b9614b4ef4c..f1a8c59d36c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogCounters.java
@@ -32,7 +32,7 @@ public class SplitLogCounters {
public final static AtomicLong tot_mgr_log_split_batch_start = new AtomicLong(0);
public final static AtomicLong tot_mgr_log_split_batch_success = new AtomicLong(0);
public final static AtomicLong tot_mgr_log_split_batch_err = new AtomicLong(0);
- public final static AtomicLong tot_mgr_new_unexpected_hlogs = new AtomicLong(0);
+ public final static AtomicLong tot_mgr_new_unexpected_wals = new AtomicLong(0);
public final static AtomicLong tot_mgr_log_split_start = new AtomicLong(0);
public final static AtomicLong tot_mgr_log_split_success = new AtomicLong(0);
public final static AtomicLong tot_mgr_log_split_err = new AtomicLong(0);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java
index bb7a14e8e2d..e0caf321029 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/SplitLogTask.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
-import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
import org.apache.hadoop.hbase.util.Bytes;
import com.google.protobuf.InvalidProtocolBufferException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java
index ed3b2320607..63990ef4a00 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/SplitLogWorkerCoordination.java
@@ -29,12 +29,12 @@ import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSeq
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.SplitLogWorker;
import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor;
-import org.apache.hadoop.hbase.regionserver.handler.HLogSplitterHandler;
+import org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler;
import com.google.common.annotations.VisibleForTesting;
/**
- * Coordinated operations for {@link SplitLogWorker} and {@link HLogSplitterHandler} Important
+ * Coordinated operations for {@link SplitLogWorker} and {@link WALSplitterHandler} Important
* methods for SplitLogWorker:
* {@link #isReady()} called from {@link SplitLogWorker#run()} to check whether the coordination is
* ready to supply the tasks
@@ -44,7 +44,7 @@ import com.google.common.annotations.VisibleForTesting;
* for external changes in coordination (if required)
* {@link #endTask(SplitLogTask, AtomicLong, SplitTaskDetails)} notify coordination engine that
*
- * Important methods for HLogSplitterHandler:
+ * Important methods for WALSplitterHandler:
* splitting task has completed.
*/
@InterfaceAudience.Private
@@ -112,7 +112,7 @@ public interface SplitLogWorkerCoordination {
*/
void removeListener();
- /* HLogSplitterHandler part */
+ /* WALSplitterHandler part */
/**
* Notify coordination engine that splitting task has completed.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
index 0f8baa3691f..b67a1c49ee9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
@@ -54,8 +54,8 @@ import org.apache.hadoop.hbase.master.SplitLogManager.Task;
import org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.regionserver.SplitLogWorker;
-import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
+import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -115,7 +115,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
@Override
public Status finish(ServerName workerName, String logfile) {
try {
- HLogSplitter.finishSplitLogFile(logfile, manager.getServer().getConfiguration());
+ WALSplitter.finishSplitLogFile(logfile, manager.getServer().getConfiguration());
} catch (IOException e) {
LOG.warn("Could not finish splitting of log file " + logfile, e);
return Status.ERR;
@@ -716,7 +716,7 @@ public class ZKSplitLogManagerCoordination extends ZooKeeperListener implements
}
// decode the file name
t = ZKSplitLog.getFileName(t);
- ServerName serverName = HLogUtil.getServerNameFromHLogDirectoryName(new Path(t));
+ ServerName serverName = DefaultWALProvider.getServerNameFromWALDirectoryName(new Path(t));
if (serverName != null) {
knownFailedServers.add(serverName.getServerName());
} else {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
index 2945565a586..0e14618d9a5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZkSplitLogWorkerCoordination.java
@@ -46,8 +46,8 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.SplitLogWorker;
import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor;
-import org.apache.hadoop.hbase.regionserver.handler.HLogSplitterHandler;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.regionserver.handler.WALSplitterHandler;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
@@ -318,8 +318,8 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
splitTaskDetails.setTaskNode(curTask);
splitTaskDetails.setCurTaskZKVersion(zkVersion);
- HLogSplitterHandler hsh =
- new HLogSplitterHandler(server, this, splitTaskDetails, reporter,
+ WALSplitterHandler hsh =
+ new WALSplitterHandler(server, this, splitTaskDetails, reporter,
this.tasksInProgress, splitTaskExecutor, mode);
server.getExecutorService().submit(hsh);
}
@@ -418,7 +418,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
// pick meta wal firstly
int offset = (int) (Math.random() * paths.size());
for (int i = 0; i < paths.size(); i++) {
- if (HLogUtil.isMetaFile(paths.get(i))) {
+ if (DefaultWALProvider.isMetaFile(paths.get(i))) {
offset = i;
break;
}
@@ -581,7 +581,7 @@ public class ZkSplitLogWorkerCoordination extends ZooKeeperListener implements
}
/*
- * Next part is related to HLogSplitterHandler
+ * Next part is related to WALSplitterHandler
*/
/**
* endTask() can fail and the only way to recover out of it is for the {@link SplitLogManager} to
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
index 8b26eea71ea..215ff16696b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Pair;
@@ -444,14 +445,32 @@ public abstract class BaseRegionObserver implements RegionObserver {
final InternalScanner s) throws IOException {
}
+ /**
+ * Implementers should override this version of the method and leave the deprecated one as-is.
+ */
+ @Override
+ public void preWALRestore(ObserverContext<? extends RegionCoprocessorEnvironment> env,
+ HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
+ }
+
@Override
public void preWALRestore(ObserverContext<RegionCoprocessorEnvironment> env, HRegionInfo info,
HLogKey logKey, WALEdit logEdit) throws IOException {
+ preWALRestore(env, info, (WALKey)logKey, logEdit);
+ }
+
+ /**
+ * Implementers should override this version of the method and leave the deprecated one as-is.
+ */
+ @Override
+ public void postWALRestore(ObserverContext<? extends RegionCoprocessorEnvironment> env,
+ HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
}
@Override
public void postWALRestore(ObserverContext<RegionCoprocessorEnvironment> env,
HRegionInfo info, HLogKey logKey, WALEdit logEdit) throws IOException {
+ postWALRestore(env, info, (WALKey)logKey, logEdit);
}
@Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseWALObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseWALObserver.java
index 0836da93747..cfddcd4d4d3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseWALObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseWALObserver.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
/**
@@ -42,13 +43,31 @@ public class BaseWALObserver implements WALObserver {
@Override
public void stop(CoprocessorEnvironment e) throws IOException { }
+ /**
+ * Implementers should override this method and leave the deprecated version as-is.
+ */
@Override
- public boolean preWALWrite(ObserverContext<WALCoprocessorEnvironment> ctx, HRegionInfo info,
- HLogKey logKey, WALEdit logEdit) throws IOException {
+ public boolean preWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> ctx,
+ HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
return false;
}
+ @Override
+ public boolean preWALWrite(ObserverContext<WALCoprocessorEnvironment> ctx, HRegionInfo info,
+ HLogKey logKey, WALEdit logEdit) throws IOException {
+ return preWALWrite(ctx, info, (WALKey)logKey, logEdit);
+ }
+
+ /**
+ * Implementers should override this method and leave the deprecated version as-is.
+ */
+ @Override
+ public void postWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> ctx,
+ HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException { }
+
@Override
public void postWALWrite(ObserverContext<WALCoprocessorEnvironment> ctx, HRegionInfo info,
- HLogKey logKey, WALEdit logEdit) throws IOException { }
+ HLogKey logKey, WALEdit logEdit) throws IOException {
+ postWALWrite(ctx, info, (WALKey)logKey, logEdit);
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
index 17fcabc58b7..f819fbc8ece 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
@@ -29,6 +29,7 @@ import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.UUID;
+import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicInteger;
@@ -557,4 +558,77 @@ public abstract class CoprocessorHost {
"coprocessor set.", e);
}
}
+
+ /**
+ * Used to gracefully handle fallback to deprecated methods when we
+ * evolve coprocessor APIs.
+ *
+ * When a particular Coprocessor API is updated to change methods, hosts can support fallback
+ * to the deprecated API by using this method to determine if an instance implements the new API.
+ * In the event that said support is partial, then in the face of a runtime issue that prevents
+ * proper operation {@link #legacyWarning(Class, String)} should be used to let operators know.
+ *
+ * For examples of this in action, see the implementation of
+ *
+ *
+ * @param clazz Coprocessor you wish to evaluate
+ * @param methodName the name of the non-deprecated method version
+ * @param parameterTypes the Class of the non-deprecated method's arguments in the order they are
+ * declared.
+ */
+ @InterfaceAudience.Private
+ protected static boolean useLegacyMethod(final Class<? extends Coprocessor> clazz,
+ final String methodName, final Class<?>... parameterTypes) {
+ boolean useLegacy;
+ // Use reflection to see if they implement the non-deprecated version
+ try {
+ clazz.getDeclaredMethod(methodName, parameterTypes);
+ LOG.debug("Found an implementation of '" + methodName + "' that uses updated method " +
+ "signature. Skipping legacy support for invocations in '" + clazz +"'.");
+ useLegacy = false;
+ } catch (NoSuchMethodException exception) {
+ useLegacy = true;
+ } catch (SecurityException exception) {
+ LOG.warn("The Security Manager denied our attempt to detect if the coprocessor '" + clazz +
+ "' requires legacy support; assuming it does. If you get later errors about legacy " +
+ "coprocessor use, consider updating your security policy to allow access to the package" +
+ " and declared members of your implementation.");
+ LOG.debug("Details of Security Manager rejection.", exception);
+ useLegacy = true;
+ }
+ return useLegacy;
+ }
+
+ /**
+ * Used to limit legacy handling to once per Coprocessor class per classloader.
+ */
+ private static final Set<Class<? extends Coprocessor>> legacyWarning =
+ new ConcurrentSkipListSet<Class<? extends Coprocessor>>(
+ new Comparator<Class<? extends Coprocessor>>() {
+ @Override
+ public int compare(Class<? extends Coprocessor> c1, Class<? extends Coprocessor> c2) {
+ if (c1.equals(c2)) {
+ return 0;
+ }
+ return c1.getName().compareTo(c2.getName());
+ }
+ });
+
+ /**
+ * limits the amount of logging to once per coprocessor class.
+ * Used in concert with {@link #useLegacyMethod(Class, String, Class[])} when a runtime issue
+ * prevents properly supporting the legacy version of a coprocessor API.
+ * Since coprocessors can be in tight loops this serves to limit the amount of log spam we create.
+ */
+ @InterfaceAudience.Private
+ protected void legacyWarning(final Class<? extends Coprocessor> clazz, final String message) {
+ if(legacyWarning.add(clazz)) {
+ LOG.error("You have a legacy coprocessor loaded and there are events we can't map to the " +
+ " deprecated API. Your coprocessor will not see these events. Please update '" + clazz +
+ "'. Details of the problem: " + message);
+ }
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index e526d63278c..ee43cba03f4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Pair;
@@ -67,6 +68,9 @@ import com.google.common.collect.ImmutableList;
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
+// TODO as method signatures need to break, update to
+// ObserverContext<? extends RegionCoprocessorEnvironment>
+// so we can use additional environment state that isn't exposed to coprocessors.
public interface RegionObserver extends Coprocessor {
/** Mutation type for postMutationBeforeWAL hook */
@@ -1109,26 +1113,62 @@ public interface RegionObserver extends Coprocessor {
/**
* Called before a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit}
* replayed for this region.
- *
- * @param ctx
- * @param info
- * @param logKey
- * @param logEdit
- * @throws IOException
*/
+ void preWALRestore(final ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
+ HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException;
+
+ /**
+ * Called before a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit}
+ * replayed for this region.
+ *
+ * This method is left in place to maintain binary compatibility with older
+ * {@link RegionObserver}s. If an implementation directly overrides
+ * {@link #preWALRestore(ObserverContext, HRegionInfo, WALKey, WALEdit)} then this version
+ * won't be called at all, barring problems with the Security Manager. To work correctly
+ * in the presence of a strict Security Manager, or in the case of an implementation that
+ * relies on a parent class to implement preWALRestore, you should implement this method
+ * as a call to the non-deprecated version.
+ *
+ * Users of this method will see all edits that can be treated as HLogKey. If there are
+ * edits that can't be treated as HLogKey they won't be offered to coprocessors that rely
+ * on this method. If a coprocessor gets skipped because of this mechanism, a log message
+ * at ERROR will be generated per coprocessor on the logger for {@link CoprocessorHost} once per
+ * classloader.
+ *
+ * @deprecated use {@link #preWALRestore(ObserverContext, HRegionInfo, WALKey, WALEdit)}
+ */
+ @Deprecated
void preWALRestore(final ObserverContext<RegionCoprocessorEnvironment> ctx,
HRegionInfo info, HLogKey logKey, WALEdit logEdit) throws IOException;
/**
* Called after a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit}
* replayed for this region.
- *
- * @param ctx
- * @param info
- * @param logKey
- * @param logEdit
- * @throws IOException
*/
+ void postWALRestore(final ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
+ HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException;
+
+ /**
+ * Called after a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit}
+ * replayed for this region.
+ *
+ * This method is left in place to maintain binary compatibility with older
+ * {@link RegionObserver}s. If an implementation directly overrides
+ * {@link #postWALRestore(ObserverContext, HRegionInfo, WALKey, WALEdit)} then this version
+ * won't be called at all, barring problems with the Security Manager. To work correctly
+ * in the presence of a strict Security Manager, or in the case of an implementation that
+ * relies on a parent class to implement preWALRestore, you should implement this method
+ * as a call to the non-deprecated version.
+ *
+ * Users of this method will see all edits that can be treated as HLogKey. If there are
+ * edits that can't be treated as HLogKey they won't be offered to coprocessors that rely
+ * on this method. If a coprocessor gets skipped because of this mechanism, a log message
+ * at ERROR will be generated per coprocessor on the logger for {@link CoprocessorHost} once per
+ * classloader.
+ *
+ * @deprecated use {@link #postWALRestore(ObserverContext, HRegionInfo, WALKey, WALEdit)}
+ */
+ @Deprecated
void postWALRestore(final ObserverContext<RegionCoprocessorEnvironment> ctx,
HRegionInfo info, HLogKey logKey, WALEdit logEdit) throws IOException;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java
index d16eed80a8a..a4ce5f1a0bf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALCoprocessorEnvironment.java
@@ -23,11 +23,11 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
+import org.apache.hadoop.hbase.wal.WAL;
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
public interface WALCoprocessorEnvironment extends CoprocessorEnvironment {
- /** @return reference to the region server services */
- HLog getWAL();
+ /** @return reference to the region server's WAL */
+ WAL getWAL();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java
index 49d84ed6327..bba83cc3838 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/WALObserver.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hbase.Coprocessor;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import java.io.IOException;
@@ -40,7 +41,7 @@ import java.io.IOException;
* hooks for adding logic for WALEdits in the region context during reconstruction,
*
* Defines coprocessor hooks for interacting with operations on the
- * {@link org.apache.hadoop.hbase.regionserver.wal.HLog}.
+ * {@link org.apache.hadoop.hbase.wal.WAL}.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
@InterfaceStability.Evolving
@@ -50,27 +51,65 @@ public interface WALObserver extends Coprocessor {
* Called before a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit}
* is writen to WAL.
*
- * @param ctx
- * @param info
- * @param logKey
- * @param logEdit
* @return true if default behavior should be bypassed, false otherwise
- * @throws IOException
*/
// TODO: return value is not used
+ boolean preWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> ctx,
+ HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException;
+
+ /**
+ * Called before a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit}
+ * is writen to WAL.
+ *
+ * This method is left in place to maintain binary compatibility with older
+ * {@link WALObserver}s. If an implementation directly overrides
+ * {@link #preWALWrite(ObserverContext, HRegionInfo, WALKey, WALEdit)} then this version
+ * won't be called at all, barring problems with the Security Manager. To work correctly
+ * in the presence of a strict Security Manager, or in the case of an implementation that
+ * relies on a parent class to implement preWALWrite, you should implement this method
+ * as a call to the non-deprecated version.
+ *
+ * Users of this method will see all edits that can be treated as HLogKey. If there are
+ * edits that can't be treated as HLogKey they won't be offered to coprocessors that rely
+ * on this method. If a coprocessor gets skipped because of this mechanism, a log message
+ * at ERROR will be generated per coprocessor on the logger for {@link CoprocessorHost} once per
+ * classloader.
+ *
+ * @return true if default behavior should be bypassed, false otherwise
+ * @deprecated use {@link #preWALWrite(ObserverContext, HRegionInfo, WALKey, WALEdit)}
+ */
+ @Deprecated
boolean preWALWrite(ObserverContext<WALCoprocessorEnvironment> ctx,
HRegionInfo info, HLogKey logKey, WALEdit logEdit) throws IOException;
/**
* Called after a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit}
* is writen to WAL.
- *
- * @param ctx
- * @param info
- * @param logKey
- * @param logEdit
- * @throws IOException
*/
+ void postWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> ctx,
+ HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException;
+
+ /**
+ * Called after a {@link org.apache.hadoop.hbase.regionserver.wal.WALEdit}
+ * is writen to WAL.
+ *
+ * This method is left in place to maintain binary compatibility with older
+ * {@link WALObserver}s. If an implementation directly overrides
+ * {@link #postWALWrite(ObserverContext, HRegionInfo, WALKey, WALEdit)} then this version
+ * won't be called at all, barring problems with the Security Manager. To work correctly
+ * in the presence of a strict Security Manager, or in the case of an implementation that
+ * relies on a parent class to implement preWALWrite, you should implement this method
+ * as a call to the non-deprecated version.
+ *
+ * Users of this method will see all edits that can be treated as HLogKey. If there are
+ * edits that can't be treated as HLogKey they won't be offered to coprocessors that rely
+ * on this method. If a coprocessor gets skipped because of this mechanism, a log message
+ * at ERROR will be generated per coprocessor on the logger for {@link CoprocessorHost} once per
+ * classloader.
+ *
+ * @deprecated use {@link #postWALWrite(ObserverContext, HRegionInfo, WALKey, WALEdit)}
+ */
+ @Deprecated
void postWALWrite(ObserverContext<WALCoprocessorEnvironment> ctx,
HRegionInfo info, HLogKey logKey, WALEdit logEdit) throws IOException;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
index f8cf7b30ccf..fb5836055d9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
@@ -40,7 +40,7 @@ import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -54,8 +54,8 @@ import org.apache.hadoop.util.ReflectionUtils;
/**
* An encapsulation for the FileSystem object that hbase uses to access
* data. This class allows the flexibility of using
- * separate filesystem objects for reading and writing hfiles and hlogs.
- * In future, if we want to make hlogs be in a different filesystem,
+ * separate filesystem objects for reading and writing hfiles and wals.
+ * In future, if we want to make wals be in a different filesystem,
* this is the place to make it happen.
*/
public class HFileSystem extends FilterFileSystem {
@@ -322,7 +322,7 @@ public class HFileSystem extends FilterFileSystem {
}
/**
- * We're putting at lowest priority the hlog files blocks that are on the same datanode
+ * We're putting at lowest priority the wal files blocks that are on the same datanode
* as the original regionserver which created these files. This because we fear that the
* datanode is actually dead, so if we use it it will timeout.
*/
@@ -330,17 +330,17 @@ public class HFileSystem extends FilterFileSystem {
public void reorderBlocks(Configuration conf, LocatedBlocks lbs, String src)
throws IOException {
- ServerName sn = HLogUtil.getServerNameFromHLogDirectoryName(conf, src);
+ ServerName sn = DefaultWALProvider.getServerNameFromWALDirectoryName(conf, src);
if (sn == null) {
- // It's not an HLOG
+ // It's not a WAL
return;
}
- // Ok, so it's an HLog
+ // Ok, so it's a WAL
String hostName = sn.getHostname();
if (LOG.isTraceEnabled()) {
LOG.trace(src +
- " is an HLog file, so reordering blocks, last hostname will be:" + hostName);
+ " is a WAL file, so reordering blocks, last hostname will be:" + hostName);
}
// Just check for all blocks
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HLogLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HLogLink.java
deleted file mode 100644
index e62eb141960..00000000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HLogLink.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.io;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.util.FSUtils;
-
-/**
- * HLogLink describes a link to a WAL.
- *
- * An hlog can be in /hbase/.logs/<server>/<hlog>
- * or it can be in /hbase/.oldlogs/<hlog>
- *
- * The link checks first in the original path,
- * if it is not present it fallbacks to the archived path.
- */
-@InterfaceAudience.Private
-public class HLogLink extends FileLink {
- /**
- * @param conf {@link Configuration} from which to extract specific archive locations
- * @param serverName Region Server owner of the log
- * @param logName WAL file name
- * @throws IOException on unexpected error.
- */
- public HLogLink(final Configuration conf,
- final String serverName, final String logName) throws IOException {
- this(FSUtils.getRootDir(conf), serverName, logName);
- }
-
- /**
- * @param rootDir Path to the root directory where hbase files are stored
- * @param serverName Region Server owner of the log
- * @param logName WAL file name
- */
- public HLogLink(final Path rootDir, final String serverName, final String logName) {
- final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
- final Path logDir = new Path(new Path(rootDir, HConstants.HREGION_LOGDIR_NAME), serverName);
- setLocations(new Path(logDir, logName), new Path(oldLogDir, logName));
- }
-
- /**
- * @param originPath Path to the wal in the log directory
- * @param archivePath Path to the wal in the archived log directory
- */
- public HLogLink(final Path originPath, final Path archivePath) {
- setLocations(originPath, archivePath);
- }
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 2c0efc8356d..e35071e9fbc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -186,12 +186,12 @@ public class HFileOutputFormat2
rollWriters();
}
- // create a new HLog writer, if necessary
+ // create a new WAL writer, if necessary
if (wl == null || wl.writer == null) {
wl = getNewWriter(family, conf);
}
- // we now have the proper HLog writer. full steam ahead
+ // we now have the proper WAL writer. full steam ahead
kv.updateLatestStamp(this.now);
wl.writer.append(kv);
wl.written += length;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogInputFormat.java
index 4f604f86f86..4ed0672ee26 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HLogInputFormat.java
@@ -17,26 +17,15 @@
*/
package org.apache.hadoop.hbase.mapreduce;
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.EOFException;
import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
+import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
@@ -44,227 +33,51 @@ import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
/**
- * Simple {@link InputFormat} for {@link HLog} files.
+ * Simple {@link InputFormat} for {@link WAL} files.
+ * @deprecated use {@link WALInputFormat}
*/
+@Deprecated
@InterfaceAudience.Public
public class HLogInputFormat extends InputFormat {
private static final Log LOG = LogFactory.getLog(HLogInputFormat.class);
-
public static final String START_TIME_KEY = "hlog.start.time";
public static final String END_TIME_KEY = "hlog.end.time";
- /**
- * {@link InputSplit} for {@link HLog} files. Each split represent
- * exactly one log file.
- */
- static class HLogSplit extends InputSplit implements Writable {
- private String logFileName;
- private long fileSize;
- private long startTime;
- private long endTime;
-
- /** for serialization */
- public HLogSplit() {}
-
- /**
- * Represent an HLogSplit, i.e. a single HLog file.
- * Start- and EndTime are managed by the split, so that HLog files can be
- * filtered before WALEdits are passed to the mapper(s).
- * @param logFileName
- * @param fileSize
- * @param startTime
- * @param endTime
- */
- public HLogSplit(String logFileName, long fileSize, long startTime, long endTime) {
- this.logFileName = logFileName;
- this.fileSize = fileSize;
- this.startTime = startTime;
- this.endTime = endTime;
- }
-
- @Override
- public long getLength() throws IOException, InterruptedException {
- return fileSize;
- }
-
- @Override
- public String[] getLocations() throws IOException, InterruptedException {
- // TODO: Find the data node with the most blocks for this HLog?
- return new String[] {};
- }
-
- public String getLogFileName() {
- return logFileName;
- }
-
- public long getStartTime() {
- return startTime;
- }
-
- public long getEndTime() {
- return endTime;
- }
-
- @Override
- public void readFields(DataInput in) throws IOException {
- logFileName = in.readUTF();
- fileSize = in.readLong();
- startTime = in.readLong();
- endTime = in.readLong();
- }
-
- @Override
- public void write(DataOutput out) throws IOException {
- out.writeUTF(logFileName);
- out.writeLong(fileSize);
- out.writeLong(startTime);
- out.writeLong(endTime);
- }
-
- @Override
- public String toString() {
- return logFileName + " (" + startTime + ":" + endTime + ") length:" + fileSize;
- }
- }
+ // Delegate to WALInputFormat for implementation.
+ private final WALInputFormat delegate = new WALInputFormat();
/**
- * {@link RecordReader} for an {@link HLog} file.
+ * {@link RecordReader} that pulls out the legacy HLogKey format directly.
*/
- static class HLogRecordReader extends RecordReader {
- private HLog.Reader reader = null;
- private HLog.Entry currentEntry = new HLog.Entry();
- private long startTime;
- private long endTime;
-
- @Override
- public void initialize(InputSplit split, TaskAttemptContext context)
- throws IOException, InterruptedException {
- HLogSplit hsplit = (HLogSplit)split;
- Path logFile = new Path(hsplit.getLogFileName());
- Configuration conf = context.getConfiguration();
- LOG.info("Opening reader for "+split);
- try {
- this.reader = HLogFactory.createReader(logFile.getFileSystem(conf),
- logFile, conf);
- } catch (EOFException x) {
- LOG.info("Ignoring corrupted HLog file: " + logFile
- + " (This is normal when a RegionServer crashed.)");
- }
- this.startTime = hsplit.getStartTime();
- this.endTime = hsplit.getEndTime();
- }
-
- @Override
- public boolean nextKeyValue() throws IOException, InterruptedException {
- if (reader == null) return false;
-
- HLog.Entry temp;
- long i = -1;
- do {
- // skip older entries
- try {
- temp = reader.next(currentEntry);
- i++;
- } catch (EOFException x) {
- LOG.info("Corrupted entry detected. Ignoring the rest of the file."
- + " (This is normal when a RegionServer crashed.)");
- return false;
- }
- }
- while(temp != null && temp.getKey().getWriteTime() < startTime);
-
- if (temp == null) {
- if (i > 0) LOG.info("Skipped " + i + " entries.");
- LOG.info("Reached end of file.");
- return false;
- } else if (i > 0) {
- LOG.info("Skipped " + i + " entries, until ts: " + temp.getKey().getWriteTime() + ".");
- }
- boolean res = temp.getKey().getWriteTime() <= endTime;
- if (!res) {
- LOG.info("Reached ts: " + temp.getKey().getWriteTime() + " ignoring the rest of the file.");
- }
- return res;
- }
-
+ static class HLogKeyRecordReader extends WALInputFormat.WALRecordReader {
@Override
public HLogKey getCurrentKey() throws IOException, InterruptedException {
- return currentEntry.getKey();
- }
-
- @Override
- public WALEdit getCurrentValue() throws IOException, InterruptedException {
- return currentEntry.getEdit();
- }
-
- @Override
- public float getProgress() throws IOException, InterruptedException {
- // N/A depends on total number of entries, which is unknown
- return 0;
- }
-
- @Override
- public void close() throws IOException {
- LOG.info("Closing reader");
- if (reader != null) this.reader.close();
+ if (!(currentEntry.getKey() instanceof HLogKey)) {
+ final IllegalStateException exception = new IllegalStateException(
+ "HLogInputFormat only works when given entries that have HLogKey for keys. This" +
+ " one had '" + currentEntry.getKey().getClass() + "'");
+ LOG.error("The deprecated HLogInputFormat has to work with the deprecated HLogKey class, " +
+ " but HBase internals read the wal entry using some other class." +
+ " This is a bug; please file an issue or email the developer mailing list. It is " +
+ "likely that you would not have this problem if you updated to use WALInputFormat. " +
+ "You will need the following exception details when seeking help from the HBase " +
+ "community.",
+ exception);
+ throw exception;
+ }
+ return (HLogKey)currentEntry.getKey();
}
}
@Override
public List getSplits(JobContext context) throws IOException,
InterruptedException {
- Configuration conf = context.getConfiguration();
- Path inputDir = new Path(conf.get("mapreduce.input.fileinputformat.inputdir"));
-
- long startTime = conf.getLong(START_TIME_KEY, Long.MIN_VALUE);
- long endTime = conf.getLong(END_TIME_KEY, Long.MAX_VALUE);
-
- FileSystem fs = inputDir.getFileSystem(conf);
- List files = getFiles(fs, inputDir, startTime, endTime);
-
- List splits = new ArrayList(files.size());
- for (FileStatus file : files) {
- splits.add(new HLogSplit(file.getPath().toString(), file.getLen(), startTime, endTime));
- }
- return splits;
- }
-
- private List getFiles(FileSystem fs, Path dir, long startTime, long endTime)
- throws IOException {
- List result = new ArrayList();
- LOG.debug("Scanning " + dir.toString() + " for HLog files");
-
- FileStatus[] files = fs.listStatus(dir);
- if (files == null) return Collections.emptyList();
- for (FileStatus file : files) {
- if (file.isDirectory()) {
- // recurse into sub directories
- result.addAll(getFiles(fs, file.getPath(), startTime, endTime));
- } else {
- String name = file.getPath().toString();
- int idx = name.lastIndexOf('.');
- if (idx > 0) {
- try {
- long fileStartTime = Long.parseLong(name.substring(idx+1));
- if (fileStartTime <= endTime) {
- LOG.info("Found: " + name);
- result.add(file);
- }
- } catch (NumberFormatException x) {
- idx = 0;
- }
- }
- if (idx == 0) {
- LOG.warn("File " + name + " does not appear to be an HLog file. Skipping...");
- }
- }
- }
- return result;
+ return delegate.getSplits(context, START_TIME_KEY, END_TIME_KEY);
}
@Override
public RecordReader createRecordReader(InputSplit split,
TaskAttemptContext context) throws IOException, InterruptedException {
- return new HLogRecordReader();
+ return new HLogKeyRecordReader();
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
index c1d837358e6..62a9626c926 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java
@@ -51,7 +51,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
*
*
*
- * Write-ahead logging (HLog) for Puts can be disabled by setting
+ * Write-ahead logging (WAL) for Puts can be disabled by setting
* {@link #WAL_PROPERTY} to {@link #WAL_OFF}. Default value is {@link #WAL_ON}.
* Note that disabling write-ahead logging is only appropriate for jobs where
* loss of data due to region server failure can be tolerated (for example,
@@ -61,7 +61,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class MultiTableOutputFormat extends OutputFormat {
- /** Set this to {@link #WAL_OFF} to turn off write-ahead logging (HLog) */
+ /** Set this to {@link #WAL_OFF} to turn off write-ahead logging (WAL) */
public static final String WAL_PROPERTY = "hbase.mapreduce.multitableoutputformat.wal";
/** Property value to use write-ahead logging */
public static final boolean WAL_ON = true;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
index 4d451a47c5c..79d52617c28 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
@@ -49,7 +49,7 @@ import com.google.common.annotations.VisibleForTesting;
/**
* TableSnapshotInputFormat allows a MapReduce job to run over a table snapshot. The job
* bypasses HBase servers, and directly accesses the underlying files (hfile, recovered edits,
- * hlogs, etc) directly to provide maximum performance. The snapshot is not required to be
+ * wals, etc) to provide maximum performance. The snapshot is not required to be
* restored to the live cluster or cloned. This also allows to run the mapreduce job from an
* online or offline hbase cluster. The snapshot files can be exported by using the
* {@link ExportSnapshot} tool, to a pure-hdfs cluster, and this InputFormat can be used to
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
index cf9dc56adb5..26fab5a6517 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
@@ -23,6 +23,8 @@ import java.text.SimpleDateFormat;
import java.util.Map;
import java.util.TreeMap;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -39,7 +41,7 @@ import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;
@@ -63,11 +65,21 @@ import org.apache.hadoop.util.ToolRunner;
@InterfaceAudience.Public
@InterfaceStability.Stable
public class WALPlayer extends Configured implements Tool {
+ final static Log LOG = LogFactory.getLog(WALPlayer.class);
final static String NAME = "WALPlayer";
- final static String BULK_OUTPUT_CONF_KEY = "hlog.bulk.output";
- final static String HLOG_INPUT_KEY = "hlog.input.dir";
- final static String TABLES_KEY = "hlog.input.tables";
- final static String TABLE_MAP_KEY = "hlog.input.tablesmap";
+ final static String BULK_OUTPUT_CONF_KEY = "wal.bulk.output";
+ final static String TABLES_KEY = "wal.input.tables";
+ final static String TABLE_MAP_KEY = "wal.input.tablesmap";
+
+ // This relies on Hadoop Configuration to handle warning about deprecated configs and
+ // to set the correct non-deprecated configs when an old one shows up.
+ static {
+ Configuration.addDeprecation("hlog.bulk.output", BULK_OUTPUT_CONF_KEY);
+ Configuration.addDeprecation("hlog.input.tables", TABLES_KEY);
+ Configuration.addDeprecation("hlog.input.tablesmap", TABLE_MAP_KEY);
+ Configuration.addDeprecation(HLogInputFormat.START_TIME_KEY, WALInputFormat.START_TIME_KEY);
+ Configuration.addDeprecation(HLogInputFormat.END_TIME_KEY, WALInputFormat.END_TIME_KEY);
+ }
private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name";
@@ -75,12 +87,12 @@ public class WALPlayer extends Configured implements Tool {
* A mapper that just writes out KeyValues.
* This one can be used together with {@link KeyValueSortReducer}
*/
- static class HLogKeyValueMapper
- extends Mapper {
+ static class WALKeyValueMapper
+ extends Mapper {
private byte[] table;
@Override
- public void map(HLogKey key, WALEdit value,
+ public void map(WALKey key, WALEdit value,
Context context)
throws IOException {
try {
@@ -102,7 +114,7 @@ public class WALPlayer extends Configured implements Tool {
// only a single table is supported when HFiles are generated with HFileOutputFormat
String tables[] = context.getConfiguration().getStrings(TABLES_KEY);
if (tables == null || tables.length != 1) {
- // this can only happen when HLogMapper is used directly by a class other than WALPlayer
+ // this can only happen when WALMapper is used directly by a class other than WALPlayer
throw new IOException("Exactly one table must be specified for bulk HFile case.");
}
table = Bytes.toBytes(tables[0]);
@@ -113,13 +125,13 @@ public class WALPlayer extends Configured implements Tool {
* A mapper that writes out {@link Mutation} to be directly applied to
* a running HBase instance.
*/
- static class HLogMapper
- extends Mapper {
+ static class WALMapper
+ extends Mapper {
private Map tables =
new TreeMap();
@Override
- public void map(HLogKey key, WALEdit value,
+ public void map(WALKey key, WALEdit value,
Context context)
throws IOException {
try {
@@ -132,7 +144,7 @@ public class WALPlayer extends Configured implements Tool {
Delete del = null;
Cell lastCell = null;
for (Cell cell : value.getCells()) {
- // filtering HLog meta entries
+ // filtering WAL meta entries
if (WALEdit.isMetaEditFamily(cell.getFamily())) continue;
// A WALEdit may contain multiple operations (HBASE-3584) and/or
@@ -172,7 +184,7 @@ public class WALPlayer extends Configured implements Tool {
String[] tableMap = context.getConfiguration().getStrings(TABLE_MAP_KEY);
String[] tablesToUse = context.getConfiguration().getStrings(TABLES_KEY);
if (tablesToUse == null || tableMap == null || tablesToUse.length != tableMap.length) {
- // this can only happen when HLogMapper is used directly by a class other than WALPlayer
+ // this can only happen when WALMapper is used directly by a class other than WALPlayer
throw new IOException("No tables or incorrect table mapping specified.");
}
int i = 0;
@@ -192,7 +204,7 @@ public class WALPlayer extends Configured implements Tool {
void setupTime(Configuration conf, String option) throws IOException {
String val = conf.get(option);
- if (val == null) return;
+ if (null == val) return;
long ms;
try {
// first try to parse in user friendly form
@@ -239,7 +251,7 @@ public class WALPlayer extends Configured implements Tool {
Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + inputDir));
job.setJarByClass(WALPlayer.class);
FileInputFormat.setInputPaths(job, inputDir);
- job.setInputFormatClass(HLogInputFormat.class);
+ job.setInputFormatClass(WALInputFormat.class);
job.setMapOutputKeyClass(ImmutableBytesWritable.class);
String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY);
if (hfileOutPath != null) {
@@ -248,7 +260,7 @@ public class WALPlayer extends Configured implements Tool {
throw new IOException("Exactly one table must be specified for the bulk export option");
}
HTable table = new HTable(conf, TableName.valueOf(tables[0]));
- job.setMapperClass(HLogKeyValueMapper.class);
+ job.setMapperClass(WALKeyValueMapper.class);
job.setReducerClass(KeyValueSortReducer.class);
Path outputDir = new Path(hfileOutPath);
FileOutputFormat.setOutputPath(job, outputDir);
@@ -258,7 +270,7 @@ public class WALPlayer extends Configured implements Tool {
com.google.common.base.Preconditions.class);
} else {
// output to live cluster
- job.setMapperClass(HLogMapper.class);
+ job.setMapperClass(WALMapper.class);
job.setOutputFormatClass(MultiTableOutputFormat.class);
TableMapReduceUtil.addDependencyJars(job);
TableMapReduceUtil.initCredentials(job);
@@ -288,8 +300,8 @@ public class WALPlayer extends Configured implements Tool {
System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output");
System.err.println(" (Only one table can be specified, and no mapping is allowed!)");
System.err.println("Other options: (specify time range to WAL edit to consider)");
- System.err.println(" -D" + HLogInputFormat.START_TIME_KEY + "=[date|ms]");
- System.err.println(" -D" + HLogInputFormat.END_TIME_KEY + "=[date|ms]");
+ System.err.println(" -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]");
+ System.err.println(" -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]");
System.err.println(" -D " + JOB_NAME_CONF_KEY
+ "=jobName - use the specified mapreduce job name for the wal player");
System.err.println("For performance also consider the following options:\n"
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 7c7f0b67799..d23f1397729 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -76,8 +76,7 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Regio
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.KeyLocker;
@@ -463,17 +462,20 @@ public class AssignmentManager {
}
if (!failover) {
// If we get here, we have a full cluster restart. It is a failover only
- // if there are some HLogs are not split yet. For meta HLogs, they should have
+ // if there are some WALs that are not split yet. For meta WALs, they should have
// been split already, if any. We can walk through those queued dead servers,
- // if they don't have any HLogs, this restart should be considered as a clean one
+ // if they don't have any WALs, this restart should be considered a clean one
Set queuedDeadServers = serverManager.getRequeuedDeadServers().keySet();
if (!queuedDeadServers.isEmpty()) {
Configuration conf = server.getConfiguration();
Path rootdir = FSUtils.getRootDir(conf);
FileSystem fs = rootdir.getFileSystem(conf);
for (ServerName serverName: queuedDeadServers) {
- Path logDir = new Path(rootdir, HLogUtil.getHLogDirectoryName(serverName.toString()));
- Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
+ // In the case of a clean exit, the shutdown handler would have presplit any WALs and
+ // removed empty directories.
+ Path logDir = new Path(rootdir,
+ DefaultWALProvider.getWALDirectoryName(serverName.toString()));
+ Path splitDir = logDir.suffix(DefaultWALProvider.SPLITTING_EXT);
if (fs.exists(logDir) || fs.exists(splitDir)) {
LOG.debug("Found queued dead server " + serverName);
failover = true;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 115cc3539da..9bf70e92e8b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -51,8 +51,8 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
+import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSTableDescriptors;
@@ -94,14 +94,14 @@ public class MasterFileSystem {
final static PathFilter META_FILTER = new PathFilter() {
@Override
public boolean accept(Path p) {
- return HLogUtil.isMetaFile(p);
+ return DefaultWALProvider.isMetaFile(p);
}
};
final static PathFilter NON_META_FILTER = new PathFilter() {
@Override
public boolean accept(Path p) {
- return !HLogUtil.isMetaFile(p);
+ return !DefaultWALProvider.isMetaFile(p);
}
};
@@ -216,7 +216,7 @@ public class MasterFileSystem {
*/
Set getFailedServersFromLogFolders() {
boolean retrySplitting = !conf.getBoolean("hbase.hlog.split.skip.errors",
- HLog.SPLIT_SKIP_ERRORS_DEFAULT);
+ WALSplitter.SPLIT_SKIP_ERRORS_DEFAULT);
Set serverNames = new HashSet();
Path logsDirPath = new Path(this.rootdir, HConstants.HREGION_LOGDIR_NAME);
@@ -239,13 +239,13 @@ public class MasterFileSystem {
return serverNames;
}
for (FileStatus status : logFolders) {
- String sn = status.getPath().getName();
- // truncate splitting suffix if present (for ServerName parsing)
- if (sn.endsWith(HLog.SPLITTING_EXT)) {
- sn = sn.substring(0, sn.length() - HLog.SPLITTING_EXT.length());
- }
- ServerName serverName = ServerName.parseServerName(sn);
- if (!onlineServers.contains(serverName)) {
+ final ServerName serverName = DefaultWALProvider.getServerNameFromWALDirectoryName(
+ status.getPath());
+ if (null == serverName) {
+ LOG.warn("Log folder " + status.getPath() + " doesn't look like its name includes a " +
+ "region server name; leaving in place. If you see later errors about missing " +
+ "write ahead logs they may be saved in this location.");
+ } else if (!onlineServers.contains(serverName)) {
LOG.info("Log folder " + status.getPath() + " doesn't belong "
+ "to a known region server, splitting");
serverNames.add(serverName);
@@ -283,7 +283,7 @@ public class MasterFileSystem {
}
/**
- * Specialized method to handle the splitting for meta HLog
+ * Specialized method to handle the splitting for meta WAL
* @param serverName
* @throws IOException
*/
@@ -294,7 +294,7 @@ public class MasterFileSystem {
}
/**
- * Specialized method to handle the splitting for meta HLog
+ * Specialized method to handle the splitting for meta WAL
* @param serverNames
* @throws IOException
*/
@@ -302,6 +302,9 @@ public class MasterFileSystem {
splitLog(serverNames, META_FILTER);
}
+ @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="UL_UNRELEASED_LOCK", justification=
+ "We only release this lock when we set it. Updates to code that uses it should verify use " +
+ "of the guard boolean.")
private List getLogDirs(final Set serverNames) throws IOException {
List logDirs = new ArrayList();
boolean needReleaseLock = false;
@@ -312,9 +315,10 @@ public class MasterFileSystem {
}
try {
for (ServerName serverName : serverNames) {
- Path logDir = new Path(this.rootdir, HLogUtil.getHLogDirectoryName(serverName.toString()));
- Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
- // Rename the directory so a rogue RS doesn't create more HLogs
+ Path logDir = new Path(this.rootdir,
+ DefaultWALProvider.getWALDirectoryName(serverName.toString()));
+ Path splitDir = logDir.suffix(DefaultWALProvider.SPLITTING_EXT);
+ // Rename the directory so a rogue RS doesn't create more WALs
if (fs.exists(logDir)) {
if (!this.fs.rename(logDir, splitDir)) {
throw new IOException("Failed fs.rename for log split: " + logDir);
@@ -367,9 +371,10 @@ public class MasterFileSystem {
}
/**
- * This method is the base split method that splits HLog files matching a filter. Callers should
- * pass the appropriate filter for meta and non-meta HLogs.
- * @param serverNames
+ * This method is the base split method that splits WAL files matching a filter. Callers should
+ * pass the appropriate filter for meta and non-meta WALs.
+ * @param serverNames logs belonging to these servers will be split; this will rename the log
+ * directory out from under a soft-failed server
* @param filter
* @throws IOException
*/
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystem.java
index 34547ef5858..45dbeb8595b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystem.java
@@ -31,7 +31,7 @@ public class MetricsMasterFileSystem {
/**
* Record a single instance of a split
* @param time time that the split took
- * @param size length of original HLogs that were split
+ * @param size length of original WALs that were split
*/
public synchronized void addSplit(long time, long size) {
source.updateSplitTime(time);
@@ -41,7 +41,7 @@ public class MetricsMasterFileSystem {
/**
* Record a single instance of a split
* @param time time that the split took
- * @param size length of original HLogs that were split
+ * @param size length of original WALs that were split
*/
public synchronized void addMetaWALSplit(long time, long size) {
source.updateMetaWALSplitTime(time);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index 95d41ed4332..b96aaee7b61 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -448,7 +448,7 @@ public class RegionStates {
}
/**
- * A dead server's hlogs have been split so that all the regions
+ * A dead server's wals have been split so that all the regions
* used to be open on it can be safely assigned now. Mark them assignable.
*/
public synchronized void logSplit(final ServerName serverName) {
@@ -688,7 +688,7 @@ public class RegionStates {
/**
* Checking if a region was assigned to a server which is not online now.
- * If so, we should hold re-assign this region till SSH has split its hlogs.
+ * If so, we should hold re-assign this region till SSH has split its wals.
* Once logs are split, the last assignment of this region will be reset,
* which means a null last assignment server is ok for re-assigning.
*
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 369362b24af..39d0a0f00ac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -591,7 +591,7 @@ public class ServerManager {
this.processDeadServer(serverName, false);
}
- public synchronized void processDeadServer(final ServerName serverName, boolean shouldSplitHlog) {
+ public synchronized void processDeadServer(final ServerName serverName, boolean shouldSplitWal) {
// When assignment manager is cleaning up the zookeeper nodes and rebuilding the
// in-memory region states, region servers could be down. Meta table can and
// should be re-assigned, log splitting can be done too. However, it is better to
@@ -601,14 +601,14 @@ public class ServerManager {
// the handler threads and meta table could not be re-assigned in case
// the corresponding server is down. So we queue them up here instead.
if (!services.getAssignmentManager().isFailoverCleanupDone()) {
- requeuedDeadServers.put(serverName, shouldSplitHlog);
+ requeuedDeadServers.put(serverName, shouldSplitWal);
return;
}
this.deadservers.add(serverName);
this.services.getExecutorService().submit(
new ServerShutdownHandler(this.master, this.services, this.deadservers, serverName,
- shouldSplitHlog));
+ shouldSplitWal));
}
/**
@@ -947,7 +947,7 @@ public class ServerManager {
/**
* During startup, if we figure it is not a failover, i.e. there is
- * no more HLog files to split, we won't try to recover these dead servers.
+ * no more WAL files to split, we won't try to recover these dead servers.
* So we just remove them from the queue. Use caution in calling this.
*/
void removeRequeuedDeadServers() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index bf28a44493d..6dd5cf1233f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -59,7 +59,7 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
import org.apache.hadoop.hbase.regionserver.SplitLogWorker;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Pair;
@@ -102,8 +102,7 @@ public class SplitLogManager {
private Server server;
private final Stoppable stopper;
- private FileSystem fs;
- private Configuration conf;
+ private final Configuration conf;
public static final int DEFAULT_UNASSIGNED_TIMEOUT = (3 * 60 * 1000); // 3 min
@@ -161,16 +160,34 @@ public class SplitLogManager {
}
private FileStatus[] getFileList(List logDirs, PathFilter filter) throws IOException {
+ return getFileList(conf, logDirs, filter);
+ }
+
+ /**
+ * Get a list of paths that need to be split given a set of server-specific directories and
+ * optionally a filter.
+ *
+ * See {@link DefaultWALProvider#getServerNameFromWALDirectoryName} for more info on directory
+ * layout.
+ *
+ * Should be package-private, but is needed by
+ * {@link org.apache.hadoop.hbase.wal.WALSplitter#split(Path, Path, Path, FileSystem,
+ * Configuration, WALFactory)} for tests.
+ */
+ @VisibleForTesting
+ public static FileStatus[] getFileList(final Configuration conf, final List logDirs,
+ final PathFilter filter)
+ throws IOException {
List fileStatus = new ArrayList();
- for (Path hLogDir : logDirs) {
- this.fs = hLogDir.getFileSystem(conf);
- if (!fs.exists(hLogDir)) {
- LOG.warn(hLogDir + " doesn't exist. Nothing to do!");
+ for (Path logDir : logDirs) {
+ final FileSystem fs = logDir.getFileSystem(conf);
+ if (!fs.exists(logDir)) {
+ LOG.warn(logDir + " doesn't exist. Nothing to do!");
continue;
}
- FileStatus[] logfiles = FSUtils.listStatus(fs, hLogDir, filter);
+ FileStatus[] logfiles = FSUtils.listStatus(fs, logDir, filter);
if (logfiles == null || logfiles.length == 0) {
- LOG.info(hLogDir + " is empty dir, no logs to split");
+ LOG.info(logDir + " is empty dir, no logs to split");
} else {
Collections.addAll(fileStatus, logfiles);
}
@@ -180,7 +197,7 @@ public class SplitLogManager {
}
/**
- * @param logDir one region sever hlog dir path in .logs
+ * @param logDir one region server wal dir path in .logs
* @throws IOException if there was an error while splitting any log file
* @return cumulative size of the logfiles split
* @throws IOException
@@ -206,7 +223,7 @@ public class SplitLogManager {
Set serverNames = new HashSet();
for (Path logDir : logDirs) {
try {
- ServerName serverName = HLogUtil.getServerNameFromHLogDirectoryName(logDir);
+ ServerName serverName = DefaultWALProvider.getServerNameFromWALDirectoryName(logDir);
if (serverName != null) {
serverNames.add(serverName);
}
@@ -273,6 +290,7 @@ public class SplitLogManager {
}
for (Path logDir : logDirs) {
status.setStatus("Cleaning up log directory...");
+ final FileSystem fs = logDir.getFileSystem(conf);
try {
if (fs.exists(logDir) && !fs.delete(logDir, false)) {
LOG.warn("Unable to delete log src dir. Ignoring. " + logDir);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java
index 6c8e4288ae6..f68bfa22db3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/LogCleaner.java
@@ -26,10 +26,10 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
/**
- * This Chore, every time it runs, will attempt to delete the HLogs in the old logs folder. The HLog
+ * This Chore, every time it runs, will attempt to delete the WALs in the old logs folder. The WAL
* is only deleted if none of the cleaner delegates says otherwise.
* @see BaseLogCleanerDelegate
*/
@@ -51,6 +51,6 @@ public class LogCleaner extends CleanerChore {
@Override
protected boolean validate(Path file) {
- return HLogUtil.validateHLogFilename(file.getName());
+ return DefaultWALProvider.validateWALFilename(file.getName());
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
index 3a39fb4492c..9d68601fb13 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/TimeToLiveLogCleaner.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
/**
- * Log cleaner that uses the timestamp of the hlog to determine if it should
+ * Log cleaner that uses the timestamp of the wal to determine if it should
* be deleted. By default they are allowed to live for 10 minutes.
*/
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
index 648c835f7b7..73208bc15b1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/MetaServerShutdownHandler.java
@@ -67,7 +67,7 @@ public class MetaServerShutdownHandler extends ServerShutdownHandler {
boolean distributedLogReplay =
(this.services.getMasterFileSystem().getLogRecoveryMode() == RecoveryMode.LOG_REPLAY);
try {
- if (this.shouldSplitHlog) {
+ if (this.shouldSplitWal) {
LOG.info("Splitting hbase:meta logs for " + serverName);
if (distributedLogReplay) {
Set regions = new HashSet();
@@ -95,7 +95,7 @@ public class MetaServerShutdownHandler extends ServerShutdownHandler {
}
try {
- if (this.shouldSplitHlog && distributedLogReplay) {
+ if (this.shouldSplitWal && distributedLogReplay) {
if (!am.waitOnRegionToClearRegionsInTransition(HRegionInfo.FIRST_META_REGIONINFO,
regionAssignmentWaitTimeout)) {
// Wait here is to avoid log replay hits current dead server and incur a RPC timeout
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
index c44396896c7..5b7b27b3d2b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
@@ -56,19 +56,19 @@ public class ServerShutdownHandler extends EventHandler {
protected final ServerName serverName;
protected final MasterServices services;
protected final DeadServer deadServers;
- protected final boolean shouldSplitHlog; // whether to split HLog or not
+ protected final boolean shouldSplitWal; // whether to split WAL or not
protected final int regionAssignmentWaitTimeout;
public ServerShutdownHandler(final Server server, final MasterServices services,
final DeadServer deadServers, final ServerName serverName,
- final boolean shouldSplitHlog) {
+ final boolean shouldSplitWal) {
this(server, services, deadServers, serverName, EventType.M_SERVER_SHUTDOWN,
- shouldSplitHlog);
+ shouldSplitWal);
}
ServerShutdownHandler(final Server server, final MasterServices services,
final DeadServer deadServers, final ServerName serverName, EventType type,
- final boolean shouldSplitHlog) {
+ final boolean shouldSplitWal) {
super(server, type);
this.serverName = serverName;
this.server = server;
@@ -77,7 +77,7 @@ public class ServerShutdownHandler extends EventHandler {
if (!this.deadServers.isDeadServer(this.serverName)) {
LOG.warn(this.serverName + " is NOT in deadservers; it should be!");
}
- this.shouldSplitHlog = shouldSplitHlog;
+ this.shouldSplitWal = shouldSplitWal;
this.regionAssignmentWaitTimeout = server.getConfiguration().getInt(
HConstants.LOG_REPLAY_WAIT_REGION_TIMEOUT, 15000);
}
@@ -133,7 +133,7 @@ public class ServerShutdownHandler extends EventHandler {
AssignmentManager am = services.getAssignmentManager();
ServerManager serverManager = services.getServerManager();
if (isCarryingMeta() /* hbase:meta */ || !am.isFailoverCleanupDone()) {
- serverManager.processDeadServer(serverName, this.shouldSplitHlog);
+ serverManager.processDeadServer(serverName, this.shouldSplitWal);
return;
}
@@ -180,7 +180,7 @@ public class ServerShutdownHandler extends EventHandler {
(this.services.getMasterFileSystem().getLogRecoveryMode() == RecoveryMode.LOG_REPLAY);
try {
- if (this.shouldSplitHlog) {
+ if (this.shouldSplitWal) {
if (distributedLogReplay) {
LOG.info("Mark regions in recovery for crashed server " + serverName +
" before assignment; regions=" + hris);
@@ -275,13 +275,13 @@ public class ServerShutdownHandler extends EventHandler {
throw (InterruptedIOException)new InterruptedIOException().initCause(ie);
} catch (IOException ioe) {
LOG.info("Caught " + ioe + " during region assignment, will retry");
- // Only do HLog splitting if shouldSplitHlog and in DLR mode
+ // Only do wal splitting if shouldSplitWal and in DLR mode
serverManager.processDeadServer(serverName,
- this.shouldSplitHlog && distributedLogReplay);
+ this.shouldSplitWal && distributedLogReplay);
return;
}
- if (this.shouldSplitHlog && distributedLogReplay) {
+ if (this.shouldSplitWal && distributedLogReplay) {
// wait for region assignment completes
for (HRegionInfo hri : toAssignRegions) {
try {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotLogCleaner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotLogCleaner.java
index d5e174d88df..a927db314cd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotLogCleaner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotLogCleaner.java
@@ -46,11 +46,11 @@ public class SnapshotLogCleaner extends BaseLogCleanerDelegate {
* Conf key for the frequency to attempt to refresh the cache of hfiles currently used in
* snapshots (ms)
*/
- static final String HLOG_CACHE_REFRESH_PERIOD_CONF_KEY =
+ static final String WAL_CACHE_REFRESH_PERIOD_CONF_KEY =
"hbase.master.hlogcleaner.plugins.snapshot.period";
/** Refresh cache, by default, every 5 minutes */
- private static final long DEFAULT_HLOG_CACHE_REFRESH_PERIOD = 300000;
+ private static final long DEFAULT_WAL_CACHE_REFRESH_PERIOD = 300000;
private SnapshotFileCache cache;
@@ -77,14 +77,14 @@ public class SnapshotLogCleaner extends BaseLogCleanerDelegate {
super.setConf(conf);
try {
long cacheRefreshPeriod = conf.getLong(
- HLOG_CACHE_REFRESH_PERIOD_CONF_KEY, DEFAULT_HLOG_CACHE_REFRESH_PERIOD);
+ WAL_CACHE_REFRESH_PERIOD_CONF_KEY, DEFAULT_WAL_CACHE_REFRESH_PERIOD);
final FileSystem fs = FSUtils.getCurrentFileSystem(conf);
Path rootDir = FSUtils.getRootDir(conf);
cache = new SnapshotFileCache(fs, rootDir, cacheRefreshPeriod, cacheRefreshPeriod,
"snapshot-log-cleaner-cache-refresher", new SnapshotFileCache.SnapshotFileInspector() {
public Collection filesUnderSnapshot(final Path snapshotDir)
throws IOException {
- return SnapshotReferenceUtil.getHLogNames(fs, snapshotDir);
+ return SnapshotReferenceUtil.getWALNames(fs, snapshotDir);
}
});
} catch (IOException e) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
index 0310733fbc0..19bfa8cc6c3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
@@ -50,9 +50,8 @@ import org.apache.hadoop.hbase.client.TableState;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.security.access.AccessControlLists;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.Bytes;
@@ -382,13 +381,11 @@ public class NamespaceUpgrade implements Tool {
ServerName fakeServer = ServerName.valueOf("nsupgrade", 96, 123);
- String metaLogName = HLogUtil.getHLogDirectoryName(fakeServer.toString());
- HLog metaHLog = HLogFactory.createMetaHLog(fs, rootDir,
- metaLogName, conf, null,
- fakeServer.toString());
+ final WALFactory walFactory = new WALFactory(conf, null, fakeServer.toString());
+ WAL metawal = walFactory.getMetaWAL(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes());
FSTableDescriptors fst = new FSTableDescriptors(conf);
HRegion meta = HRegion.openHRegion(rootDir, HRegionInfo.FIRST_META_REGIONINFO,
- fst.get(TableName.META_TABLE_NAME), metaHLog, conf);
+ fst.get(TableName.META_TABLE_NAME), metawal, conf);
HRegion region = null;
try {
for(Path regionDir : FSUtils.getRegionDirs(fs, oldTablePath)) {
@@ -405,7 +402,7 @@ public class NamespaceUpgrade implements Tool {
new HRegion(
HRegionFileSystem.openRegionFromFileSystem(conf, fs, oldTablePath,
oldRegionInfo, false),
- metaHLog,
+ metawal,
conf,
oldDesc,
null);
@@ -442,7 +439,7 @@ public class NamespaceUpgrade implements Tool {
meta.flushcache();
meta.waitForFlushesAndCompactions();
meta.close();
- metaHLog.closeAndDelete();
+ metawal.close();
if(region != null) {
region.close();
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
index 2e5fc418bc7..d6a120b1040 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
@@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.hbase.util.Pair;
@@ -50,14 +50,14 @@ import com.google.protobuf.ServiceException;
@InterfaceAudience.Private
public class ReplicationProtbufUtil {
/**
- * A helper to replicate a list of HLog entries using admin protocol.
+ * A helper to replicate a list of WAL entries using admin protocol.
*
* @param admin
* @param entries
* @throws java.io.IOException
*/
public static void replicateWALEntry(final AdminService.BlockingInterface admin,
- final HLog.Entry[] entries) throws IOException {
+ final Entry[] entries) throws IOException {
Pair p =
buildReplicateWALEntryRequest(entries, null);
PayloadCarryingRpcController controller = new PayloadCarryingRpcController(p.getSecond());
@@ -69,27 +69,27 @@ public class ReplicationProtbufUtil {
}
/**
- * Create a new ReplicateWALEntryRequest from a list of HLog entries
+ * Create a new ReplicateWALEntryRequest from a list of WAL entries
*
- * @param entries the HLog entries to be replicated
+ * @param entries the WAL entries to be replicated
* @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values
* found.
*/
public static Pair
- buildReplicateWALEntryRequest(final HLog.Entry[] entries) {
+ buildReplicateWALEntryRequest(final Entry[] entries) {
return buildReplicateWALEntryRequest(entries, null);
}
/**
- * Create a new ReplicateWALEntryRequest from a list of HLog entries
+ * Create a new ReplicateWALEntryRequest from a list of WAL entries
*
- * @param entries the HLog entries to be replicated
+ * @param entries the WAL entries to be replicated
* @param encodedRegionName alternative region name to use if not null
* @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values
* found.
*/
public static Pair
- buildReplicateWALEntryRequest(final HLog.Entry[] entries, byte[] encodedRegionName) {
+ buildReplicateWALEntryRequest(final Entry[] entries, byte[] encodedRegionName) {
// Accumulate all the Cells seen in here.
List> allCells = new ArrayList>(entries.length);
int size = 0;
@@ -98,11 +98,11 @@ public class ReplicationProtbufUtil {
AdminProtos.ReplicateWALEntryRequest.Builder builder =
AdminProtos.ReplicateWALEntryRequest.newBuilder();
HBaseProtos.UUID.Builder uuidBuilder = HBaseProtos.UUID.newBuilder();
- for (HLog.Entry entry: entries) {
+ for (Entry entry: entries) {
entryBuilder.clear();
- // TODO: this duplicates a lot in HLogKey#getBuilder
+ // TODO: this duplicates a lot in WALKey#getBuilder
WALProtos.WALKey.Builder keyBuilder = entryBuilder.getKeyBuilder();
- HLogKey key = entry.getKey();
+ WALKey key = entry.getKey();
keyBuilder.setEncodedRegionName(
ByteStringer.wrap(encodedRegionName == null
? key.getEncodedRegionName()
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 7952a87fec7..464ad7eaea2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -44,6 +44,7 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.net.InetAddress;
@@ -124,9 +125,10 @@ import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler;
import org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
+import org.apache.hadoop.hbase.regionserver.wal.MetricsWAL;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.trace.SpanReceiverHost;
@@ -330,15 +332,13 @@ public class HRegionServer extends HasThread implements
*/
Chore periodicFlusher;
- // HLog and HLog roller. log is protected rather than private to avoid
- // eclipse warning when accessed by inner classes
- protected volatile HLog hlog;
- // The meta updates are written to a different hlog. If this
- // regionserver holds meta regions, then this field will be non-null.
- protected volatile HLog hlogForMeta;
+ protected volatile WALFactory walFactory;
- LogRoller hlogRoller;
- LogRoller metaHLogRoller;
+ // WAL roller. log is protected rather than private to avoid
+ // eclipse warning when accessed by inner classes
+ final LogRoller walRoller;
+ // Lazily initialized if this RegionServer hosts a meta table.
+ final AtomicReference metawalRoller = new AtomicReference();
// flag set after we're done setting up server threads
final AtomicBoolean online = new AtomicBoolean(false);
@@ -546,6 +546,7 @@ public class HRegionServer extends HasThread implements
rpcServices.start();
putUpWebUI();
+ this.walRoller = new LogRoller(this, this);
}
protected void login(UserProvider user, String host) throws IOException {
@@ -974,7 +975,7 @@ public class HRegionServer extends HasThread implements
//fsOk flag may be changed when closing regions throws exception.
if (this.fsOk) {
- closeWAL(!abortRequested);
+ shutdownWAL(!abortRequested);
}
// Make sure the proxy is down.
@@ -1076,7 +1077,8 @@ public class HRegionServer extends HasThread implements
}
}
- ClusterStatusProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime) {
+ ClusterStatusProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime)
+ throws IOException {
// We're getting the MetricsRegionServerWrapper here because the wrapper computes requests
// per second, and other metrics As long as metrics are part of ServerLoad it's best to use
// the wrapper to compute those numbers in one place.
@@ -1095,7 +1097,7 @@ public class HRegionServer extends HasThread implements
serverLoad.setTotalNumberOfRequests((int) regionServerWrapper.getTotalRequestCount());
serverLoad.setUsedHeapMB((int)(memory.getUsed() / 1024 / 1024));
serverLoad.setMaxHeapMB((int) (memory.getMax() / 1024 / 1024));
- Set coprocessors = this.hlog.getCoprocessorHost().getCoprocessors();
+ Set coprocessors = getWAL(null).getCoprocessorHost().getCoprocessors();
for (String coprocessor : coprocessors) {
serverLoad.addCoprocessors(
Coprocessor.newBuilder().setName(coprocessor).build());
@@ -1104,6 +1106,10 @@ public class HRegionServer extends HasThread implements
RegionSpecifier.Builder regionSpecifier = RegionSpecifier.newBuilder();
for (HRegion region : regions) {
serverLoad.addRegionLoads(createRegionLoad(region, regionLoadBldr, regionSpecifier));
+ for (String coprocessor :
+ getWAL(region.getRegionInfo()).getCoprocessorHost().getCoprocessors()) {
+ serverLoad.addCoprocessors(Coprocessor.newBuilder().setName(coprocessor).build());
+ }
}
serverLoad.setReportStartTime(reportStartTime);
serverLoad.setReportEndTime(reportEndTime);
@@ -1192,35 +1198,24 @@ public class HRegionServer extends HasThread implements
return interrupted;
}
- private void closeWAL(final boolean delete) {
- if (this.hlogForMeta != null) {
- // All hlogs (meta and non-meta) are in the same directory. Don't call
- // closeAndDelete here since that would delete all hlogs not just the
- // meta ones. We will just 'close' the hlog for meta here, and leave
- // the directory cleanup to the follow-on closeAndDelete call.
+ private void shutdownWAL(final boolean close) {
+ if (this.walFactory != null) {
try {
- this.hlogForMeta.close();
- } catch (Throwable e) {
- e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
- LOG.error("Metalog close and delete failed", e);
- }
- }
- if (this.hlog != null) {
- try {
- if (delete) {
- hlog.closeAndDelete();
+ if (close) {
+ walFactory.close();
} else {
- hlog.close();
+ walFactory.shutdown();
}
} catch (Throwable e) {
e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
- LOG.error("Close and delete failed", e);
+ LOG.error("Shutdown / close of WAL failed: " + e);
+ LOG.debug("Shutdown / close exception details:", e);
}
}
}
/*
- * Run init. Sets up hlog and starts up all server threads.
+ * Run init. Sets up wal and starts up all server threads.
*
* @param c Extra configuration.
*/
@@ -1258,7 +1253,7 @@ public class HRegionServer extends HasThread implements
ZNodeClearer.writeMyEphemeralNodeOnDisk(getMyEphemeralNodePath());
this.cacheConfig = new CacheConfig(conf);
- this.hlog = setupWALAndReplication();
+ this.walFactory = setupWALAndReplication();
// Init in here rather than in constructor after thread name has been set
this.metricsRegionServer = new MetricsRegionServer(new MetricsRegionServerWrapperImpl(this));
@@ -1502,10 +1497,10 @@ public class HRegionServer extends HasThread implements
* @return A WAL instance.
* @throws IOException
*/
- private HLog setupWALAndReplication() throws IOException {
+ private WALFactory setupWALAndReplication() throws IOException {
+ // TODO Replication make assumptions here based on the default filesystem impl
final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
- final String logName
- = HLogUtil.getHLogDirectoryName(this.serverName.toString());
+ final String logName = DefaultWALProvider.getWALDirectoryName(this.serverName.toString());
Path logdir = new Path(rootDir, logName);
if (LOG.isDebugEnabled()) LOG.debug("logdir=" + logdir);
@@ -1518,66 +1513,44 @@ public class HRegionServer extends HasThread implements
// log directories.
createNewReplicationInstance(conf, this, this.fs, logdir, oldLogDir);
- return instantiateHLog(rootDir, logName);
- }
-
- private HLog getMetaWAL() throws IOException {
- if (this.hlogForMeta != null) return this.hlogForMeta;
- final String logName = HLogUtil.getHLogDirectoryName(this.serverName.toString());
- Path logdir = new Path(rootDir, logName);
- if (LOG.isDebugEnabled()) LOG.debug("logdir=" + logdir);
- this.hlogForMeta = HLogFactory.createMetaHLog(this.fs.getBackingFs(), rootDir, logName,
- this.conf, getMetaWALActionListeners(), this.serverName.toString());
- return this.hlogForMeta;
- }
-
- /**
- * Called by {@link #setupWALAndReplication()} creating WAL instance.
- * @param rootdir
- * @param logName
- * @return WAL instance.
- * @throws IOException
- */
- protected HLog instantiateHLog(Path rootdir, String logName) throws IOException {
- return HLogFactory.createHLog(this.fs.getBackingFs(), rootdir, logName, this.conf,
- getWALActionListeners(), this.serverName.toString());
- }
-
- /**
- * Called by {@link #instantiateHLog(Path, String)} setting up WAL instance.
- * Add any {@link WALActionsListener}s you want inserted before WAL startup.
- * @return List of WALActionsListener that will be passed in to
- * {@link org.apache.hadoop.hbase.regionserver.wal.FSHLog} on construction.
- */
- protected List getWALActionListeners() {
- List listeners = new ArrayList();
- // Log roller.
- this.hlogRoller = new LogRoller(this, this);
- listeners.add(this.hlogRoller);
+ // listeners the wal factory will add to wals it creates.
+ final List listeners = new ArrayList();
+ listeners.add(new MetricsWAL());
if (this.replicationSourceHandler != null &&
this.replicationSourceHandler.getWALActionsListener() != null) {
// Replication handler is an implementation of WALActionsListener.
listeners.add(this.replicationSourceHandler.getWALActionsListener());
}
- return listeners;
+
+ return new WALFactory(conf, listeners, serverName.toString());
}
- protected List getMetaWALActionListeners() {
- List listeners = new ArrayList();
+ /**
+ * We initialize the roller for the wal that handles meta lazily
+ * since we don't know if this regionserver will handle it. All calls to
+ * this method return a reference to that same roller. As newly referenced
+ * meta regions are brought online, they will be offered to the roller for maintenance.
+ * As a part of that registration process, the roller will add itself as a
+ * listener on the wal.
+ */
+ protected LogRoller ensureMetaWALRoller() {
// Using a tmp log roller to ensure metaLogRoller is alive once it is not
// null
- MetaLogRoller tmpLogRoller = new MetaLogRoller(this, this);
- String n = Thread.currentThread().getName();
- Threads.setDaemonThreadRunning(tmpLogRoller.getThread(),
- n + "-MetaLogRoller", uncaughtExceptionHandler);
- this.metaHLogRoller = tmpLogRoller;
- tmpLogRoller = null;
- listeners.add(this.metaHLogRoller);
- return listeners;
- }
-
- protected LogRoller getLogRoller() {
- return hlogRoller;
+ LogRoller roller = metawalRoller.get();
+ if (null == roller) {
+ LogRoller tmpLogRoller = new LogRoller(this, this);
+ String n = Thread.currentThread().getName();
+ Threads.setDaemonThreadRunning(tmpLogRoller.getThread(),
+ n + "-MetaLogRoller", uncaughtExceptionHandler);
+ if (metawalRoller.compareAndSet(null, tmpLogRoller)) {
+ roller = tmpLogRoller;
+ } else {
+ // Another thread won starting the roller
+ Threads.shutdown(tmpLogRoller.getThread());
+ roller = metawalRoller.get();
+ }
+ }
+ return roller;
}
public MetricsRegionServer getRegionServerMetrics() {
@@ -1620,7 +1593,7 @@ public class HRegionServer extends HasThread implements
this.service.startExecutorService(ExecutorType.RS_LOG_REPLAY_OPS, conf.getInt(
"hbase.regionserver.wal.max.splitters", SplitLogWorkerCoordination.DEFAULT_MAX_SPLITTERS));
- Threads.setDaemonThreadRunning(this.hlogRoller.getThread(), getName() + ".logRoller",
+ Threads.setDaemonThreadRunning(this.walRoller.getThread(), getName() + ".logRoller",
uncaughtExceptionHandler);
this.cacheFlusher.start(uncaughtExceptionHandler);
Threads.setDaemonThreadRunning(this.compactionChecker.getThread(), getName() +
@@ -1667,7 +1640,7 @@ public class HRegionServer extends HasThread implements
sinkConf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
conf.getInt("hbase.log.replay.rpc.timeout", 30000)); // default 30 seconds
sinkConf.setInt("hbase.client.serverside.retries.multiplier", 1);
- this.splitLogWorker = new SplitLogWorker(this, sinkConf, this, this);
+ this.splitLogWorker = new SplitLogWorker(this, sinkConf, this, this, walFactory);
splitLogWorker.start();
}
@@ -1730,38 +1703,37 @@ public class HRegionServer extends HasThread implements
}
// Verify that all threads are alive
if (!(leases.isAlive()
- && cacheFlusher.isAlive() && hlogRoller.isAlive()
+ && cacheFlusher.isAlive() && walRoller.isAlive()
&& this.compactionChecker.isAlive()
&& this.periodicFlusher.isAlive())) {
stop("One or more threads are no longer alive -- stop");
return false;
}
- if (metaHLogRoller != null && !metaHLogRoller.isAlive()) {
- stop("Meta HLog roller thread is no longer alive -- stop");
+ final LogRoller metawalRoller = this.metawalRoller.get();
+ if (metawalRoller != null && !metawalRoller.isAlive()) {
+ stop("Meta WAL roller thread is no longer alive -- stop");
return false;
}
return true;
}
- public HLog getWAL() {
- try {
- return getWAL(null);
- } catch (IOException e) {
- LOG.warn("getWAL threw exception " + e);
- return null;
- }
- }
+ private static final byte[] UNSPECIFIED_REGION = new byte[]{};
@Override
- public HLog getWAL(HRegionInfo regionInfo) throws IOException {
- //TODO: at some point this should delegate to the HLogFactory
- //currently, we don't care about the region as much as we care about the
- //table.. (hence checking the tablename below)
+ public WAL getWAL(HRegionInfo regionInfo) throws IOException {
+ WAL wal;
+ LogRoller roller = walRoller;
//_ROOT_ and hbase:meta regions have separate WAL.
if (regionInfo != null && regionInfo.isMetaTable()) {
- return getMetaWAL();
+ roller = ensureMetaWALRoller();
+ wal = walFactory.getMetaWAL(regionInfo.getEncodedNameAsBytes());
+ } else if (regionInfo == null) {
+ wal = walFactory.getWAL(UNSPECIFIED_REGION);
+ } else {
+ wal = walFactory.getWAL(regionInfo.getEncodedNameAsBytes());
}
- return this.hlog;
+ roller.addWAL(wal);
+ return wal;
}
@Override
@@ -2006,11 +1978,12 @@ public class HRegionServer extends HasThread implements
if (this.spanReceiverHost != null) {
this.spanReceiverHost.closeReceivers();
}
- if (this.hlogRoller != null) {
- Threads.shutdown(this.hlogRoller.getThread());
+ if (this.walRoller != null) {
+ Threads.shutdown(this.walRoller.getThread());
}
- if (this.metaHLogRoller != null) {
- Threads.shutdown(this.metaHLogRoller.getThread());
+ final LogRoller metawalRoller = this.metawalRoller.get();
+ if (metawalRoller != null) {
+ Threads.shutdown(metawalRoller.getThread());
}
if (this.compactSplitThread != null) {
this.compactSplitThread.join();
@@ -2518,7 +2491,7 @@ public class HRegionServer extends HasThread implements
* @see org.apache.hadoop.hbase.regionserver.HRegionServerCommandLine
*/
public static void main(String[] args) throws Exception {
- VersionInfo.logVersion();
+ VersionInfo.logVersion();
Configuration conf = HBaseConfiguration.create();
@SuppressWarnings("unchecked")
Class extends HRegionServer> regionServerClass = (Class extends HRegionServer>) conf
@@ -2569,11 +2542,24 @@ public class HRegionServer extends HasThread implements
// used by org/apache/hbase/tmpl/regionserver/RSStatusTmpl.jamon (HBASE-4070).
public String[] getRegionServerCoprocessors() {
- TreeSet coprocessors = new TreeSet(
- this.hlog.getCoprocessorHost().getCoprocessors());
+ TreeSet coprocessors = new TreeSet();
+ try {
+ coprocessors.addAll(getWAL(null).getCoprocessorHost().getCoprocessors());
+ } catch (IOException exception) {
+ LOG.warn("Exception attempting to fetch wal coprocessor information for the common wal; " +
+ "skipping.");
+ LOG.debug("Exception details for failure to fetch wal coprocessor information.", exception);
+ }
Collection regions = getOnlineRegionsLocalContext();
for (HRegion region: regions) {
coprocessors.addAll(region.getCoprocessorHost().getCoprocessors());
+ try {
+ coprocessors.addAll(getWAL(region.getRegionInfo()).getCoprocessorHost().getCoprocessors());
+ } catch (IOException exception) {
+ LOG.warn("Exception attempting to fetch wal coprocessor information for region " + region +
+ "; skipping.");
+ LOG.debug("Exception details for failure to fetch wal coprocessor information.", exception);
+ }
}
return coprocessors.toArray(new String[coprocessors.size()]);
}
@@ -2696,16 +2682,22 @@ public class HRegionServer extends HasThread implements
HRegion toReturn = this.onlineRegions.remove(r.getRegionInfo().getEncodedName());
if (destination != null) {
- HLog wal = getWAL();
- long closeSeqNum = wal.getEarliestMemstoreSeqNum(r.getRegionInfo().getEncodedNameAsBytes());
- if (closeSeqNum == HConstants.NO_SEQNUM) {
- // No edits in WAL for this region; get the sequence number when the region was opened.
- closeSeqNum = r.getOpenSeqNum();
+ try {
+ WAL wal = getWAL(r.getRegionInfo());
+ long closeSeqNum = wal.getEarliestMemstoreSeqNum(r.getRegionInfo().getEncodedNameAsBytes());
if (closeSeqNum == HConstants.NO_SEQNUM) {
- closeSeqNum = 0;
+ // No edits in WAL for this region; get the sequence number when the region was opened.
+ closeSeqNum = r.getOpenSeqNum();
+ if (closeSeqNum == HConstants.NO_SEQNUM) {
+ closeSeqNum = 0;
+ }
}
+ addToMovedRegions(r.getRegionInfo().getEncodedName(), destination, closeSeqNum);
+ } catch (IOException exception) {
+ LOG.error("Could not retrieve WAL information for region " + r.getRegionInfo() +
+ "; not adding to moved regions.");
+ LOG.debug("Exception details for failure to get wal", exception);
}
- addToMovedRegions(r.getRegionInfo().getEncodedName(), destination, closeSeqNum);
}
this.regionFavoredNodesMap.remove(r.getRegionInfo().getEncodedName());
return toReturn != null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 85b7676079f..381114220a7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -78,7 +78,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor;
import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours;
-import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
+import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
import org.apache.hadoop.hbase.security.EncryptionUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.util.Bytes;
@@ -1216,7 +1216,7 @@ public class HStore implements Store {
*/
private void writeCompactionWalRecord(Collection filesCompacted,
Collection newFiles) throws IOException {
- if (region.getLog() == null) return;
+ if (region.getWAL() == null) return;
List inputPaths = new ArrayList(filesCompacted.size());
for (StoreFile f : filesCompacted) {
inputPaths.add(f.getPath());
@@ -1228,7 +1228,7 @@ public class HStore implements Store {
HRegionInfo info = this.region.getRegionInfo();
CompactionDescriptor compactionDescriptor = ProtobufUtil.toCompactionDescriptor(info,
family.getName(), inputPaths, outputPaths, fs.getStoreDir(getFamily().getNameAsString()));
- HLogUtil.writeCompactionMarker(region.getLog(), this.region.getTableDesc(),
+ WALUtil.writeCompactionMarker(region.getWAL(), this.region.getTableDesc(),
this.region.getRegionInfo(), compactionDescriptor, this.region.getSequenceId());
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
index e8873ff6276..12c7c562dd8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
@@ -19,38 +19,39 @@
package org.apache.hadoop.hbase.regionserver;
import java.io.IOException;
+import java.util.Map.Entry;
+import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.ipc.RemoteException;
/**
- * Runs periodically to determine if the HLog should be rolled.
+ * Runs periodically to determine if the WAL should be rolled.
*
* NOTE: This class extends Thread rather than Chore because the sleep time
* can be interrupted when there is something to do, rather than the Chore
* sleep time which is invariant.
+ *
+ * TODO: change to a pool of threads
*/
@InterfaceAudience.Private
-class LogRoller extends HasThread implements WALActionsListener {
+class LogRoller extends HasThread {
static final Log LOG = LogFactory.getLog(LogRoller.class);
private final ReentrantLock rollLock = new ReentrantLock();
private final AtomicBoolean rollLog = new AtomicBoolean(false);
+ private final ConcurrentHashMap walNeedsRoll =
+ new ConcurrentHashMap();
private final Server server;
protected final RegionServerServices services;
private volatile long lastrolltime = System.currentTimeMillis();
@@ -58,6 +59,32 @@ class LogRoller extends HasThread implements WALActionsListener {
private final long rollperiod;
private final int threadWakeFrequency;
+ public void addWAL(final WAL wal) {
+ if (null == walNeedsRoll.putIfAbsent(wal, Boolean.FALSE)) {
+ wal.registerWALActionsListener(new WALActionsListener.Base() {
+ @Override
+ public void logRollRequested() {
+ walNeedsRoll.put(wal, Boolean.TRUE);
+ // TODO logs will contend with each other here, replace with e.g. DelayQueue
+ synchronized(rollLog) {
+ rollLog.set(true);
+ rollLog.notifyAll();
+ }
+ }
+ });
+ }
+ }
+
+ public void requestRollAll() {
+ for (WAL wal : walNeedsRoll.keySet()) {
+ walNeedsRoll.put(wal, Boolean.TRUE);
+ }
+ synchronized(rollLog) {
+ rollLog.set(true);
+ rollLog.notifyAll();
+ }
+ }
+
/** @param server */
public LogRoller(final Server server, final RegionServerServices services) {
super();
@@ -88,19 +115,24 @@ class LogRoller extends HasThread implements WALActionsListener {
}
// Time for periodic roll
if (LOG.isDebugEnabled()) {
- LOG.debug("Hlog roll period " + this.rollperiod + "ms elapsed");
+ LOG.debug("Wal roll period " + this.rollperiod + "ms elapsed");
}
} else if (LOG.isDebugEnabled()) {
- LOG.debug("HLog roll requested");
+ LOG.debug("WAL roll requested");
}
rollLock.lock(); // FindBugs UL_UNRELEASED_LOCK_EXCEPTION_PATH
try {
this.lastrolltime = now;
- // Force the roll if the logroll.period is elapsed or if a roll was requested.
- // The returned value is an array of actual region names.
- byte [][] regionsToFlush = getWAL().rollWriter(periodic || rollLog.get());
- if (regionsToFlush != null) {
- for (byte [] r: regionsToFlush) scheduleFlush(r);
+ for (Entry entry : walNeedsRoll.entrySet()) {
+ final WAL wal = entry.getKey();
+ // Force the roll if the logroll.period is elapsed or if a roll was requested.
+ // The returned value is an array of actual region names.
+ final byte [][] regionsToFlush = wal.rollWriter(periodic ||
+ entry.getValue().booleanValue());
+ walNeedsRoll.put(wal, Boolean.FALSE);
+ if (regionsToFlush != null) {
+ for (byte [] r: regionsToFlush) scheduleFlush(r);
+ }
}
} catch (FailedLogCloseException e) {
server.abort("Failed log close in log roller", e);
@@ -145,51 +177,4 @@ class LogRoller extends HasThread implements WALActionsListener {
}
}
- public void logRollRequested() {
- synchronized (rollLog) {
- rollLog.set(true);
- rollLog.notifyAll();
- }
- }
-
- protected HLog getWAL() throws IOException {
- return this.services.getWAL(null);
- }
-
- @Override
- public void preLogRoll(Path oldPath, Path newPath) throws IOException {
- // Not interested
- }
-
- @Override
- public void postLogRoll(Path oldPath, Path newPath) throws IOException {
- // Not interested
- }
-
- @Override
- public void preLogArchive(Path oldPath, Path newPath) throws IOException {
- // Not interested
- }
-
- @Override
- public void postLogArchive(Path oldPath, Path newPath) throws IOException {
- // Not interested
- }
-
- @Override
- public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
- WALEdit logEdit) {
- // Not interested.
- }
-
- @Override
- public void visitLogEntryBeforeWrite(HTableDescriptor htd, HLogKey logKey,
- WALEdit logEdit) {
- //Not interested
- }
-
- @Override
- public void logCloseRequested() {
- // not interested
- }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index 34576f70484..4d4f1467086 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -460,11 +460,11 @@ class MemStoreFlusher implements FlushRequester {
}
} catch (DroppedSnapshotException ex) {
// Cache flush can fail in a few places. If it fails in a critical
- // section, we get a DroppedSnapshotException and a replay of hlog
+ // section, we get a DroppedSnapshotException and a replay of the WAL
// is required. Currently the only way to do this is a restart of
// the server. Abort because hdfs is probably bad (HBASE-644 is a case
// where hdfs was bad but passed the hdfs check).
- server.abort("Replay of HLog required. Forcing server shutdown", ex);
+ server.abort("Replay of WAL required. Forcing server shutdown", ex);
return false;
} catch (IOException ex) {
ex = ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetaLogRoller.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetaLogRoller.java
deleted file mode 100644
index 467cfdf8ace..00000000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetaLogRoller.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-
-@InterfaceAudience.Private
-class MetaLogRoller extends LogRoller {
- public MetaLogRoller(Server server, RegionServerServices services) {
- super(server, services);
- }
- @Override
- protected HLog getWAL() throws IOException {
- //The argument to getWAL below could either be HRegionInfo.FIRST_META_REGIONINFO or
- //HRegionInfo.ROOT_REGIONINFO. Both these share the same WAL.
- return services.getWAL(HRegionInfo.FIRST_META_REGIONINFO);
- }
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
index 327f55caf1a..52eafb94fd3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.CacheStats;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -53,8 +54,8 @@ class MetricsRegionServerWrapperImpl
private BlockCache blockCache;
private volatile long numStores = 0;
- private volatile long numHLogFiles = 0;
- private volatile long hlogFileSize = 0;
+ private volatile long numWALFiles = 0;
+ private volatile long walFileSize = 0;
private volatile long numStoreFiles = 0;
private volatile long memstoreSize = 0;
private volatile long storeFileSize = 0;
@@ -286,13 +287,13 @@ class MetricsRegionServerWrapperImpl
}
@Override
- public long getNumHLogFiles() {
- return numHLogFiles;
+ public long getNumWALFiles() {
+ return numWALFiles;
}
@Override
- public long getHLogFileSize() {
- return hlogFileSize;
+ public long getWALFileSize() {
+ return walFileSize;
}
@Override
@@ -490,21 +491,11 @@ class MetricsRegionServerWrapperImpl
}
lastRan = currentTime;
+ numWALFiles = DefaultWALProvider.getNumLogFiles(regionServer.walFactory);
+ walFileSize = DefaultWALProvider.getLogFileSize(regionServer.walFactory);
+
//Copy over computed values so that no thread sees half computed values.
numStores = tempNumStores;
- long tempNumHLogFiles = regionServer.hlog.getNumLogFiles();
- // meta logs
- if (regionServer.hlogForMeta != null) {
- tempNumHLogFiles += regionServer.hlogForMeta.getNumLogFiles();
- }
- numHLogFiles = tempNumHLogFiles;
-
- long tempHlogFileSize = regionServer.hlog.getLogFileSize();
- if (regionServer.hlogForMeta != null) {
- tempHlogFileSize += regionServer.hlogForMeta.getLogFileSize();
- }
- hlogFileSize = tempHlogFileSize;
-
numStoreFiles = tempNumStoreFiles;
memstoreSize = tempMemstoreSize;
storeFileSize = tempStoreFileSize;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 58f1a40aae0..41f1a99b0ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -149,9 +149,9 @@ import org.apache.hadoop.hbase.regionserver.HRegion.Operation;
import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
-import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
-import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.hbase.util.Bytes;
@@ -693,13 +693,13 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
* @throws IOException
*/
private OperationStatus [] doReplayBatchOp(final HRegion region,
- final List mutations, long replaySeqId) throws IOException {
+ final List mutations, long replaySeqId) throws IOException {
long before = EnvironmentEdgeManager.currentTime();
boolean batchContainsPuts = false, batchContainsDelete = false;
try {
- for (Iterator it = mutations.iterator(); it.hasNext();) {
- HLogSplitter.MutationReplay m = it.next();
+ for (Iterator it = mutations.iterator(); it.hasNext();) {
+ WALSplitter.MutationReplay m = it.next();
if (m.type == MutationType.PUT) {
batchContainsPuts = true;
@@ -724,7 +724,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
regionServer.cacheFlusher.reclaimMemStoreMemory();
}
return region.batchReplay(mutations.toArray(
- new HLogSplitter.MutationReplay[mutations.size()]), replaySeqId);
+ new WALSplitter.MutationReplay[mutations.size()]), replaySeqId);
} finally {
if (regionServer.metricsRegionServer != null) {
long after = EnvironmentEdgeManager.currentTime();
@@ -1097,10 +1097,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
return builder.build();
} catch (DroppedSnapshotException ex) {
// Cache flush can fail in a few places. If it fails in a critical
- // section, we get a DroppedSnapshotException and a replay of hlog
+ // section, we get a DroppedSnapshotException and a replay of the WAL
// is required. Currently the only way to do this is a restart of
// the server.
- regionServer.abort("Replay of HLog required. Forcing server shutdown", ex);
+ regionServer.abort("Replay of WAL required. Forcing server shutdown", ex);
throw new ServiceException(ex);
} catch (IOException ie) {
throw new ServiceException(ie);
@@ -1431,7 +1431,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo())
? region.getCoprocessorHost()
: null; // do not invoke coprocessors if this is a secondary region replica
- List> walEntries = new ArrayList>();
+ List> walEntries = new ArrayList>();
// when tag is enabled, we need tag replay edits with log sequence number
boolean needAddReplayTag = (HFile.getFormatVersion(regionServer.conf) >= 3);
@@ -1451,9 +1451,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE;
regionServer.nonceManager.reportOperationFromWal(nonceGroup, nonce, entry.getKey().getWriteTime());
}
- Pair walEntry = (coprocessorHost == null) ? null :
- new Pair();
- List edits = HLogSplitter.getMutationsFromWALEntry(entry,
+ Pair walEntry = (coprocessorHost == null) ? null :
+ new Pair();
+ List edits = WALSplitter.getMutationsFromWALEntry(entry,
cells, walEntry, needAddReplayTag, durability);
if (coprocessorHost != null) {
// Start coprocessor replay here. The coprocessor is for each WALEdit instead of a
@@ -1482,7 +1482,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
region.syncWal();
if (coprocessorHost != null) {
- for (Pair wal : walEntries) {
+ for (Pair wal : walEntries) {
coprocessorHost.postWALRestore(region.getRegionInfo(), wal.getFirst(),
wal.getSecond());
}
@@ -1535,14 +1535,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
checkOpen();
requestCount.increment();
regionServer.getRegionServerCoprocessorHost().preRollWALWriterRequest();
- HLog wal = regionServer.getWAL();
- byte[][] regionsToFlush = wal.rollWriter(true);
+ regionServer.walRoller.requestRollAll();
+ regionServer.getRegionServerCoprocessorHost().postRollWALWriterRequest();
RollWALWriterResponse.Builder builder = RollWALWriterResponse.newBuilder();
- if (regionsToFlush != null) {
- for (byte[] region: regionsToFlush) {
- builder.addRegionToFlush(ByteStringer.wrap(region));
- }
- }
return builder.build();
} catch (IOException ie) {
throw new ServiceException(ie);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index d5f1ff84d31..e671e5052af 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -77,6 +77,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.regionserver.HRegion.Operation;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
+import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
@@ -107,6 +108,8 @@ public class RegionCoprocessorHost
private static final int LATENCY_BUFFER_SIZE = 100;
private final BlockingQueue coprocessorTimeNanos = new ArrayBlockingQueue(
LATENCY_BUFFER_SIZE);
+ private final boolean useLegacyPre;
+ private final boolean useLegacyPost;
/**
* Constructor
@@ -120,6 +123,14 @@ public class RegionCoprocessorHost
this.region = region;
this.rsServices = services;
this.sharedData = sharedData;
+ // Pick which version of the WAL related events we'll call.
+ // This way we avoid calling the new version on older RegionObservers so
+ // we can maintain binary compatibility.
+ // See notes in javadoc for RegionObserver
+ useLegacyPre = useLegacyMethod(impl.getClass(), "preWALRestore", ObserverContext.class,
+ HRegionInfo.class, WALKey.class, WALEdit.class);
+ useLegacyPost = useLegacyMethod(impl.getClass(), "postWALRestore", ObserverContext.class,
+ HRegionInfo.class, WALKey.class, WALEdit.class);
}
/** @return the region */
@@ -1307,34 +1318,75 @@ public class RegionCoprocessorHost
* @return true if default behavior should be bypassed, false otherwise
* @throws IOException
*/
- public boolean preWALRestore(final HRegionInfo info, final HLogKey logKey,
+ public boolean preWALRestore(final HRegionInfo info, final WALKey logKey,
final WALEdit logEdit) throws IOException {
return execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
@Override
public void call(RegionObserver oserver, ObserverContext ctx)
throws IOException {
- oserver.preWALRestore(ctx, info, logKey, logEdit);
+ // Once we don't need to support the legacy call, replace RegionOperation with a version
+ // that's ObserverContext and avoid this cast.
+ final RegionEnvironment env = (RegionEnvironment)ctx.getEnvironment();
+ if (env.useLegacyPre) {
+ if (logKey instanceof HLogKey) {
+ oserver.preWALRestore(ctx, info, (HLogKey)logKey, logEdit);
+ } else {
+ legacyWarning(oserver.getClass(), "There are wal keys present that are not HLogKey.");
+ }
+ } else {
+ oserver.preWALRestore(ctx, info, logKey, logEdit);
+ }
}
});
}
+ /**
+ * @return true if default behavior should be bypassed, false otherwise
+ * @deprecated use {@link #preWALRestore(HRegionInfo, WALKey, WALEdit)}
+ */
+ @Deprecated
+ public boolean preWALRestore(final HRegionInfo info, final HLogKey logKey,
+ final WALEdit logEdit) throws IOException {
+ return preWALRestore(info, (WALKey)logKey, logEdit);
+ }
+
/**
* @param info
* @param logKey
* @param logEdit
* @throws IOException
*/
- public void postWALRestore(final HRegionInfo info, final HLogKey logKey, final WALEdit logEdit)
+ public void postWALRestore(final HRegionInfo info, final WALKey logKey, final WALEdit logEdit)
throws IOException {
execOperation(coprocessors.isEmpty() ? null : new RegionOperation() {
@Override
public void call(RegionObserver oserver, ObserverContext ctx)
throws IOException {
- oserver.postWALRestore(ctx, info, logKey, logEdit);
+ // Once we don't need to support the legacy call, replace RegionOperation with a version
+ // that's ObserverContext and avoid this cast.
+ final RegionEnvironment env = (RegionEnvironment)ctx.getEnvironment();
+ if (env.useLegacyPost) {
+ if (logKey instanceof HLogKey) {
+ oserver.postWALRestore(ctx, info, (HLogKey)logKey, logEdit);
+ } else {
+ legacyWarning(oserver.getClass(), "There are wal keys present that are not HLogKey.");
+ }
+ } else {
+ oserver.postWALRestore(ctx, info, logKey, logEdit);
+ }
}
});
}
+ /**
+ * @deprecated use {@link #postWALRestore(HRegionInfo, WALKey, WALEdit)}
+ */
+ @Deprecated
+ public void postWALRestore(final HRegionInfo info, final HLogKey logKey, final WALEdit logEdit)
+ throws IOException {
+ postWALRestore(info, (WALKey)logKey, logEdit);
+ }
+
/**
* @param familyPaths pairs of { CF, file path } submitted for bulk load
* @return true if the default operation should be bypassed
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
index 479aced5ab1..879b573e405 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java
@@ -34,7 +34,7 @@ public class RegionServerAccounting {
private final AtomicLong atomicGlobalMemstoreSize = new AtomicLong(0);
- // Store the edits size during replaying HLog. Use this to roll back the
+ // Store the edits size while replaying the WAL. Use this to roll back the
// global memstore size once a region opening failed.
private final ConcurrentMap replayEditsPerRegion =
new ConcurrentSkipListMap(Bytes.BYTES_COMPARATOR);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index f02b8baa8fc..08d038c8669 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -33,8 +33,8 @@ import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.ipc.RpcServerInterface;
import org.apache.hadoop.hbase.master.TableLockManager;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
+import org.apache.hadoop.hbase.wal.WAL;
import org.apache.zookeeper.KeeperException;
/**
@@ -48,9 +48,9 @@ public interface RegionServerServices
*/
boolean isStopping();
- /** @return the HLog for a particular region. Pass null for getting the
+ /** @return the WAL for a particular region. Pass null to get the
* default (common) WAL */
- HLog getWAL(HRegionInfo regionInfo) throws IOException;
+ WAL getWAL(HRegionInfo regionInfo) throws IOException;
/**
* @return Implementation of {@link CompactionRequestor} or null.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
index 0052b002677..a182aa1a8d4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
@@ -36,7 +36,8 @@ import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
import org.apache.hadoop.hbase.coordination.SplitLogWorkerCoordination;
import org.apache.hadoop.hbase.master.SplitLogManager;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
-import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.ExceptionUtil;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -70,6 +71,7 @@ public class SplitLogWorker implements Runnable {
private SplitLogWorkerCoordination coordination;
private Configuration conf;
private RegionServerServices server;
+
public SplitLogWorker(Server hserver, Configuration conf, RegionServerServices server,
TaskExecutor splitTaskExecutor) {
this.server = server;
@@ -82,7 +84,8 @@ public class SplitLogWorker implements Runnable {
}
public SplitLogWorker(final Server hserver, final Configuration conf,
- final RegionServerServices server, final LastSequenceId sequenceIdChecker) {
+ final RegionServerServices server, final LastSequenceId sequenceIdChecker,
+ final WALFactory factory) {
this(server, conf, server, new TaskExecutor() {
@Override
public Status exec(String filename, RecoveryMode mode, CancelableProgressable p) {
@@ -99,8 +102,8 @@ public class SplitLogWorker implements Runnable {
// interrupted or has encountered a transient error and when it has
// encountered a bad non-retry-able persistent error.
try {
- if (!HLogSplitter.splitLogFile(rootdir, fs.getFileStatus(new Path(rootdir, filename)),
- fs, conf, p, sequenceIdChecker, server.getCoordinatedStateManager(), mode)) {
+ if (!WALSplitter.splitLogFile(rootdir, fs.getFileStatus(new Path(rootdir, filename)),
+ fs, conf, p, sequenceIdChecker, server.getCoordinatedStateManager(), mode, factory)) {
return Status.PREEMPTED;
}
} catch (InterruptedIOException iioe) {
@@ -153,6 +156,7 @@ public class SplitLogWorker implements Runnable {
LOG.info("SplitLogWorker " + server.getServerName() + " exiting");
}
}
+
/**
* If the worker is doing a task i.e. splitting a log file then stop the task.
* It doesn't exit the worker thread.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HLogSplitterHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HLogSplitterHandler.java
deleted file mode 100644
index b0f3f0b5396..00000000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/HLogSplitterHandler.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver.handler;
-
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.SplitLogCounters;
-import org.apache.hadoop.hbase.SplitLogTask;
-import org.apache.hadoop.hbase.coordination.SplitLogWorkerCoordination;
-import org.apache.hadoop.hbase.executor.EventHandler;
-import org.apache.hadoop.hbase.executor.EventType;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
-import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor;
-import org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status;
-import org.apache.hadoop.hbase.util.CancelableProgressable;
-
-/**
- * Handles log splitting a wal
- */
-@InterfaceAudience.Private
-public class HLogSplitterHandler extends EventHandler {
- private static final Log LOG = LogFactory.getLog(HLogSplitterHandler.class);
- private final ServerName serverName;
- private final CancelableProgressable reporter;
- private final AtomicInteger inProgressTasks;
- private final TaskExecutor splitTaskExecutor;
- private final RecoveryMode mode;
- private final SplitLogWorkerCoordination.SplitTaskDetails splitTaskDetails;
- private final SplitLogWorkerCoordination coordination;
-
-
- public HLogSplitterHandler(final Server server, SplitLogWorkerCoordination coordination,
- SplitLogWorkerCoordination.SplitTaskDetails splitDetails, CancelableProgressable reporter,
- AtomicInteger inProgressTasks, TaskExecutor splitTaskExecutor, RecoveryMode mode) {
- super(server, EventType.RS_LOG_REPLAY);
- this.splitTaskDetails = splitDetails;
- this.coordination = coordination;
- this.reporter = reporter;
- this.inProgressTasks = inProgressTasks;
- this.inProgressTasks.incrementAndGet();
- this.serverName = server.getServerName();
- this.splitTaskExecutor = splitTaskExecutor;
- this.mode = mode;
- }
-
- @Override
- public void process() throws IOException {
- long startTime = System.currentTimeMillis();
- try {
- Status status = this.splitTaskExecutor.exec(splitTaskDetails.getWALFile(), mode, reporter);
- switch (status) {
- case DONE:
- coordination.endTask(new SplitLogTask.Done(this.serverName,this.mode),
- SplitLogCounters.tot_wkr_task_done, splitTaskDetails);
- break;
- case PREEMPTED:
- SplitLogCounters.tot_wkr_preempt_task.incrementAndGet();
- LOG.warn("task execution prempted " + splitTaskDetails.getWALFile());
- break;
- case ERR:
- if (server != null && !server.isStopped()) {
- coordination.endTask(new SplitLogTask.Err(this.serverName, this.mode),
- SplitLogCounters.tot_wkr_task_err, splitTaskDetails);
- break;
- }
- // if the RS is exiting then there is probably a tons of stuff
- // that can go wrong. Resign instead of signaling error.
- //$FALL-THROUGH$
- case RESIGNED:
- if (server != null && server.isStopped()) {
- LOG.info("task execution interrupted because worker is exiting "
- + splitTaskDetails.toString());
- }
- coordination.endTask(new SplitLogTask.Resigned(this.serverName, this.mode),
- SplitLogCounters.tot_wkr_task_resigned, splitTaskDetails);
- break;
- }
- } finally {
- LOG.info("worker " + serverName + " done with task " + splitTaskDetails.toString() + " in "
- + (System.currentTimeMillis() - startTime) + "ms");
- this.inProgressTasks.decrementAndGet();
- }
- }
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java
index d8da4127f02..12af61993c3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/CompressionContext.java
@@ -26,17 +26,18 @@ import org.apache.hadoop.hbase.io.TagCompressionContext;
import org.apache.hadoop.hbase.io.util.Dictionary;
/**
- * Context that holds the various dictionaries for compression in HLog.
+ * Context that holds the various dictionaries for compression in WAL.
*/
@InterfaceAudience.Private
-class CompressionContext {
+public class CompressionContext {
static final String ENABLE_WAL_TAGS_COMPRESSION =
"hbase.regionserver.wal.tags.enablecompression";
- final Dictionary regionDict;
- final Dictionary tableDict;
- final Dictionary familyDict;
+ // visible only for WALKey, until we move everything into o.a.h.h.wal
+ public final Dictionary regionDict;
+ public final Dictionary tableDict;
+ public final Dictionary familyDict;
final Dictionary qualifierDict;
final Dictionary rowDict;
// Context used for compressing tags
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java
index b75a7cf8ad0..4032cde5884 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/Compressor.java
@@ -33,9 +33,13 @@ import org.apache.hadoop.io.WritableUtils;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.apache.hadoop.hbase.wal.WALProvider;
+
/**
* A set of static functions for running our custom WAL compression/decompression.
- * Also contains a command line tool to compress and uncompress HLogs.
+ * Also contains a command line tool to compress and uncompress WALs.
*/
@InterfaceAudience.Private
public class Compressor {
@@ -56,8 +60,8 @@ public class Compressor {
private static void printHelp() {
System.err.println("usage: Compressor