diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferPool.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferPool.java index 6c009b3c7e1..caca20b5cee 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferPool.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferPool.java @@ -80,8 +80,9 @@ public class ByteBufferPool { this.maxPoolSize = maxPoolSize; this.directByteBuffer = directByteBuffer; // TODO can add initialPoolSize config also and make those many BBs ready for use. - LOG.info("Created ByteBufferPool with bufferSize : " + bufferSize + " and maxPoolSize : " - + maxPoolSize); + LOG.info("Created with bufferSize={} and maxPoolSize={}", + org.apache.hadoop.util.StringUtils.byteDesc(bufferSize), + org.apache.hadoop.util.StringUtils.byteDesc(maxPoolSize)); this.count = new AtomicInteger(0); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java index ae8cd1f6eef..2b2c1b8e121 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java @@ -138,63 +138,63 @@ public class Action { } protected void killMaster(ServerName server) throws IOException { - LOG.info("Killing master:" + server); + LOG.info("Killing master " + server); cluster.killMaster(server); cluster.waitForMasterToStop(server, killMasterTimeout); - LOG.info("Killed master server:" + server); + LOG.info("Killed master " + server); } protected void startMaster(ServerName server) throws IOException { - LOG.info("Starting master:" + server.getHostname()); + LOG.info("Starting master " + server.getHostname()); cluster.startMaster(server.getHostname(), server.getPort()); cluster.waitForActiveAndReadyMaster(startMasterTimeout); - LOG.info("Started master: " + server); + LOG.info("Started master " + 
server.getHostname()); } protected void killRs(ServerName server) throws IOException { - LOG.info("Killing region server:" + server); + LOG.info("Killing regionserver " + server); cluster.killRegionServer(server); cluster.waitForRegionServerToStop(server, killRsTimeout); - LOG.info("Killed region server:" + server + ". Reported num of rs:" + LOG.info("Killed regionserver " + server + ". Reported num of rs:" + cluster.getClusterMetrics().getLiveServerMetrics().size()); } protected void startRs(ServerName server) throws IOException { - LOG.info("Starting region server:" + server.getHostname()); + LOG.info("Starting regionserver " + server.getAddress()); cluster.startRegionServer(server.getHostname(), server.getPort()); cluster.waitForRegionServerToStart(server.getHostname(), server.getPort(), startRsTimeout); - LOG.info("Started region server:" + server + ". Reported num of rs:" + LOG.info("Started regionserver " + server.getAddress() + ". Reported num of rs:" + cluster.getClusterMetrics().getLiveServerMetrics().size()); } protected void killZKNode(ServerName server) throws IOException { - LOG.info("Killing zookeeper node:" + server); + LOG.info("Killing zookeeper node " + server); cluster.killZkNode(server); cluster.waitForZkNodeToStop(server, killZkNodeTimeout); - LOG.info("Killed zookeeper node:" + server + ". Reported num of rs:" + LOG.info("Killed zookeeper node " + server + ". 
Reported num of rs:" + cluster.getClusterMetrics().getLiveServerMetrics().size()); } protected void startZKNode(ServerName server) throws IOException { - LOG.info("Starting zookeeper node:" + server.getHostname()); + LOG.info("Starting zookeeper node " + server.getHostname()); cluster.startZkNode(server.getHostname(), server.getPort()); cluster.waitForZkNodeToStart(server, startZkNodeTimeout); - LOG.info("Started zookeeper node:" + server); + LOG.info("Started zookeeper node " + server); } protected void killDataNode(ServerName server) throws IOException { - LOG.info("Killing datanode:" + server); + LOG.info("Killing datanode " + server); cluster.killDataNode(server); cluster.waitForDataNodeToStop(server, killDataNodeTimeout); - LOG.info("Killed datanode:" + server + ". Reported num of rs:" + LOG.info("Killed datanode " + server + ". Reported num of rs:" + cluster.getClusterMetrics().getLiveServerMetrics().size()); } protected void startDataNode(ServerName server) throws IOException { - LOG.info("Starting datanode:" + server.getHostname()); + LOG.info("Starting datanode " + server.getHostname()); cluster.startDataNode(server); cluster.waitForDataNodeToStart(server, startDataNodeTimeout); - LOG.info("Started datanode:" + server); + LOG.info("Started datanode " + server); } protected void unbalanceRegions(ClusterMetrics clusterStatus, diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRandomRegionOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRandomRegionOfTableAction.java index bb8f2443bd9..09bfe216acb 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRandomRegionOfTableAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRandomRegionOfTableAction.java @@ -21,10 +21,10 @@ package org.apache.hadoop.hbase.chaos.actions; import java.util.List; import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HRegionInfo; import 
org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.RegionInfo; /** * Action that tries to move a random region of a table. @@ -52,16 +52,17 @@ public class MoveRandomRegionOfTableAction extends Action { Admin admin = util.getAdmin(); LOG.info("Performing action: Move random region of table " + tableName); - List regions = admin.getTableRegions(tableName); + List regions = admin.getRegions(tableName); if (regions == null || regions.isEmpty()) { LOG.info("Table " + tableName + " doesn't have regions to move"); return; } - HRegionInfo region = PolicyBasedChaosMonkey.selectRandomItem( - regions.toArray(new HRegionInfo[regions.size()])); - LOG.debug("Unassigning region " + region.getRegionNameAsString()); - admin.unassign(region.getRegionName(), false); + RegionInfo region = PolicyBasedChaosMonkey.selectRandomItem( + regions.toArray(new RegionInfo[regions.size()])); + LOG.debug("Move random region {}", region.getRegionNameAsString()); + // Use facility over in MoveRegionsOfTableAction... 
+ MoveRegionsOfTableAction.moveRegion(admin, MoveRegionsOfTableAction.getServers(admin), region); if (sleepTime > 0) { Thread.sleep(sleepTime); } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java index 08958e81f6b..3496b9a19f1 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/MoveRegionsOfTableAction.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.chaos.actions; +import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.EnumSet; @@ -29,6 +30,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.chaos.factories.MonkeyConstants; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.util.Bytes; /** @@ -56,14 +58,12 @@ public class MoveRegionsOfTableAction extends Action { } Admin admin = this.context.getHBaseIntegrationTestingUtility().getAdmin(); - Collection serversList = - admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(); - ServerName[] servers = serversList.toArray(new ServerName[serversList.size()]); + ServerName[] servers = getServers(admin); - LOG.info("Performing action: Move regions of table " + tableName); + LOG.info("Performing action: Move regions of table {}", tableName); List regions = admin.getTableRegions(tableName); if (regions == null || regions.isEmpty()) { - LOG.info("Table " + tableName + " doesn't have regions to move"); + LOG.info("Table {} doesn't have regions to move", tableName); return; } @@ -77,14 +77,7 @@ public class MoveRegionsOfTableAction extends Action { return; } - try { - String destServerName = - servers[RandomUtils.nextInt(0, 
servers.length)].getServerName(); - LOG.debug("Moving " + regionInfo.getRegionNameAsString() + " to " + destServerName); - admin.move(regionInfo.getEncodedNameAsBytes(), Bytes.toBytes(destServerName)); - } catch (Exception ex) { - LOG.warn("Move failed, might be caused by other chaos: " + ex.getMessage()); - } + moveRegion(admin, servers, regionInfo); if (sleepTime > 0) { Thread.sleep(sleepTime); } @@ -96,4 +89,20 @@ public class MoveRegionsOfTableAction extends Action { } } } + + static ServerName [] getServers(Admin admin) throws IOException { + Collection serversList = + admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet(); + return serversList.toArray(new ServerName[serversList.size()]); + } + + static void moveRegion(Admin admin, ServerName [] servers, RegionInfo regionInfo) { + try { + String destServerName = servers[RandomUtils.nextInt(0, servers.length)].getServerName(); + LOG.debug("Moving {} to {}", regionInfo.getRegionNameAsString(), destServerName); + admin.move(regionInfo.getEncodedNameAsBytes(), Bytes.toBytes(destServerName)); + } catch (Exception ex) { + LOG.warn("Move failed, might be caused by other chaos: {}", ex.getMessage()); + } + } } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/PolicyBasedChaosMonkey.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/PolicyBasedChaosMonkey.java index a49f54117e0..70636dd3e84 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/PolicyBasedChaosMonkey.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/monkies/PolicyBasedChaosMonkey.java @@ -113,7 +113,7 @@ public class PolicyBasedChaosMonkey extends ChaosMonkey { for (int i=0; i 0) { - LOG.info("Sleeping for: " + sleepTime); + LOG.info("Sleeping for {} ms", sleepTime); Threads.sleep(sleepTime); } } @@ -54,6 +54,6 @@ public abstract class PeriodicPolicy extends Policy { @Override public void init(PolicyContext context) throws Exception { 
super.init(context); - LOG.info("Using ChaosMonkey Policy: " + this.getClass() + ", period: " + periodMs); + LOG.info("Using ChaosMonkey Policy {}, period={} ms", this.getClass(), periodMs); } } diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicRandomActionPolicy.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicRandomActionPolicy.java index b11aa32d935..a0dc89e0192 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicRandomActionPolicy.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/PeriodicRandomActionPolicy.java @@ -58,8 +58,7 @@ public class PeriodicRandomActionPolicy extends PeriodicPolicy { try { action.perform(); } catch (Exception ex) { - LOG.warn("Exception occurred during performing action: " - + StringUtils.stringifyException(ex)); + LOG.warn("Exception performing action: " + StringUtils.stringifyException(ex)); } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java index 861e3b299bb..6238e103967 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java @@ -94,8 +94,8 @@ public abstract class RemoteProcedureDispatcher(pipeline); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 6e1aceb2340..414bc31dea1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -955,8 +955,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi nextSeqid++; } - LOG.info("Onlined " + 
this.getRegionInfo().getShortNameToLog() + - "; next sequenceid=" + nextSeqid); + LOG.info("Opened {}; next sequenceid={}", this.getRegionInfo().getShortNameToLog(), nextSeqid); // A region can be reopened if failed a split; reset flags this.closing.set(false); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index f26e2cb05d4..593c08d62ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1140,9 +1140,7 @@ public class HRegionServer extends HasThread implements if (this.zooKeeper != null) { this.zooKeeper.close(); } - LOG.info("stopping server " + this.serverName + "; zookeeper connection closed."); - - LOG.info(Thread.currentThread().getName() + " exiting"); + LOG.info("Exiting; stopping=" + this.serverName + "; zookeeper connection closed."); } private boolean containsMetaTableRegions() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 220881dca7b..68a057a7013 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -292,7 +292,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat this.memstore = ReflectionUtils.newInstance(clz, new Object[] { conf, this.comparator, this, this.getHRegion().getRegionServicesForStores(), inMemoryCompaction }); } - LOG.info("Memstore class name is {}", className); + LOG.debug("Memstore type={}", className); this.offPeakHours = OffPeakHours.getInstance(conf); // Setting up cache configuration for this family diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java index 2daa5d246b5..c32fce2aec0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HeapMemoryManager.java @@ -207,10 +207,10 @@ public class HeapMemoryManager { } public void start(ChoreService service) { - LOG.info("Starting HeapMemoryTuner chore."); - this.heapMemTunerChore = new HeapMemoryTunerChore(); - service.scheduleChore(heapMemTunerChore); - if (tunerOn) { + LOG.info("Starting, tuneOn={}", this.tunerOn); + this.heapMemTunerChore = new HeapMemoryTunerChore(); + service.scheduleChore(heapMemTunerChore); + if (tunerOn) { // Register HeapMemoryTuner as a memstore flush listener memStoreFlusher.registerFlushRequestListener(heapMemTunerChore); } @@ -218,7 +218,7 @@ public class HeapMemoryManager { public void stop() { // The thread is Daemon. Just interrupting the ongoing process. - LOG.info("Stopping HeapMemoryTuner chore."); + LOG.info("Stopping"); this.heapMemTunerChore.cancel(true); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java index f7ee4ef32e9..0afa3813edd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Leases.java @@ -144,10 +144,9 @@ public class Leases extends HasThread { * without any cancellation calls. 
*/ public void close() { - LOG.info(Thread.currentThread().getName() + " closing leases"); this.stopRequested = true; leases.clear(); - LOG.info(Thread.currentThread().getName() + " closed leases"); + LOG.info("Closed leases"); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java index d4aafedd9d9..fbb5f75a467 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java @@ -85,13 +85,12 @@ public abstract class MemStoreCompactionStrategy { int numOfSegments = versionedList.getNumOfSegments(); if (numOfSegments > pipelineThreshold) { // to avoid too many segments, merge now - LOG.debug("{} in-memory compaction of {}; merging {} segments", - strategy, cfName, numOfSegments); + LOG.debug("{} {}; merging {} segments", strategy, cfName, numOfSegments); return getMergingAction(); } // just flatten a segment - LOG.debug("{} in-memory compaction of {}; flattening a segment", strategy, cfName); + LOG.debug("{} {}; flattening a segment", strategy, cfName); return getFlattenAction(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java index a8c3362db23..5c908e5bfff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java @@ -92,7 +92,8 @@ public class MemStoreCompactor { // get a snapshot of the list of the segments from the pipeline, // this local copy of the list is marked with specific version versionedList = compactingMemStore.getImmutableSegments(); - LOG.debug("Starting In-Memory Compaction of 
{}", + LOG.debug("Starting on {}/{}", + compactingMemStore.getStore().getHRegion().getRegionInfo().getEncodedName(), compactingMemStore.getStore().getColumnFamilyName()); HStore store = compactingMemStore.getStore(); RegionCoprocessorHost cpHost = store.getCoprocessorHost(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java index 66a2ad5001f..70074bf3b41 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java @@ -413,7 +413,7 @@ public abstract class Segment { @Override public String toString() { - String res = "Type=" + this.getClass().getSimpleName() + ", "; + String res = "type=" + this.getClass().getSimpleName() + ", "; res += "empty=" + (isEmpty()? "yes": "no") + ", "; res += "cellCount=" + getCellsCount() + ", "; res += "cellSize=" + keySize() + ", "; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java index 212eb047f97..0432641e9ad 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver.compactions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.util.StringUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -149,14 +150,14 @@ public class CompactionConfiguration { @Override public String toString() { return String.format( - "size [%d, %d, %d); files [%d, %d); ratio %f; off-peak ratio %f; 
throttle point %d;" + "size [%s, %s, %s); files [%d, %d); ratio %f; off-peak ratio %f; throttle point %d;" + " major period %d, major jitter %f, min locality to compact %f;" + " tiered compaction: max_age %d, incoming window min %d," + " compaction policy for tiered window %s, single output for minor %b," + " compaction window factory %s", - minCompactSize, - maxCompactSize, - offPeakMaxCompactSize, + StringUtils.byteDesc(minCompactSize), + StringUtils.byteDesc(maxCompactSize), + StringUtils.byteDesc(offPeakMaxCompactSize), minFilesToCompact, maxFilesToCompact, compactionRatio, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java index d9d10d98ae9..ce425308088 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java @@ -118,9 +118,9 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy { + " files of size "+ smallestSize + " because the store might be stuck"); return new ArrayList<>(smallest); } - LOG.debug("Exploring compaction algorithm has selected " + bestSelection.size() - + " files of size " + bestSize + " starting at candidate #" + bestStart + - " after considering " + opts + " permutations with " + optsInRatio + " in ratio"); + LOG.debug("Exploring compaction algorithm has selected {} files of size {} starting at " + + "candidate #{} after considering {} permutations with {} in ratio", bestSelection.size(), + bestSize, bestStart, opts, optsInRatio); return new ArrayList<>(bestSelection); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java index a1bd21bd805..27c25eac67c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java @@ -90,7 +90,7 @@ public abstract class PressureAwareThroughputController extends Configured imple if (speed >= 1E15) { // large enough to say it is unlimited return "unlimited"; } else { - return String.format("%.2f MB/sec", speed / 1024 / 1024); + return String.format("%.2f MB/second", speed / 1024 / 1024); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java index ad12c66cca1..c259890bb26 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java @@ -132,7 +132,7 @@ public class Replication implements ReplicationSourceService, ReplicationSinkSer } this.statsThreadPeriod = this.conf.getInt("replication.stats.thread.period.seconds", 5 * 60); - LOG.debug("ReplicationStatisticsThread " + this.statsThreadPeriod); + LOG.debug("Replication stats-in-log period={} seconds", this.statsThreadPeriod); this.replicationLoad = new ReplicationLoad(); } diff --git a/pom.xml b/pom.xml index a9df16f7e53..2455253163f 100755 --- a/pom.xml +++ b/pom.xml @@ -1344,9 +1344,6 @@ 4.0.3 2.4.1 1.3.8 - - 3.6.2.Final 2.1.11 1.0.18 @@ -2321,6 +2318,9 @@ ${hadoop-two.version} hbase-hadoop2-compat src/main/assembly/hadoop-two-compat.xml + + 3.6.2.Final @@ -2595,6 +2595,9 @@ hbase-hadoop2-compat src/main/assembly/hadoop-two-compat.xml + + 3.10.5.Final