diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
index a8b8ebfa6a2..c3cb866c211 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
@@ -97,7 +97,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture {
       try {
         done = waitUntilDone(startTime * 1000L + asyncProcess.primaryCallTimeoutMicroseconds);
       } catch (InterruptedException ex) {
-        LOG.error("Replica thread was interrupted - no replica calls: " + ex.getMessage());
+        LOG.error("Replica thread interrupted - no replica calls {}", ex.getMessage());
         return;
       }
     }
@@ -141,7 +141,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture {
       if (loc == null) return;
       HRegionLocation[] locs = loc.getRegionLocations();
       if (locs.length == 1) {
-        LOG.warn("No replicas found for " + action.getAction());
+        LOG.warn("No replicas found for {}", action.getAction());
         return;
       }
       synchronized (replicaResultLock) {
@@ -222,8 +222,8 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture {
         return;
       } catch (Throwable t) {
         // This should not happen. Let's log & retry anyway.
-        LOG.error("#" + asyncProcess.id + ", Caught throwable while calling. This is unexpected." +
-          " Retrying. Server is " + server + ", tableName=" + tableName, t);
+        LOG.error("id=" + asyncProcess.id + ", caught throwable. Unexpected." +
+          " Retrying. Server=" + server + ", tableName=" + tableName, t);
         receiveGlobalFailure(multiAction, server, numAttempt, t);
         return;
       }
@@ -239,8 +239,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture {
         }
       } catch (Throwable t) {
         // Something really bad happened. We are on the send thread that will now die.
-        LOG.error("Internal AsyncProcess #" + asyncProcess.id + " error for "
-          + tableName + " processing for " + server, t);
+        LOG.error("id=" + asyncProcess.id + " error for " + tableName + " processing " + server, t);
         throw new RuntimeException(t);
       } finally {
         asyncProcess.decTaskCounters(multiAction.getRegions(), server);
@@ -550,8 +549,8 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture {
       if (t instanceof RejectedExecutionException) {
         // This should never happen. But as the pool is provided by the end user,
         // let's secure this a little.
-        LOG.warn("#" + asyncProcess.id + ", the task was rejected by the pool. This is unexpected." +
-          " Server is " + server.getServerName(), t);
+        LOG.warn("id=" + asyncProcess.id + ", task rejected by pool. Unexpected."
+          + " Server=" + server.getServerName(), t);
       } else {
         // see #HBASE-14359 for more details
         LOG.warn("Caught unexpected exception/error: ", t);
@@ -659,7 +658,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture {
       try {
         pool.submit(replicaRunnable);
       } catch (RejectedExecutionException ree) {
-        LOG.warn("#" + asyncProcess.id + ", replica task was rejected by the pool - no replica calls", ree);
+        LOG.warn("id=" + asyncProcess.id + " replica task rejected by pool; no replica calls", ree);
       }
     }
   }
@@ -955,7 +954,7 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture {
     StringBuilder sb = new StringBuilder();
     sb.append("id=").append(asyncProcess.id).append(", table=").append(tableName).append(", ")
       .append("attempt=").append(numAttempt)
-      .append("/").append(asyncProcess.numTries).append(" ");
+      .append("/").append(asyncProcess.numTries).append(", ");
     if (failureCount > 0 || error != null){
       sb.append("failed=").append(failureCount).append("ops").append(", last exception=").
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
index f8a4557d790..b8620b1b743 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java
@@ -163,7 +163,7 @@ class NettyRpcConnection extends RpcConnection {
             try {
               relogin();
             }
           } catch (IOException e) {
-            LOG.warn("relogin failed", e);
+            LOG.warn("Relogin failed", e);
           }
           synchronized (this) {
             reloginInProgress = false;
@@ -251,7 +251,7 @@ class NettyRpcConnection extends RpcConnection {
   }

   private void connect() {
-    LOG.debug("Connecting to {}", remoteId.address);
+    LOG.trace("Connecting to {}", remoteId.address);

     this.channel = new Bootstrap().group(rpcClient.group).channel(rpcClient.channelClass)
       .option(ChannelOption.TCP_NODELAY, rpcClient.isTcpNoDelay())
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 19efdc79c0e..e5105a50452 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -1427,7 +1427,8 @@ public class ProcedureExecutor {
    */
   private void execProcedure(final RootProcedureState procStack, final Procedure procedure) {
-    Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE);
+    Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE,
+      procedure.toString());

     // Procedures can suspend themselves. They skip out by throwing a ProcedureSuspendedException.
     // The exception is caught below and then we hurry to the exit without disturbing state. The
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
index ee237f227ce..1a4ccc1a205 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java
@@ -150,7 +150,7 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase
     try {
       ZKUtil.deleteNode(zookeeper, fileNode);
     } catch (NoNodeException e) {
-      LOG.warn(fileNode + " has already been deleted when removing log");
+      LOG.warn("{} already deleted when removing log", fileNode);
     } catch (KeeperException e) {
       throw new ReplicationException("Failed to remove wal from queue (serverName=" + serverName +
         ", queueId=" + queueId + ", fileName=" + fileName + ")", e);
@@ -182,8 +182,8 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase
     try {
       return ZKUtil.parseWALPositionFrom(bytes);
     } catch (DeserializationException de) {
-      LOG.warn("Failed to parse log position (serverName=" + serverName + ", queueId=" + queueId +
-        ", fileName=" + fileName + ")");
+      LOG.warn("Failed to parse log position (serverName={}, queueId={}, fileName={})",
+        serverName, queueId, fileName);
     }
     // if we can not parse the position, start at the beginning of the wal file again
     return 0;
@@ -192,8 +192,7 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase
   @Override
   public Pair> claimQueue(ServerName sourceServerName, String queueId,
       ServerName destServerName) throws ReplicationException {
-    LOG.info(
-      "Atomically moving " + sourceServerName + "/" + queueId + "'s WALs to " + destServerName);
+    LOG.info("Atomically moving {}/{}'s WALs to {}", sourceServerName, queueId, destServerName);
     try {
       ZKUtil.createWithParents(zookeeper, getRsNode(destServerName));
     } catch (KeeperException e) {
@@ -208,7 +207,7 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase
       String newQueueId = queueId + "-" + sourceServerName;
       if (CollectionUtils.isEmpty(wals)) {
         ZKUtil.deleteNodeFailSilent(zookeeper, oldQueueNode);
-        LOG.info("Removed " + sourceServerName + "/" + queueId + " since it's empty");
+        LOG.info("Removed empty {}/{}", sourceServerName, queueId);
         return new Pair<>(newQueueId, Collections.emptySortedSet());
       }
       String newQueueNode = getQueueNode(destServerName, newQueueId);
@@ -220,9 +219,7 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase
       for (String wal : wals) {
         String oldWalNode = getFileNode(oldQueueNode, wal);
         byte[] logOffset = ZKUtil.getData(this.zookeeper, oldWalNode);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Creating " + wal + " with data " + Bytes.toStringBinary(logOffset));
-        }
+        LOG.debug("Creating {} with data {}", wal, Bytes.toStringBinary(logOffset));
         String newWalNode = getFileNode(newQueueNode, wal);
         listOfOps.add(ZKUtilOp.createAndFailSilent(newWalNode, logOffset));
         listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldWalNode));
@@ -231,21 +228,17 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase
       // add delete op for peer
       listOfOps.add(ZKUtilOp.deleteNodeFailSilent(oldQueueNode));

-      if (LOG.isTraceEnabled()) {
-        LOG.trace("The multi list size is: " + listOfOps.size());
-      }
+      LOG.trace("The multi list size is {}", listOfOps.size());
       ZKUtil.multiOrSequential(zookeeper, listOfOps, false);

-      LOG.info(
-        "Atomically moved " + sourceServerName + "/" + queueId + "'s WALs to " + destServerName);
+      LOG.info("Atomically moved {}/{}'s WALs to {}", sourceServerName, queueId, destServerName);
       return new Pair<>(newQueueId, logQueue);
     } catch (NoNodeException | NodeExistsException | NotEmptyException | BadVersionException e) {
       // Multi call failed; it looks like some other regionserver took away the logs.
       // These exceptions mean that zk tells us the request can not be execute so it is safe to just
       // return a null. For other types of exception should be thrown out to notify the upper layer.
-      LOG.info(
-        "Claim queue queueId=" + queueId + " from " + sourceServerName + " to " + destServerName +
-        " failed with " + e.toString() + ", maybe someone else has already took away the logs");
+      LOG.info("Claim queue queueId={} from {} to {} failed with {}, someone else took the log?",
+        queueId, sourceServerName, destServerName, e.toString());
       return null;
     } catch (KeeperException | InterruptedException e) {
       throw new ReplicationException("Claim queue queueId=" + queueId + " from " +
@@ -323,7 +316,7 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase
         int v0 = getQueuesZNodeCversion();
         List rss = getListOfReplicators0();
         if (rss.isEmpty()) {
-          LOG.debug("Didn't find any region server that replicates, won't prevent any deletions.");
+          LOG.debug("Didn't find a RegionServer that replicates, won't prevent deletions.");
           return Collections.emptySet();
         }
         Set wals = new HashSet<>();
@@ -336,8 +329,8 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase
         if (v0 == v1) {
           return wals;
         }
-        LOG.info(String.format("Replication queue node cversion changed from %d to %d, retry = %d",
-          v0, v1, retry));
+        LOG.info("Replication queue node cversion changed from {} to {}, retry = {}",
+          v0, v1, retry);
       }
     } catch (KeeperException e) {
       throw new ReplicationException("Failed to get all wals", e);
@@ -357,7 +350,7 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase
     String peerNode = getHFileRefsPeerNode(peerId);
     try {
       if (ZKUtil.checkExists(zookeeper, peerNode) == -1) {
-        LOG.info("Adding peer " + peerId + " to hfile reference queue.");
+        LOG.info("Adding peer {} to hfile reference queue.", peerId);
         ZKUtil.createWithParents(zookeeper, peerNode);
       }
     } catch (KeeperException e) {
@@ -371,11 +364,9 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase
     String peerNode = getHFileRefsPeerNode(peerId);
     try {
       if (ZKUtil.checkExists(zookeeper, peerNode) == -1) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Peer " + peerNode + " not found in hfile reference queue.");
-        }
+        LOG.debug("Peer {} not found in hfile reference queue.", peerNode);
       } else {
-        LOG.info("Removing peer " + peerNode + " from hfile reference queue.");
+        LOG.info("Removing peer {} from hfile reference queue.", peerNode);
         ZKUtil.deleteNodeRecursively(zookeeper, peerNode);
       }
     } catch (KeeperException e) {
@@ -388,17 +379,12 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase
   public void addHFileRefs(String peerId, List> pairs)
       throws ReplicationException {
     String peerNode = getHFileRefsPeerNode(peerId);
-    boolean debugEnabled = LOG.isDebugEnabled();
-    if (debugEnabled) {
-      LOG.debug("Adding hfile references " + pairs + " in queue " + peerNode);
-    }
+    LOG.debug("Adding hfile references {} in queue {}", pairs, peerNode);
     List listOfOps = pairs.stream().map(p -> p.getSecond().getName())
       .map(n -> getHFileNode(peerNode, n))
       .map(f -> ZKUtilOp.createAndFailSilent(f, HConstants.EMPTY_BYTE_ARRAY)).collect(toList());
-    if (debugEnabled) {
-      LOG.debug("The multi list size for adding hfile references in zk for node " + peerNode +
-        " is " + listOfOps.size());
-    }
+    LOG.debug("The multi list size for adding hfile references in zk for node {} is {}",
+      peerNode, listOfOps.size());
     try {
       ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true);
     } catch (KeeperException e) {
@@ -409,17 +395,12 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase
   @Override
   public void removeHFileRefs(String peerId, List files) throws ReplicationException {
     String peerNode = getHFileRefsPeerNode(peerId);
-    boolean debugEnabled = LOG.isDebugEnabled();
-    if (debugEnabled) {
-      LOG.debug("Removing hfile references " + files + " from queue " + peerNode);
-    }
+    LOG.debug("Removing hfile references {} from queue {}", files, peerNode);
     List listOfOps = files.stream().map(n -> getHFileNode(peerNode, n))
       .map(ZKUtilOp::deleteNodeFailSilent).collect(toList());
-    if (debugEnabled) {
-      LOG.debug("The multi list size for removing hfile references in zk for node " + peerNode +
-        " is " + listOfOps.size());
-    }
+    LOG.debug("The multi list size for removing hfile references in zk for node {} is {}",
+      peerNode, listOfOps.size());
     try {
       ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true);
     } catch (KeeperException e) {
@@ -474,7 +455,7 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase
         int v0 = getHFileRefsZNodeCversion();
         List peers = getAllPeersFromHFileRefsQueue();
         if (peers.isEmpty()) {
-          LOG.debug("Didn't find any peers with hfile references, won't prevent any deletions.");
+          LOG.debug("Didn't find any peers with hfile references, won't prevent deletions.");
           return Collections.emptySet();
         }
         Set hfileRefs = new HashSet<>();
@@ -485,9 +466,8 @@ class ZKReplicationQueueStorage extends ZKReplicationStorageBase
         if (v0 == v1) {
           return hfileRefs;
         }
-        LOG.debug(String.format(
-          "Replication hfile references node cversion changed from " + "%d to %d, retry = %d", v0,
-          v1, retry));
+        LOG.debug("Replication hfile references node cversion changed from {} to {}, retry = {}",
+          v0, v1, retry);
       }
     } catch (KeeperException e) {
       throw new ReplicationException("Failed to get all hfile refs", e);
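Note (illustrative sketch, not part of the patch): SLF4J substitutes only {} pairs and does not interpret String.format specifiers, which is why the two cversion messages above use {} rather than the %d placeholders left over from the removed String.format calls. The values below are hypothetical.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class FormatSpecifierSketch {
      private static final Logger LOG = LoggerFactory.getLogger(FormatSpecifierSketch.class);

      void example(int v0, int v1, int retry) {
        // Prints the %d specifiers literally and ignores the arguments.
        LOG.info("cversion changed from %d to %d, retry = %d", v0, v1, retry);
        // Substitutes the values as intended.
        LOG.info("cversion changed from {} to {}, retry = {}", v0, v1, retry);
        // String.format still works when format specifiers are really wanted,
        // at the cost of always building the string.
        LOG.info(String.format("cversion changed from %d to %d, retry = %d", v0, v1, retry));
      }
    }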
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
index dea6bf819de..f5496eae0e3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
@@ -109,9 +109,7 @@ public class HFileArchiver {
    */
   public static boolean archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir)
       throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("ARCHIVING " + regionDir.toString());
-    }
+    LOG.debug("ARCHIVING {}", regionDir.toString());

     // otherwise, we archive the files
     // make sure we can archive
@@ -145,7 +143,7 @@ public class HFileArchiver {
     FileStatus[] storeDirs = FSUtils.listStatus(fs, regionDir, nonHidden);
     // if there no files, we can just delete the directory and return;
     if (storeDirs == null) {
-      LOG.debug("Region directory " + regionDir + " empty.");
+      LOG.debug("Directory {} empty.", regionDir);
       return deleteRegionWithoutArchiving(fs, regionDir);
     }

@@ -194,8 +192,8 @@ public class HFileArchiver {
       RegionInfo parent, Path familyDir, byte[] family) throws IOException {
     FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
     if (storeFiles == null) {
-      LOG.debug("No store files to dispose for region=" + parent.getRegionNameAsString() +
-        ", family=" + Bytes.toString(family));
+      LOG.debug("No files to dispose of in {}, family={}", parent.getRegionNameAsString(),
+        Bytes.toString(family));
       return;
     }

@@ -230,15 +228,15 @@ public class HFileArchiver {

     // sometimes in testing, we don't have rss, so we need to check for that
     if (fs == null) {
-      LOG.warn("Passed filesystem is null, so just deleting the files without archiving for region:"
-        + Bytes.toString(regionInfo.getRegionName()) + ", family:" + Bytes.toString(family));
+      LOG.warn("Passed filesystem is null, so just deleting files without archiving for {}, "
+        + "family={}", Bytes.toString(regionInfo.getRegionName()), Bytes.toString(family));
       deleteStoreFilesWithoutArchiving(compactedFiles);
       return;
     }

     // short circuit if we don't have any files to delete
     if (compactedFiles.isEmpty()) {
-      LOG.debug("No store files to dispose, done!");
+      LOG.debug("No files to dispose of, done!");
       return;
     }

@@ -255,7 +253,7 @@ public class HFileArchiver {
     }

     // otherwise we attempt to archive the store files
-    LOG.debug("Archiving compacted store files.");
+    LOG.debug("Archiving compacted files.");

     // Wrap the storefile into a File
     StoreToFile getStorePath = new StoreToFile(fs);
@@ -319,9 +317,11 @@ public class HFileArchiver {
   private static List resolveAndArchive(FileSystem fs, Path baseArchiveDir,
       Collection toArchive, long start) throws IOException {
     // short circuit if no files to move
-    if (toArchive.isEmpty()) return Collections.emptyList();
+    if (toArchive.isEmpty()) {
+      return Collections.emptyList();
+    }

-    if (LOG.isTraceEnabled()) LOG.trace("moving files to the archive directory: " + baseArchiveDir);
+    LOG.trace("Moving files to the archive directory {}", baseArchiveDir);

     // make sure the archive directory exists
     if (!fs.exists(baseArchiveDir)) {
@@ -329,7 +329,7 @@ public class HFileArchiver {
         throw new IOException("Failed to create the archive directory:" + baseArchiveDir
           + ", quitting archive attempt.");
       }
-      if (LOG.isTraceEnabled()) LOG.trace("Created archive directory:" + baseArchiveDir);
+      LOG.trace("Created archive directory {}", baseArchiveDir);
     }

     List failures = new ArrayList<>();
@@ -337,7 +337,7 @@ public class HFileArchiver {
     for (File file : toArchive) {
       // if its a file archive it
       try {
-        if (LOG.isTraceEnabled()) LOG.trace("Archiving: " + file);
+        LOG.trace("Archiving {}", file);
         if (file.isFile()) {
           // attempt to archive the file
           if (!resolveAndArchiveFile(baseArchiveDir, file, startTime)) {
@@ -346,7 +346,7 @@ public class HFileArchiver {
           }
         } else {
           // otherwise its a directory and we need to archive all files
-          if (LOG.isTraceEnabled()) LOG.trace(file + " is a directory, archiving children files");
+          LOG.trace("{} is a directory, archiving children files", file);
           // so we add the directory name to the one base archive
           Path parentArchiveDir = new Path(baseArchiveDir, file.getName());
           // and then get all the files from that directory and attempt to
@@ -355,7 +355,7 @@ public class HFileArchiver {
           failures.addAll(resolveAndArchive(fs, parentArchiveDir, children, start));
         }
       } catch (IOException e) {
-        LOG.warn("Failed to archive " + file, e);
+        LOG.warn("Failed to archive {}", file, e);
         failures.add(file);
       }
     }
@@ -385,10 +385,8 @@ public class HFileArchiver {
     // really, really unlikely situtation, where we get the same name for the existing file, but
     // is included just for that 1 in trillion chance.
     if (fs.exists(archiveFile)) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("File:" + archiveFile + " already exists in archive, moving to "
-          + "timestamped backup and overwriting current.");
-      }
+      LOG.debug("{} already exists in archive, moving to timestamped backup and "
+        + "overwriting current.", archiveFile);

       // move the archive file to the stamped backup
       Path backedupArchiveFile = new Path(archiveDir, filename + SEPARATOR + archiveStartTime);
@@ -405,10 +403,7 @@ public class HFileArchiver {
       LOG.debug("Backed up archive file from " + archiveFile);
     }

-    if (LOG.isTraceEnabled()) {
-      LOG.trace("No existing file in archive for: " + archiveFile +
-        ", free to archive original file.");
-    }
+    LOG.trace("No existing file in archive for {}, free to archive original file.", archiveFile);

     // at this point, we should have a free spot for the archive file
     boolean success = false;
@@ -421,11 +416,11 @@ public class HFileArchiver {
         try {
           if (!fs.exists(archiveDir)) {
             if (fs.mkdirs(archiveDir)) {
-              LOG.debug("Created archive directory:" + archiveDir);
+              LOG.debug("Created archive directory {}", archiveDir);
             }
           }
         } catch (IOException e) {
-          LOG.warn("Failed to create directory: " + archiveDir, e);
+          LOG.warn("Failed to create directory {}", archiveDir, e);
         }
       }
@@ -446,9 +441,7 @@ public class HFileArchiver {
       return false;
     }

-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Finished archiving from " + currentFile + ", to " + archiveFile);
-    }
+    LOG.debug("Archived from {} to {}", currentFile, archiveFile);
     return true;
   }

@@ -462,10 +455,10 @@ public class HFileArchiver {
   private static boolean deleteRegionWithoutArchiving(FileSystem fs, Path regionDir)
       throws IOException {
     if (fs.delete(regionDir, true)) {
-      LOG.debug("Deleted " + regionDir);
+      LOG.debug("Deleted {}", regionDir);
       return true;
     }
-    LOG.debug("Failed to delete region directory:" + regionDir);
+    LOG.debug("Failed to delete directory {}", regionDir);
     return false;
   }

@@ -480,13 +473,13 @@ public class HFileArchiver {
    */
   private static void deleteStoreFilesWithoutArchiving(Collection compactedFiles)
       throws IOException {
-    LOG.debug("Deleting store files without archiving.");
+    LOG.debug("Deleting files without archiving.");
     List errors = new ArrayList<>(0);
     for (HStoreFile hsf : compactedFiles) {
       try {
         hsf.deleteStoreFile();
       } catch (IOException e) {
-        LOG.error("Failed to delete store file:" + hsf.getPath());
+        LOG.error("Failed to delete {}", hsf.getPath());
         errors.add(e);
       }
     }
@@ -605,7 +598,7 @@ public class HFileArchiver {

     @Override
     public String toString() {
-      return this.getClass() + ", file:" + getPath().toString();
+      return this.getClass().getSimpleName() + ", " + getPath().toString();
     }
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 671a5737125..f497a4226f7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1507,8 +1507,8 @@ public class HMaster extends HRegionServer implements MasterServices {
         TableDescriptor tblDesc = getTableDescriptors().get(table);
         if (table.isSystemTable() || (tblDesc != null &&
             !tblDesc.isNormalizationEnabled())) {
-          LOG.debug("Skipping normalization for table: " + table + ", as it's either system"
-            + " table or doesn't have auto normalization turned on");
+          LOG.trace("Skipping normalization for {}, as it's either system"
+            + " table or doesn't have auto normalization turned on", table);
           continue;
         }
         List plans = this.normalizer.computePlanForTable(table);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index dca9cbbbec6..ac5ad64b28d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -292,7 +292,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
         continue;
       }
       if (!c.isNeeded()) {
-        LOG.debug(c.getClass().getName() + " indicated that its cost should not be considered");
+        LOG.debug("{} not needed", c.getClass().getSimpleName());
         continue;
       }
       sumMultiplier += multiplier;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index eab06a2ea8f..1a393073d52 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -633,7 +633,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     boolean hasLock = true;
     final LockAndQueue[] regionLocks = new LockAndQueue[regionInfo.length];
     for (int i = 0; i < regionInfo.length; ++i) {
-      LOG.info(procedure + ", " + regionInfo[i].getRegionNameAsString());
+      LOG.info("{} checking lock on {}", procedure, regionInfo[i].getEncodedName());
       assert table != null;
       assert regionInfo[i] != null;
       assert regionInfo[i].getTable() != null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
index 1f9f4b822d9..5dc8e485961 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ChunkCreator.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;

-import java.lang.ref.WeakReference;
-import java.util.Iterator;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.BlockingQueue;
@@ -38,8 +36,8 @@
 import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.HeapMemoryTuneObserver;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.StringUtils;

-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;

 /**
  * Does the management of memstoreLAB chunk creations. A monotonically incrementing id is associated
@@ -100,13 +98,13 @@ public class ChunkCreator {
       float poolSizePercentage, float indexChunkSizePercentage, float initialCountPercentage,
       HeapMemoryManager heapMemoryManager) {
-    this.dataChunksPool = initializePool(globalMemStoreSize,
+    this.dataChunksPool = initializePool("data", globalMemStoreSize,
       (1 - indexChunkSizePercentage) * poolSizePercentage,
       initialCountPercentage, chunkSize, heapMemoryManager);
     // The index chunks pool is needed only when the index type is CCM.
     // Since the pools are not created at all when the index type isn't CCM,
     // we don't need to check it here.
-    this.indexChunksPool = initializePool(globalMemStoreSize,
+    this.indexChunksPool = initializePool("index", globalMemStoreSize,
       indexChunkSizePercentage * poolSizePercentage,
       initialCountPercentage,
       (int) (indexChunkSizePercentage * chunkSize),
       heapMemoryManager);
@@ -333,8 +331,11 @@ public class ChunkCreator {
     private static final int statThreadPeriod = 60 * 5;
     private final AtomicLong chunkCount = new AtomicLong();
     private final LongAdder reusedChunkCount = new LongAdder();
+    private final String label;

-    MemStoreChunkPool(int chunkSize, int maxCount, int initialCount, float poolSizePercentage) {
+    MemStoreChunkPool(String label, int chunkSize, int maxCount, int initialCount,
+        float poolSizePercentage) {
+      this.label = label;
       this.chunkSize = chunkSize;
       this.maxCount = maxCount;
       this.poolSizePercentage = poolSizePercentage;
@@ -423,12 +424,10 @@ public class ChunkCreator {
       long created = chunkCount.get();
       long reused = reusedChunkCount.sum();
       long total = created + reused;
-      LOG.debug("Stats (chunk size=" + chunkSize + "): "
-        + "current pool size=" + reclaimedChunks.size()
-        + ",created chunk count=" + created
-        + ",reused chunk count=" + reused
-        + ",reuseRatio=" + (total == 0 ? "0" : StringUtils.formatPercent(
-          (float) reused / (float) total, 2)));
+      LOG.debug("{} Stats (chunk size={}): current pool size={}, created chunk count={}, "
+        + "reused chunk count={}, reuseRatio={}", label, chunkSize, reclaimedChunks.size(),
+        created, reused,
+        (total == 0? "0": StringUtils.formatPercent((float)reused/(float)total,2)));
     }
   }
@@ -440,7 +439,7 @@ public class ChunkCreator {
     public void onHeapMemoryTune(long newMemstoreSize, long newBlockCacheSize) {
       // don't do any tuning in case of offheap memstore
       if (isOffheap()) {
-        LOG.warn("Not tuning the chunk pool as it is offheap");
+        LOG.warn("{} not tuning the chunk pool as it is offheap", label);
         return;
       }
       int newMaxCount =
@@ -450,12 +449,14 @@ public class ChunkCreator {
       if (newMaxCount > this.maxCount) {
         // Max chunks getting increased. Just change the variable. Later calls to getChunk() would
         // create and add them to Q
-        LOG.info("Max count for chunks increased from " + this.maxCount + " to " + newMaxCount);
+        LOG.info("{} max count for chunks increased from {} to {}", this.label, this.maxCount,
+          newMaxCount);
         this.maxCount = newMaxCount;
       } else {
         // Max chunks getting decreased. We may need to clear off some of the pooled chunks now
         // itself. If the extra chunks are serving already, do not pool those when we get them back
-        LOG.info("Max count for chunks decreased from " + this.maxCount + " to " + newMaxCount);
+        LOG.info("{} max count for chunks decreased from {} to {}", this.label, this.maxCount,
+          newMaxCount);
         this.maxCount = newMaxCount;
         if (this.reclaimedChunks.size() > newMaxCount) {
           synchronized (this) {
@@ -474,11 +475,11 @@ public class ChunkCreator {
     chunkPoolDisabled = false;
   }

-  private MemStoreChunkPool initializePool(long globalMemStoreSize, float poolSizePercentage,
-      float initialCountPercentage, int chunkSize,
-      HeapMemoryManager heapMemoryManager) {
+  private MemStoreChunkPool initializePool(String label, long globalMemStoreSize,
+      float poolSizePercentage, float initialCountPercentage, int chunkSize,
+      HeapMemoryManager heapMemoryManager) {
     if (poolSizePercentage <= 0) {
So not using pool", label); return null; } if (chunkPoolDisabled) { @@ -490,14 +491,13 @@ public class ChunkCreator { } int maxCount = (int) (globalMemStoreSize * poolSizePercentage / chunkSize); if (initialCountPercentage > 1.0 || initialCountPercentage < 0) { - throw new IllegalArgumentException( - MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY + " must be between 0.0 and 1.0"); + throw new IllegalArgumentException(label + " " + MemStoreLAB.CHUNK_POOL_INITIALSIZE_KEY + + " must be between 0.0 and 1.0"); } int initialCount = (int) (initialCountPercentage * maxCount); - LOG.info("Allocating MemStoreChunkPool with chunk size " - + StringUtils.byteDesc(chunkSize) + ", max count " + maxCount - + ", initial count " + initialCount); - MemStoreChunkPool memStoreChunkPool = new MemStoreChunkPool(chunkSize, maxCount, + LOG.info("{} allocating {} MemStoreChunkPool with chunk size {}, max count {}, " + + "initial count {}", label, StringUtils.byteDesc(chunkSize), maxCount, initialCount); + MemStoreChunkPool memStoreChunkPool = new MemStoreChunkPool(label, chunkSize, maxCount, initialCount, poolSizePercentage); if (heapMemoryManager != null && memStoreChunkPool != null) { // Register with Heap Memory manager diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java index 04a553ea2cb..98df9b167ec 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java @@ -158,10 +158,10 @@ public class CompactSplit implements CompactionRequester, PropagatingConfigurati @Override public String toString() { - return "compaction_queue=(" - + longCompactions.getQueue().size() + ":" + return "compactionQueue=(longCompactions=" + + longCompactions.getQueue().size() + ":shortCompactions=" + shortCompactions.getQueue().size() + ")" - + ", split_queue=" + splits.getQueue().size(); + + ", splitQueue=" + splits.getQueue().size(); } public String dumpQueue() { @@ -528,10 +528,10 @@ public class CompactSplit implements CompactionRequester, PropagatingConfigurati @Override public String toString() { if (compaction != null) { - return "Request = " + compaction.getRequest(); + return "Request=" + compaction.getRequest(); } else { - return "regionName = " + region.toString() + ", storeName = " + store.toString() + - ", priority = " + queuedPriority + ", time = " + time; + return "region=" + region.toString() + ", storeName=" + store.toString() + + ", priority=" + queuedPriority + ", startTime=" + time; } } @@ -591,7 +591,7 @@ public class CompactSplit implements CompactionRequester, PropagatingConfigurati boolean completed = region.compact(c, store, compactionThroughputController, user); long now = EnvironmentEdgeManager.currentTime(); - LOG.info(((completed) ? "Completed" : "Aborted") + " compaction: " + + LOG.info(((completed) ? 
"Completed" : "Aborted") + " compaction " + this + "; duration=" + StringUtils.formatTimeDiff(now, start)); if (completed) { // degenerate case: blocked regions require recursive enqueues @@ -619,7 +619,7 @@ public class CompactSplit implements CompactionRequester, PropagatingConfigurati tracker.afterExecution(store); completeTracker.completed(store); region.decrementCompactionsQueuedCount(); - LOG.debug("CompactSplitThread Status: " + CompactSplit.this); + LOG.debug("Status {}", CompactSplit.this); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java index d60b04924c9..efdc8ab5916 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java @@ -427,9 +427,8 @@ public class CompactingMemStore extends AbstractMemStore { // compaction is in progress compactor.start(); } catch (IOException e) { - LOG.warn("Unable to run memstore compaction. region " - + getRegionServices().getRegionInfo().getRegionNameAsString() + "store: " - + getFamilyName(), e); + LOG.warn("Unable to run in-memory compaction on {}/{}; exception={}", + getRegionServices().getRegionInfo().getEncodedName(), getFamilyName(), e); } } finally { inMemoryFlushInProgress.set(false); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index ba7ab5b82bf..94938cb0617 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -2077,8 +2077,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi return false; } } - LOG.info("Starting compaction on " + store + " in region " + this - + (compaction.getRequest().isOffPeak()?" as an off-peak compaction":"")); + LOG.info("Starting compaction of {} in {}{}", store, this, + (compaction.getRequest().isOffPeak()?" 
as an off-peak compaction":"")); doRegionCompactionPrep(); try { status.setStatus("Compacting store " + store); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java index fbb5f75a467..c80b9126051 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactionStrategy.java @@ -85,12 +85,12 @@ public abstract class MemStoreCompactionStrategy { int numOfSegments = versionedList.getNumOfSegments(); if (numOfSegments > pipelineThreshold) { // to avoid too many segments, merge now - LOG.debug("{} {}; merging {} segments", strategy, cfName, numOfSegments); + LOG.trace("Strategy={}, store={}; merging {} segments", strategy, cfName, numOfSegments); return getMergingAction(); } // just flatten a segment - LOG.debug("{} {}; flattening a segment", strategy, cfName); + LOG.trace("Strategy={}, store={}; flattening a segment", strategy, cfName); return getFlattenAction(); } @@ -104,8 +104,8 @@ public abstract class MemStoreCompactionStrategy { protected Action compact(VersionedSegmentsList versionedList, String strategyInfo) { int numOfSegments = versionedList.getNumOfSegments(); - LOG.debug(strategyInfo+" memory compaction for store " + cfName - + " compacting " + numOfSegments + " segments"); + LOG.trace("{} in-memory compaction for store={} compacting {} segments", strategyInfo, + cfName, numOfSegments); return Action.COMPACT; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java index 5c908e5bfff..1f8578d3226 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java @@ -92,7 +92,7 @@ public class MemStoreCompactor { // get a snapshot of the list of the segments from the pipeline, // this local copy of the list is marked with specific version versionedList = compactingMemStore.getImmutableSegments(); - LOG.debug("Starting on {}/{}", + LOG.trace("Speculative compaction starting on {}/{}", compactingMemStore.getStore().getHRegion().getRegionInfo().getEncodedName(), compactingMemStore.getStore().getColumnFamilyName()); HStore store = compactingMemStore.getStore(); @@ -177,8 +177,8 @@ public class MemStoreCompactor { } } } catch (IOException e) { - LOG.debug("Interrupting the MemStore in-memory compaction for store " - + compactingMemStore.getFamilyName()); + LOG.trace("Interrupting in-memory compaction for store={}", + compactingMemStore.getFamilyName()); Thread.currentThread().interrupt(); } finally { // For the MERGE case, if the result was created, but swap didn't happen, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 9f6a015351c..2bc1e015083 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -980,10 +980,8 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner heap.peek() == null || bytesRead < preadMaxBytes) { return; } - if (LOG.isDebugEnabled()) { - 
LOG.debug("Switch to stream read because we have already read " + bytesRead + - " bytes from this scanner"); - } + LOG.debug("Switch to stream read (scanned={} bytes) of {}", bytesRead, + this.store.getColumnFamilyName()); scanUsePread = false; Cell lastTop = heap.peek(); List memstoreScanners = new ArrayList<>(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java index 056f076e2e2..7dfb7fdda78 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java @@ -199,15 +199,14 @@ public abstract class Compactor { } tmp = fileInfo.get(TIMERANGE_KEY); fd.latestPutTs = tmp == null ? HConstants.LATEST_TIMESTAMP: TimeRangeTracker.parseFrom(tmp).getMax(); - if (LOG.isDebugEnabled()) { - LOG.debug("Compacting " + file + - ", keycount=" + keyCount + - ", bloomtype=" + r.getBloomFilterType().toString() + - ", size=" + TraditionalBinaryPrefix.long2String(r.length(), "", 1) + - ", encoding=" + r.getHFileReader().getDataBlockEncoding() + - ", seqNum=" + seqNum + - (allFiles ? ", earliestPutTs=" + earliestPutTs: "")); - } + LOG.debug("Compacting {}, keycount={}, bloomtype={}, size={}, encoding={}, seqNum={}{}", + (file.getPath() == null? null: file.getPath().getName()), + keyCount, + r.getBloomFilterType().toString(), + TraditionalBinaryPrefix.long2String(r.length(), "", 1), + r.getHFileReader().getDataBlockEncoding(), + seqNum, + (allFiles? ", earliestPutTs=" + earliestPutTs: "")); } return fd; }