diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java index 49431b635d7..3024962ebd6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java @@ -32,10 +32,6 @@ import org.apache.yetus.audience.InterfaceAudience; public class RegionTooBusyException extends IOException { private static final long serialVersionUID = 1728345723728342L; - /** - * Constructor - * @param msg message - */ // Be careful. Keep variance in the passed 'msg' low because its msg is used as a key over in // RetriesExhaustedWithDetailsException grouping failure types. public RegionTooBusyException(final String msg) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java index ed1bdb3e476..d214dcaca94 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java @@ -977,12 +977,12 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { Throwable error, long backOffTime, boolean willRetry, String startTime, int failed, int stopped) { StringBuilder sb = new StringBuilder(); - sb.append("#").append(asyncProcess.id).append(", table=").append(tableName).append(", ") + sb.append("id=").append(asyncProcess.id).append(", table=").append(tableName).append(", ") .append("attempt=").append(numAttempt) .append("/").append(asyncProcess.numTries).append(" "); if (failureCount > 0 || error != null){ - sb.append("failed=").append(failureCount).append("ops").append(", last exception: "). + sb.append("failed=").append(failureCount).append("ops").append(", last exception="). append(error == null ? 
"null" : error); } else { sb.append("succeeded"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java index bc7c82d1db3..abf132c94ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/CleanerChore.java @@ -125,7 +125,7 @@ public abstract class CleanerChore extends Schedu // but upmost to the number of available processors. int size = Math.min(Integer.valueOf(poolSize), AVAIL_PROCESSORS); if (size == AVAIL_PROCESSORS) { - LOG.warn("Use full core processors to scan dir"); + LOG.warn("Use full core processors to scan dir, size={}", size); } return size; } else if (poolSize.matches("0.[0-9]+|1.0")) { @@ -157,7 +157,7 @@ public abstract class CleanerChore extends Schedu for (String className : logCleaners) { T logCleaner = newFileCleaner(className, conf); if (logCleaner != null) { - LOG.debug("initialize cleaner=" + className); + LOG.debug("Initialize cleaner=" + className); this.cleanersChain.add(logCleaner); } } @@ -168,10 +168,7 @@ public abstract class CleanerChore extends Schedu public void onConfigurationChange(Configuration conf) { int updatedSize = calculatePoolSize(conf.get(CHORE_POOL_SIZE, DEFAULT_CHORE_POOL_SIZE)); if (updatedSize == chorePoolSize) { - if (LOG.isDebugEnabled()) { - LOG.debug("Size from configuration is the same as previous which is " + - updatedSize + ", no need to update."); - } + LOG.trace("Size from configuration is same as previous={}, no need to update.", updatedSize); return; } chorePoolSize = updatedSize; @@ -186,8 +183,7 @@ public abstract class CleanerChore extends Schedu private void updateChorePoolSize(int updatedSize) { chorePool.shutdownNow(); - LOG.info("Update chore's pool size from " + - chorePool.getParallelism() + " to " + updatedSize); + LOG.info("Update chore's pool size from 
{} to {}", chorePool.getParallelism(), updatedSize); chorePool = new ForkJoinPool(updatedSize); } @@ -208,7 +204,7 @@ public abstract class CleanerChore extends Schedu cleaner.init(this.params); return cleaner; } catch (Exception e) { - LOG.warn("Can NOT create CleanerDelegate: " + className, e); + LOG.warn("Can NOT create CleanerDelegate={}", className, e); // skipping if can't instantiate return null; } @@ -218,9 +214,7 @@ public abstract class CleanerChore extends Schedu protected void chore() { if (getEnabled()) { if (runCleaner()) { - if (LOG.isDebugEnabled()) { - LOG.debug("Cleaned old files/dirs under " + oldFileDir + " successfully."); - } + LOG.debug("Cleaned old files/dirs under {} successfully", oldFileDir); } else { LOG.warn("Failed to fully clean old files/dirs under " + oldFileDir + "."); } @@ -274,9 +268,7 @@ public abstract class CleanerChore extends Schedu directorySpaces.put(f, space); return space; } catch (IOException e) { - if (LOG.isTraceEnabled()) { - LOG.trace("failed to get space consumed by path " + f.getPath(), e); - } + LOG.trace("Failed to get space consumed by path={}", f.getPath(), e); return -1; } } @@ -343,9 +335,7 @@ public abstract class CleanerChore extends Schedu int deletedFileCount = 0; for (FileStatus file : filesToDelete) { Path filePath = file.getPath(); - if (LOG.isDebugEnabled()) { - LOG.debug("Removing: " + filePath + " from archive"); - } + LOG.trace("Removing {} from archive", filePath); try { boolean success = this.fs.delete(filePath, false); if (success) { @@ -426,14 +416,10 @@ public abstract class CleanerChore extends Schedu boolean nullSubDirs = subDirs == null; if (nullSubDirs) { - if (LOG.isDebugEnabled()) { - LOG.debug("There is no subdir under " + dir); - } + LOG.trace("There is no subdir under {}", dir); } if (files == null) { - if (LOG.isDebugEnabled()) { - LOG.debug("There is no file under " + dir); - } + LOG.trace("There is no file under {}", dir); } int capacity = nullSubDirs ? 
0 : subDirs.size(); @@ -449,7 +435,7 @@ public abstract class CleanerChore extends Schedu boolean result = true; result &= deleteAction(() -> checkAndDeleteFiles(files), "files"); - result &= deleteAction(() -> getCleanRusult(tasks), "subdirs"); + result &= deleteAction(() -> getCleanResult(tasks), "subdirs"); // if and only if files and subdirs under current dir are deleted successfully, and // it is not the root dir, then task will try to delete it. if (result && !root) { @@ -478,24 +464,16 @@ public abstract class CleanerChore extends Schedu */ private boolean deleteAction(Action deletion, String type) { boolean deleted; - String errorMsg = ""; + String errorMsg = null; try { - if (LOG.isDebugEnabled()) { - LOG.debug("Start deleting " + type + " under " + dir); - } + LOG.trace("Start deleting {} under {}", type, dir); deleted = deletion.act(); } catch (IOException ioe) { errorMsg = ioe.getMessage(); + LOG.warn("Could not delete {} under {}; {}", type, dir, errorMsg); deleted = false; } - if (LOG.isDebugEnabled()) { - if (deleted) { - LOG.debug("Finish deleting " + type + " under " + dir); - } else { - LOG.debug("Couldn't delete " + type + " completely under " + dir + - " with reasons: " + (!errorMsg.equals("") ? 
errorMsg : " undeletable, please check.")); - } - } + LOG.trace("Finish deleting {} under {}, deleted={}", type, dir, deleted); return deleted; } @@ -505,7 +483,7 @@ public abstract class CleanerChore extends Schedu * @return true if all subdirs deleted successfully, false for patial/all failures * @throws IOException something happen during computation */ - private boolean getCleanRusult(List tasks) throws IOException { + private boolean getCleanResult(List tasks) throws IOException { boolean cleaned = true; try { for (CleanerTask task : tasks) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index ca1bfd3bd4f..b9a5a2ba368 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -4173,10 +4173,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // Don't print current limit because it will vary too much. The message is used as a key // over in RetriesExhaustedWithDetailsException processing. throw new RegionTooBusyException("Over memstore limit; regionName=" + - (this.getRegionInfo() == null? "unknown": this.getRegionInfo().getRegionNameAsString()) + + (this.getRegionInfo() == null? "unknown": this.getRegionInfo().getEncodedName()) + ", server=" + (this.getRegionServerServices() == null ? 
"unknown": this.getRegionServerServices().getServerName()) + - ", blockingMemStoreSize=" + blockingMemStoreSize); + ", blockingMemStoreSize=" + + org.apache.hadoop.hbase.procedure2.util.StringUtils.humanSize(blockingMemStoreSize)); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index 9e352ef4283..1d4e43141ba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -431,8 +431,7 @@ class MemStoreFlusher implements FlushRequester { */ private boolean flushRegion(final FlushRegionEntry fqe) { HRegion region = fqe.region; - if (!region.getRegionInfo().isMetaRegion() && - isTooManyStoreFiles(region)) { + if (!region.getRegionInfo().isMetaRegion() && isTooManyStoreFiles(region)) { if (fqe.isMaximumWait(this.blockingWaitTime)) { LOG.info("Waited " + (EnvironmentEdgeManager.currentTime() - fqe.createTime) + "ms on a compaction to clean up 'too many store files'; waited " + @@ -442,7 +441,7 @@ class MemStoreFlusher implements FlushRequester { // If this is first time we've been put off, then emit a log message. 
if (fqe.getRequeueCount() <= 0) { // Note: We don't impose blockingStoreFiles constraint on meta regions - LOG.warn("Region " + region.getRegionInfo().getRegionNameAsString() + " has too many " + + LOG.warn("Region " + region.getRegionInfo().getEncodedName() + " has too many " + "store files; delaying flush up to " + this.blockingWaitTime + "ms"); if (!this.server.compactSplitThread.requestSplit(region)) { try { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java index ec90830c177..a1bd21bd805 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/PressureAwareThroughputController.java @@ -122,7 +122,7 @@ public abstract class PressureAwareThroughputController extends Configured imple // do not log too much if (now - operation.lastLogTime > 5L * 1000) { LOG.debug("deltaSize: " + deltaSize + " bytes; elapseTime: " + elapsedTime + " ns"); - LOG.debug(opName + " sleep " + sleepTime + " ms because current throughput is " + LOG.debug(opName + " sleep=" + sleepTime + "ms because current throughput is " + throughputDesc(deltaSize, elapsedTime) + ", max allowed is " + throughputDesc(maxThroughputPerOperation) + ", already slept " + operation.numberOfSleeps + " time(s) and total slept time is " diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java index 764c06517fc..ad65c59436e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/throttle/ThroughputControlUtil.java @@ -49,7 +49,7 @@ public final class ThroughputControlUtil { break; } } - return store.getRegionInfo().getRegionNameAsString() + NAME_DELIMITER + + return store.getRegionInfo().getEncodedName() + NAME_DELIMITER + store.getColumnFamilyDescriptor().getNameAsString() + NAME_DELIMITER + opName + NAME_DELIMITER + counter; }