HBASE-19652 Turn down CleanerChore logging; too chatty
parent d6d8369655
commit 50348cc1a4
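Note on the recurring pattern below: most hunks replace guarded string-concatenation logging with parameterized ("{}" placeholder) logging. A minimal standalone sketch of the two styles, assuming an SLF4J logger (class and variable names here are illustrative, not taken from the HBase source):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingStyleDemo {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingStyleDemo.class);

  public static void main(String[] args) {
    String dir = "/hbase/archive";

    // Old style: the argument string is built with '+' even when DEBUG is off,
    // so call sites wrap it in an isDebugEnabled() guard to avoid the cost.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Start deleting files under " + dir);
    }

    // Parameterized style: formatting happens only if the level is enabled,
    // so the explicit guard (and several lines per call site) can be dropped.
    LOG.debug("Start deleting files under {}", dir);
  }
}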
@@ -32,10 +32,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 public class RegionTooBusyException extends IOException {
   private static final long serialVersionUID = 1728345723728342L;
 
-  /**
-   * Constructor
-   * @param msg message
-   */
   // Be careful. Keep variance in the passed 'msg' low because its msg is used as a key over in
   // RetriesExhaustedWithDetailsException grouping failure types.
   public RegionTooBusyException(final String msg) {
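The comment kept above asks callers to keep the exception message stable because failures are grouped by message text downstream. A hypothetical sketch of that kind of grouping (not the actual RetriesExhaustedWithDetailsException code; the class and strings are made up for illustration):

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class FailureGroupingSketch {
  public static void main(String[] args) {
    // Two failures whose messages differ only in per-region details.
    List<String> messages = List.of(
        "Over memstore limit; regionName=abc, server=rs1",
        "Over memstore limit; regionName=def, server=rs2");

    // Group by the full message, the way a per-message failure summary would.
    Map<String, Integer> groups = new HashMap<>();
    for (String msg : messages) {
      groups.merge(msg, 1, Integer::sum);
    }

    // Prints 2: high-variance messages produce one group per failure,
    // which is why the constructor comment asks for a low-variance 'msg'.
    System.out.println(groups.size());
  }
}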
@@ -977,12 +977,12 @@ class AsyncRequestFutureImpl<CResult> implements AsyncRequestFuture {
       Throwable error, long backOffTime, boolean willRetry, String startTime,
       int failed, int stopped) {
     StringBuilder sb = new StringBuilder();
-    sb.append("#").append(asyncProcess.id).append(", table=").append(tableName).append(", ")
+    sb.append("id=").append(asyncProcess.id).append(", table=").append(tableName).append(", ")
         .append("attempt=").append(numAttempt)
         .append("/").append(asyncProcess.numTries).append(" ");
 
     if (failureCount > 0 || error != null){
-      sb.append("failed=").append(failureCount).append("ops").append(", last exception: ").
+      sb.append("failed=").append(failureCount).append("ops").append(", last exception=").
          append(error == null ? "null" : error);
     } else {
       sb.append("succeeded");
@@ -125,7 +125,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
       // but upmost to the number of available processors.
       int size = Math.min(Integer.valueOf(poolSize), AVAIL_PROCESSORS);
       if (size == AVAIL_PROCESSORS) {
-        LOG.warn("Use full core processors to scan dir");
+        LOG.warn("Use full core processors to scan dir, size={}" + size);
       }
       return size;
     } else if (poolSize.matches("0.[0-9]+|1.0")) {
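For context, the method touched above sizes the cleaner's scan pool from a configuration string: a whole number is capped at the available processor count, and a value matching the visible "0.[0-9]+|1.0" regex is treated as a fraction of it. A standalone sketch of that calculation; the integer-branch regex and the fallback value are assumptions, only the fractional regex and the Math.min cap appear in the hunk:

public class PoolSizeSketch {
  static final int AVAIL_PROCESSORS = Runtime.getRuntime().availableProcessors();

  static int calculatePoolSize(String poolSize) {
    if (poolSize.matches("[1-9][0-9]*")) {
      // Whole number: cap at the number of available processors.
      return Math.min(Integer.valueOf(poolSize), AVAIL_PROCESSORS);
    } else if (poolSize.matches("0.[0-9]+|1.0")) {
      // Fraction: scale the processor count, but never drop below one thread.
      return Math.max(1, (int) (AVAIL_PROCESSORS * Double.valueOf(poolSize)));
    }
    return 1; // fallback for unparseable values (assumption, not from the diff)
  }

  public static void main(String[] args) {
    System.out.println(calculatePoolSize("4"));
    System.out.println(calculatePoolSize("0.5"));
  }
}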
@@ -157,7 +157,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
     for (String className : logCleaners) {
       T logCleaner = newFileCleaner(className, conf);
       if (logCleaner != null) {
-        LOG.debug("initialize cleaner=" + className);
+        LOG.debug("Initialize cleaner=" + className);
         this.cleanersChain.add(logCleaner);
       }
     }
@@ -168,10 +168,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
   public void onConfigurationChange(Configuration conf) {
     int updatedSize = calculatePoolSize(conf.get(CHORE_POOL_SIZE, DEFAULT_CHORE_POOL_SIZE));
     if (updatedSize == chorePoolSize) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Size from configuration is the same as previous which is " +
-          updatedSize + ", no need to update.");
-      }
+      LOG.trace("Size from configuration is same as previous={}, no need to update.", updatedSize);
       return;
     }
     chorePoolSize = updatedSize;
@@ -186,8 +183,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
 
   private void updateChorePoolSize(int updatedSize) {
     chorePool.shutdownNow();
-    LOG.info("Update chore's pool size from " +
-      chorePool.getParallelism() + " to " + updatedSize);
+    LOG.info("Update chore's pool size from {} to {}", chorePool.getParallelism(), updatedSize);
     chorePool = new ForkJoinPool(updatedSize);
   }
 
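The hunk above resizes the pool by shutting the old one down and constructing a new ForkJoinPool, since a ForkJoinPool's parallelism is fixed at construction. A minimal sketch of that replace-on-resize pattern (field and method names are illustrative):

import java.util.concurrent.ForkJoinPool;

public class PoolResizeSketch {
  private ForkJoinPool chorePool = new ForkJoinPool(2);

  // Parallelism cannot be adjusted on a live ForkJoinPool, so "resizing" means
  // discarding the old pool and creating a new one with the desired size.
  private void updateChorePoolSize(int updatedSize) {
    chorePool.shutdownNow();
    chorePool = new ForkJoinPool(updatedSize);
  }

  public static void main(String[] args) {
    PoolResizeSketch s = new PoolResizeSketch();
    s.updateChorePoolSize(4);
    System.out.println(s.chorePool.getParallelism()); // prints 4
  }
}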
@@ -208,7 +204,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
       cleaner.init(this.params);
       return cleaner;
     } catch (Exception e) {
-      LOG.warn("Can NOT create CleanerDelegate: " + className, e);
+      LOG.warn("Can NOT create CleanerDelegate={}", className, e);
       // skipping if can't instantiate
       return null;
     }
@@ -218,9 +214,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
   protected void chore() {
     if (getEnabled()) {
       if (runCleaner()) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Cleaned old files/dirs under " + oldFileDir + " successfully.");
-        }
+        LOG.debug("Cleaned old files/dirs under {} successfully", oldFileDir);
       } else {
         LOG.warn("Failed to fully clean old files/dirs under " + oldFileDir + ".");
       }
@@ -274,9 +268,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
         directorySpaces.put(f, space);
         return space;
       } catch (IOException e) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("failed to get space consumed by path " + f.getPath(), e);
-        }
+        LOG.trace("Failed to get space consumed by path={}", f.getPath(), e);
         return -1;
       }
     }
@@ -343,9 +335,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
     int deletedFileCount = 0;
     for (FileStatus file : filesToDelete) {
       Path filePath = file.getPath();
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Removing: " + filePath + " from archive");
-      }
+      LOG.trace("Removing {} from archive", filePath);
       try {
         boolean success = this.fs.delete(filePath, false);
         if (success) {
@@ -426,14 +416,10 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
 
       boolean nullSubDirs = subDirs == null;
       if (nullSubDirs) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("There is no subdir under " + dir);
-        }
+        LOG.trace("There is no subdir under {}", dir);
       }
       if (files == null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("There is no file under " + dir);
-        }
+        LOG.trace("There is no file under {}", dir);
       }
 
       int capacity = nullSubDirs ? 0 : subDirs.size();
@@ -449,7 +435,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
 
       boolean result = true;
       result &= deleteAction(() -> checkAndDeleteFiles(files), "files");
-      result &= deleteAction(() -> getCleanRusult(tasks), "subdirs");
+      result &= deleteAction(() -> getCleanResult(tasks), "subdirs");
       // if and only if files and subdirs under current dir are deleted successfully, and
       // it is not the root dir, then task will try to delete it.
       if (result && !root) {
@@ -478,24 +464,16 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
      */
     private boolean deleteAction(Action<Boolean> deletion, String type) {
       boolean deleted;
-      String errorMsg = "";
+      String errorMsg = null;
       try {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Start deleting " + type + " under " + dir);
-        }
+        LOG.trace("Start deleting {} under {}", type, dir);
         deleted = deletion.act();
       } catch (IOException ioe) {
         errorMsg = ioe.getMessage();
+        LOG.warn("Could not delete {} under {}; {}", type, dir, errorMsg);
         deleted = false;
       }
-      if (LOG.isDebugEnabled()) {
-        if (deleted) {
-          LOG.debug("Finish deleting " + type + " under " + dir);
-        } else {
-          LOG.debug("Couldn't delete " + type + " completely under " + dir +
-            " with reasons: " + (!errorMsg.equals("") ? errorMsg : " undeletable, please check."));
-        }
-      }
+      LOG.trace("Finish deleting {} under {}, deleted=", type, dir, deleted);
       return deleted;
     }
 
@@ -505,7 +483,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
      * @return true if all subdirs deleted successfully, false for patial/all failures
      * @throws IOException something happen during computation
      */
-    private boolean getCleanRusult(List<CleanerTask> tasks) throws IOException {
+    private boolean getCleanResult(List<CleanerTask> tasks) throws IOException {
       boolean cleaned = true;
       try {
         for (CleanerTask task : tasks) {
@@ -4173,10 +4173,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       // Don't print current limit because it will vary too much. The message is used as a key
       // over in RetriesExhaustedWithDetailsException processing.
       throw new RegionTooBusyException("Over memstore limit; regionName=" +
-          (this.getRegionInfo() == null? "unknown": this.getRegionInfo().getRegionNameAsString()) +
+          (this.getRegionInfo() == null? "unknown": this.getRegionInfo().getEncodedName()) +
           ", server=" + (this.getRegionServerServices() == null ? "unknown":
           this.getRegionServerServices().getServerName()) +
-          ", blockingMemStoreSize=" + blockingMemStoreSize);
+          ", blockingMemStoreSize=" +
+          org.apache.hadoop.hbase.procedure2.util.StringUtils.humanSize(blockingMemStoreSize));
     }
   }
 
@@ -431,8 +431,7 @@ class MemStoreFlusher implements FlushRequester {
    */
   private boolean flushRegion(final FlushRegionEntry fqe) {
     HRegion region = fqe.region;
-    if (!region.getRegionInfo().isMetaRegion() &&
-        isTooManyStoreFiles(region)) {
+    if (!region.getRegionInfo().isMetaRegion() && isTooManyStoreFiles(region)) {
       if (fqe.isMaximumWait(this.blockingWaitTime)) {
         LOG.info("Waited " + (EnvironmentEdgeManager.currentTime() - fqe.createTime) +
           "ms on a compaction to clean up 'too many store files'; waited " +
@@ -442,7 +441,7 @@ class MemStoreFlusher implements FlushRequester {
       // If this is first time we've been put off, then emit a log message.
       if (fqe.getRequeueCount() <= 0) {
         // Note: We don't impose blockingStoreFiles constraint on meta regions
-        LOG.warn("Region " + region.getRegionInfo().getRegionNameAsString() + " has too many " +
+        LOG.warn("Region " + region.getRegionInfo().getEncodedName() + " has too many " +
           "store files; delaying flush up to " + this.blockingWaitTime + "ms");
         if (!this.server.compactSplitThread.requestSplit(region)) {
           try {
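Several hunks in this commit swap getRegionNameAsString() for getEncodedName(). The encoded name is the short hash-like suffix of the full region name, so log lines and exception messages carry much less variable text. A purely illustrative comparison (the strings below are made up, not produced by HBase APIs here):

public class RegionNameDemo {
  public static void main(String[] args) {
    // Full region name: table, start key, region id, plus the encoded suffix.
    String fullName = "TestTable,row-0500,1514000000000.5a3f0c2d9c1e4b7fa8d6e2109b3c4d5e.";
    // Encoded name: just the short suffix.
    String encoded = "5a3f0c2d9c1e4b7fa8d6e2109b3c4d5e";
    System.out.println(fullName.length() + " chars vs " + encoded.length() + " chars per mention");
  }
}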
@@ -122,7 +122,7 @@ public abstract class PressureAwareThroughputController extends Configured imple
       // do not log too much
       if (now - operation.lastLogTime > 5L * 1000) {
         LOG.debug("deltaSize: " + deltaSize + " bytes; elapseTime: " + elapsedTime + " ns");
-        LOG.debug(opName + " sleep " + sleepTime + " ms because current throughput is "
+        LOG.debug(opName + " sleep=" + sleepTime + "ms because current throughput is "
             + throughputDesc(deltaSize, elapsedTime) + ", max allowed is "
             + throughputDesc(maxThroughputPerOperation) + ", already slept "
             + operation.numberOfSleeps + " time(s) and total slept time is "
@@ -49,7 +49,7 @@ public final class ThroughputControlUtil {
         break;
       }
     }
-    return store.getRegionInfo().getRegionNameAsString() + NAME_DELIMITER +
+    return store.getRegionInfo().getEncodedName() + NAME_DELIMITER +
         store.getColumnFamilyDescriptor().getNameAsString() + NAME_DELIMITER + opName +
         NAME_DELIMITER + counter;
   }