HBASE-20770 WAL cleaner logs way too much; gets clogged when lots of work to do

General log cleanup; setting stuff that can flood the log to TRACE.
Michael Stack 2018-06-23 23:29:11 -07:00
parent 025ddce868
commit 0db2b628d6
6 changed files with 17 additions and 29 deletions

NettyHBaseSaslRpcClient.java

@@ -45,10 +45,7 @@ public class NettyHBaseSaslRpcClient extends AbstractHBaseSaslRpcClient {
   public void setupSaslHandler(ChannelPipeline p) {
     String qop = (String) saslClient.getNegotiatedProperty(Sasl.QOP);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("SASL client context established. Negotiated QoP: " + qop);
-    }
+    LOG.trace("SASL client context established. Negotiated QoP {}", qop);
     if (qop == null || "auth".equalsIgnoreCase(qop)) {
       return;
     }
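
This change is the template for most of the commit: with SLF4J's parameterized
logging, the message is only formatted after the level check passes, so an
explicit isDebugEnabled()/isTraceEnabled() guard around a single log call buys
nothing. A minimal sketch of the before/after (class name is illustrative, not
from the HBase tree):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardedLoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(GuardedLoggingSketch.class);

  void logQop(String qop) {
    // Before: string concatenation builds the message eagerly, so a
    // guard is needed to avoid that cost when the level is disabled.
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client context established. Negotiated QoP: " + qop);
    }
    // After: the {} placeholder defers formatting until SLF4J has
    // confirmed the level is enabled, so no guard is needed when the
    // arguments are cheap references.
    LOG.trace("SASL client context established. Negotiated QoP {}", qop);
  }
}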

NettyHBaseSaslRpcClientHandler.java

@@ -71,9 +71,7 @@ public class NettyHBaseSaslRpcClientHandler extends SimpleChannelInboundHandler<
   }
   private void writeResponse(ChannelHandlerContext ctx, byte[] response) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Will send token of size " + response.length + " from initSASLContext.");
-    }
+    LOG.trace("Sending token size={} from initSASLContext.", response.length);
     ctx.writeAndFlush(
       ctx.alloc().buffer(4 + response.length).writeInt(response.length).writeBytes(response));
   }
@@ -133,9 +131,7 @@ public class NettyHBaseSaslRpcClientHandler extends SimpleChannelInboundHandler<
       }
       return;
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Will read input token of size " + len + " for processing by initSASLContext");
-    }
+    LOG.trace("Reading input token size={} for processing by initSASLContext", len);
     final byte[] challenge = new byte[len];
     msg.readBytes(challenge);
     byte[] response = ugi.doAs(new PrivilegedExceptionAction<byte[]>() {
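
For context on the surrounding code: writeResponse() length-prefixes the SASL
token as a 4-byte length followed by the token bytes, and the second hunk reads
back a token of size len framed the same way. A hedged sketch of that frame
layout using Netty's Unpooled buffers (helper and class names are illustrative):

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;

public class SaslFrameSketch {
  // Encode a payload as [4-byte length][payload bytes], mirroring
  // ctx.alloc().buffer(4 + response.length).writeInt(...).writeBytes(...)
  // above, but without a live ChannelHandlerContext.
  static ByteBuf frame(byte[] payload) {
    return Unpooled.buffer(4 + payload.length)
        .writeInt(payload.length)  // big-endian length prefix
        .writeBytes(payload);      // token bytes
  }
}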

AssignProcedure.java

@@ -291,8 +291,11 @@ public class AssignProcedure extends RegionTransitionProcedure {
         " transition openSeqNum=" + openSeqNum + ", " + regionNode);
     }
     if (openSeqNum < regionNode.getOpenSeqNum()) {
-      LOG.warn("Skipping update of open seqnum with " + openSeqNum +
-        " because current seqnum=" + regionNode.getOpenSeqNum());
+      // Don't bother logging if openSeqNum == 0
+      if (openSeqNum != 0) {
+        LOG.warn("Skipping update of open seqnum with " + openSeqNum +
+          " because current seqnum=" + regionNode.getOpenSeqNum());
+      }
     } else {
       regionNode.setOpenSeqNum(openSeqNum);
     }
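
This hunk is the one behavioral tweak in the commit: a stale openSeqNum still
skips the update, but the warning is suppressed when the incoming value is 0,
which the in-line comment singles out as not worth logging. A standalone sketch
of the rule (class, method, and field names are illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SeqNumUpdateSketch {
  private static final Logger LOG = LoggerFactory.getLogger(SeqNumUpdateSketch.class);
  private long currentSeqNum;

  void maybeUpdate(long openSeqNum) {
    if (openSeqNum < currentSeqNum) {
      // Stale value: keep the current seqnum, and only warn when the
      // incoming value is non-zero, per the change above.
      if (openSeqNum != 0) {
        LOG.warn("Skipping update of open seqnum with {} because current seqnum={}",
            openSeqNum, currentSeqNum);
      }
    } else {
      currentSeqNum = openSeqNum;
    }
  }
}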

RegionLocationFinder.java

@@ -220,8 +220,7 @@ class RegionLocationFinder {
         tableDescriptor = this.services.getTableDescriptors().get(tableName);
       }
     } catch (FileNotFoundException fnfe) {
-      LOG.debug("FileNotFoundException during getTableDescriptors." + " Current table name = "
-          + tableName, fnfe);
+      LOG.debug("tableName={}", tableName, fnfe);
     }
     return tableDescriptor;
@@ -277,8 +276,7 @@ class RegionLocationFinder {
       blockDistbn = cache.get(hri);
       return blockDistbn;
     } else {
-      LOG.debug("HDFSBlocksDistribution not found in cache for region "
-          + hri.getRegionNameAsString());
+      LOG.trace("HDFSBlocksDistribution not found in cache for {}", hri.getRegionNameAsString());
       blockDistbn = internalGetTopBlockLocation(hri);
       cache.put(hri, blockDistbn);
       return blockDistbn;
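
The new LOG.debug("tableName={}", tableName, fnfe) call leans on an SLF4J
convention worth noting: when the final argument is a Throwable left over after
all placeholders are filled, SLF4J treats it as the exception and still prints
its stack trace. A small demonstration (class name and message are illustrative):

import java.io.FileNotFoundException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ThrowableArgSketch {
  private static final Logger LOG = LoggerFactory.getLogger(ThrowableArgSketch.class);

  void demo() {
    FileNotFoundException fnfe = new FileNotFoundException("no table descriptor");
    // One placeholder, two arguments: "myTable" fills {}, and fnfe is
    // logged as the exception, stack trace included.
    LOG.debug("tableName={}", "myTable", fnfe);
  }
}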

CleanerChore.java

@@ -272,13 +272,9 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
     try {
       POOL.latchCountUp();
       if (runCleaner()) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Cleaned all WALs under {}", oldFileDir);
-        }
+        LOG.trace("Cleaned all WALs under {}", oldFileDir);
       } else {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("WALs outstanding under {}", oldFileDir);
-        }
+        LOG.trace("WALs outstanding under {}", oldFileDir);
       }
     } finally {
       POOL.latchCountDown();
@@ -291,9 +287,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
         POOL.updatePool((long) (0.8 * getTimeUnit().toMillis(getPeriod())));
       }
     } else {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Cleaner chore disabled! Not cleaning.");
-      }
+      LOG.trace("Cleaner chore disabled! Not cleaning.");
     }
   }
@@ -472,7 +466,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
     @Override
     protected Boolean compute() {
-      LOG.debug("Cleaning under {}", dir);
+      LOG.trace("Cleaning under {}", dir);
       List<FileStatus> subDirs;
       List<FileStatus> files;
       try {
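
Because the chore's progress messages now sit at TRACE, they disappear at
HBase's default log level. When they are wanted for debugging, the level can be
raised for just the cleaner package; a sketch against the log4j 1.2 API that
HBase ships with at this time (the package name is assumed from the classes in
this diff):

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class EnableCleanerTrace {
  public static void main(String[] args) {
    // Equivalent to adding
    //   log4j.logger.org.apache.hadoop.hbase.master.cleaner=TRACE
    // to conf/log4j.properties.
    Logger.getLogger("org.apache.hadoop.hbase.master.cleaner").setLevel(Level.TRACE);
  }
}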

HFileCleaner.java

@@ -235,7 +235,7 @@ public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate> {
         break;
       }
       if (task != null) {
-        LOG.debug("Removing {}", task.filePath);
+        LOG.trace("Removing {}", task.filePath);
         boolean succeed;
         try {
           succeed = this.fs.delete(task.filePath, false);
@@ -258,13 +258,13 @@ public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate> {
   private void countDeletedFiles(boolean isLargeFile, boolean fromLargeQueue) {
     if (isLargeFile) {
       if (deletedLargeFiles.get() == Long.MAX_VALUE) {
-        LOG.info("Deleted more than Long.MAX_VALUE large files, reset counter to 0");
+        LOG.debug("Deleted more than Long.MAX_VALUE large files, reset counter to 0");
         deletedLargeFiles.set(0L);
       }
       deletedLargeFiles.incrementAndGet();
     } else {
       if (deletedSmallFiles.get() == Long.MAX_VALUE) {
-        LOG.info("Deleted more than Long.MAX_VALUE small files, reset counter to 0");
+        LOG.debug("Deleted more than Long.MAX_VALUE small files, reset counter to 0");
         deletedSmallFiles.set(0L);
       }
       if (fromLargeQueue) {
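
The counters here saturate-and-reset rather than overflow: on reaching
Long.MAX_VALUE the count restarts at 0, and the commit demotes the reset
message from INFO to DEBUG since the reset is routine bookkeeping. A
self-contained sketch of the pattern (class and field names are illustrative):

import java.util.concurrent.atomic.AtomicLong;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SaturatingCounterSketch {
  private static final Logger LOG = LoggerFactory.getLogger(SaturatingCounterSketch.class);
  private final AtomicLong deletedFiles = new AtomicLong();

  void countDeleted() {
    // Reset manually at the maximum so the metric never goes negative;
    // the reset is harmless, hence DEBUG rather than INFO.
    if (deletedFiles.get() == Long.MAX_VALUE) {
      LOG.debug("Deleted more than Long.MAX_VALUE files, reset counter to 0");
      deletedFiles.set(0L);
    }
    deletedFiles.incrementAndGet();
  }
}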