HBASE-20770 WAL cleaner logs way too much; gets clogged when lots of work to do
General log cleanup; setting stuff that can flood the log to TRACE.
This commit is contained in:
parent 025ddce868
commit 0db2b628d6
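The change applies one idiom throughout: replace string-concatenation log calls wrapped in level guards with SLF4J parameterized calls at a quieter level. A minimal sketch of the before/after shapes, assuming an SLF4J logger (class and method names here are illustrative, not from this commit):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingIdiom {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingIdiom.class);

  void before(String qop) {
    // Old shape: the guard avoids building the message string when DEBUG
    // is off, at the cost of three lines per log site.
    if (LOG.isDebugEnabled()) {
      LOG.debug("SASL client context established. Negotiated QoP: " + qop);
    }
  }

  void after(String qop) {
    // New shape: the {} placeholder defers formatting until SLF4J has
    // checked the level, so the guard is unnecessary and the site is one
    // line at TRACE, keeping routine detail out of DEBUG output.
    LOG.trace("SASL client context established. Negotiated QoP {}", qop);
  }
}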
@@ -45,10 +45,7 @@ public class NettyHBaseSaslRpcClient extends AbstractHBaseSaslRpcClient {
   public void setupSaslHandler(ChannelPipeline p) {
     String qop = (String) saslClient.getNegotiatedProperty(Sasl.QOP);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("SASL client context established. Negotiated QoP: " + qop);
-    }
+    LOG.trace("SASL client context established. Negotiated QoP {}", qop);
     if (qop == null || "auth".equalsIgnoreCase(qop)) {
       return;
     }

@@ -71,9 +71,7 @@ public class NettyHBaseSaslRpcClientHandler extends SimpleChannelInboundHandler<
   }
 
   private void writeResponse(ChannelHandlerContext ctx, byte[] response) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Will send token of size " + response.length + " from initSASLContext.");
-    }
+    LOG.trace("Sending token size={} from initSASLContext.", response.length);
     ctx.writeAndFlush(
       ctx.alloc().buffer(4 + response.length).writeInt(response.length).writeBytes(response));
   }

@@ -133,9 +131,7 @@ public class NettyHBaseSaslRpcClientHandler extends SimpleChannelInboundHandler<
       }
       return;
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Will read input token of size " + len + " for processing by initSASLContext");
-    }
+    LOG.trace("Reading input token size={} for processing by initSASLContext", len);
     final byte[] challenge = new byte[len];
     msg.readBytes(challenge);
     byte[] response = ugi.doAs(new PrivilegedExceptionAction<byte[]>() {

@@ -291,8 +291,11 @@ public class AssignProcedure extends RegionTransitionProcedure {
         " transition openSeqNum=" + openSeqNum + ", " + regionNode);
     }
     if (openSeqNum < regionNode.getOpenSeqNum()) {
-      LOG.warn("Skipping update of open seqnum with " + openSeqNum +
-        " because current seqnum=" + regionNode.getOpenSeqNum());
+      // Don't bother logging if openSeqNum == 0
+      if (openSeqNum != 0) {
+        LOG.warn("Skipping update of open seqnum with " + openSeqNum +
+          " because current seqnum=" + regionNode.getOpenSeqNum());
+      }
     } else {
       regionNode.setOpenSeqNum(openSeqNum);
     }

@@ -220,8 +220,7 @@ class RegionLocationFinder {
         tableDescriptor = this.services.getTableDescriptors().get(tableName);
       }
     } catch (FileNotFoundException fnfe) {
-      LOG.debug("FileNotFoundException during getTableDescriptors." + " Current table name = "
-        + tableName, fnfe);
+      LOG.debug("tableName={}", tableName, fnfe);
     }
 
     return tableDescriptor;

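Note that the shortened catch block above still records the stack trace: SLF4J treats a trailing Throwable that is left over after the {} placeholders are filled as the log event's exception rather than formatting it into the message. A small sketch of that behavior (hypothetical class, real SLF4J semantics):

import java.io.FileNotFoundException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ThrowableArgDemo {
  private static final Logger LOG = LoggerFactory.getLogger(ThrowableArgDemo.class);

  void demo(String tableName) {
    FileNotFoundException fnfe = new FileNotFoundException(tableName);
    // One placeholder consumes tableName; the extra fnfe argument has no
    // placeholder, so it is logged as the exception, stack trace included.
    LOG.debug("tableName={}", tableName, fnfe);
  }
}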
@@ -277,8 +276,7 @@ class RegionLocationFinder {
       blockDistbn = cache.get(hri);
       return blockDistbn;
     } else {
-      LOG.debug("HDFSBlocksDistribution not found in cache for region "
-        + hri.getRegionNameAsString());
+      LOG.trace("HDFSBlocksDistribution not found in cache for {}", hri.getRegionNameAsString());
       blockDistbn = internalGetTopBlockLocation(hri);
       cache.put(hri, blockDistbn);
       return blockDistbn;

@@ -272,13 +272,9 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
     try {
       POOL.latchCountUp();
       if (runCleaner()) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Cleaned all WALs under {}", oldFileDir);
-        }
+        LOG.trace("Cleaned all WALs under {}", oldFileDir);
       } else {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("WALs outstanding under {}", oldFileDir);
-        }
+        LOG.trace("WALs outstanding under {}", oldFileDir);
       }
     } finally {
       POOL.latchCountDown();

@@ -291,9 +287,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
         POOL.updatePool((long) (0.8 * getTimeUnit().toMillis(getPeriod())));
       }
     } else {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Cleaner chore disabled! Not cleaning.");
-      }
+      LOG.trace("Cleaner chore disabled! Not cleaning.");
     }
   }
 

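The two hunks above also drop if (LOG.isTraceEnabled()) guards around calls that were already parameterized; the guard buys nothing there, because {} formatting is deferred until the level check inside the logger. A guard still pays off only when computing an argument is itself expensive, as in this sketch (names are illustrative, not from this commit):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class GuardOnlyWhenExpensive {
  private static final Logger LOG = LoggerFactory.getLogger(GuardOnlyWhenExpensive.class);

  void demo() {
    // Cheap argument: no guard needed, the placeholder defers formatting.
    LOG.trace("WALs outstanding under {}", "/hbase/oldWALs");

    // Expensive argument: it would be evaluated before the logger could
    // check the level, so keep the guard around this one call site.
    if (LOG.isTraceEnabled()) {
      LOG.trace("Pool state: {}", expensiveSummary());
    }
  }

  private String expensiveSummary() {
    // Stand-in for a costly computation, e.g. walking a directory tree.
    return "summary";
  }
}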
@@ -472,7 +466,7 @@ public abstract class CleanerChore<T extends FileCleanerDelegate> extends Schedu
 
     @Override
     protected Boolean compute() {
-      LOG.debug("Cleaning under {}", dir);
+      LOG.trace("Cleaning under {}", dir);
       List<FileStatus> subDirs;
       List<FileStatus> files;
       try {

@@ -235,7 +235,7 @@ public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate> {
           break;
         }
         if (task != null) {
-          LOG.debug("Removing {}", task.filePath);
+          LOG.trace("Removing {}", task.filePath);
           boolean succeed;
           try {
             succeed = this.fs.delete(task.filePath, false);

@@ -258,13 +258,13 @@ public class HFileCleaner extends CleanerChore<BaseHFileCleanerDelegate> {
   private void countDeletedFiles(boolean isLargeFile, boolean fromLargeQueue) {
     if (isLargeFile) {
       if (deletedLargeFiles.get() == Long.MAX_VALUE) {
-        LOG.info("Deleted more than Long.MAX_VALUE large files, reset counter to 0");
+        LOG.debug("Deleted more than Long.MAX_VALUE large files, reset counter to 0");
         deletedLargeFiles.set(0L);
       }
       deletedLargeFiles.incrementAndGet();
     } else {
       if (deletedSmallFiles.get() == Long.MAX_VALUE) {
-        LOG.info("Deleted more than Long.MAX_VALUE small files, reset counter to 0");
+        LOG.debug("Deleted more than Long.MAX_VALUE small files, reset counter to 0");
         deletedSmallFiles.set(0L);
       }
       if (fromLargeQueue) {