SOLR-6677: Some more logging moves from INFO to DEBUG

Alan Woodward 2016-09-23 14:26:48 +01:00
parent 4c7a8c4b81
commit 255bf895b3
13 changed files with 46 additions and 54 deletions

DeleteReplicaCmd.java

@@ -71,7 +71,7 @@ public class DeleteReplicaCmd implements Cmd {
@SuppressWarnings("unchecked")
void deleteReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
throws KeeperException, InterruptedException {
log.info("deleteReplica() : {}", Utils.toJSONString(message));
log.debug("deleteReplica() : {}", Utils.toJSONString(message));
boolean parallel = message.getBool("parallel", false);
//If a count is specified the strategy needs to be different
@@ -142,7 +142,7 @@ public class DeleteReplicaCmd implements Cmd {
Set<String> replicas = shardToReplicasMapping.get(shardSlice);
//callDeleteReplica on all replicas
for (String replica: replicas) {
log.info("Deleting replica {} for shard {} based on count {}", replica, shardId, count);
log.debug("Deleting replica {} for shard {} based on count {}", replica, shardId, count);
deleteCore(shardSlice, collectionName, replica, message, shard, results, onComplete, parallel);
}
results.add("shard_id", shardId);
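
A side note on the pattern in this hunk: SLF4J's {} placeholders defer message formatting until the level check passes, but the argument expressions themselves are still evaluated eagerly. For a cheap argument like replica that is fine; for one that does real work, like Utils.toJSONString(message) above, the usual idiom is an explicit guard. A minimal sketch, reusing the names from the hunk:

    // The JSON rendering runs even when DEBUG is disabled, because Java
    // evaluates call arguments up front; the guard skips that work entirely.
    if (log.isDebugEnabled()) {
      log.debug("deleteReplica() : {}", Utils.toJSONString(message));
    }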

ElectionContext.java

@@ -81,14 +81,14 @@ public abstract class ElectionContext implements Closeable {
public void cancelElection() throws InterruptedException, KeeperException {
if (leaderSeqPath != null) {
try {
log.info("Canceling election {}", leaderSeqPath);
log.debug("Canceling election {}", leaderSeqPath);
zkClient.delete(leaderSeqPath, -1, true);
} catch (NoNodeException e) {
// fine
log.info("cancelElection did not find election node to remove {}", leaderSeqPath);
log.debug("cancelElection did not find election node to remove {}", leaderSeqPath);
}
} else {
log.info("cancelElection skipped as this context has not been initialized");
log.debug("cancelElection skipped as this context has not been initialized");
}
}
@@ -147,14 +147,14 @@ class ShardLeaderElectionContextBase extends ElectionContext {
// We do this by using a multi and ensuring the parent znode of the leader registration node
// matches the version we expect - there is a setData call that increments the parent's znode
// version whenever a leader registers.
log.info("Removing leader registration node on cancel: {} {}", leaderPath, leaderZkNodeParentVersion);
log.debug("Removing leader registration node on cancel: {} {}", leaderPath, leaderZkNodeParentVersion);
List<Op> ops = new ArrayList<>(2);
ops.add(Op.check(new Path(leaderPath).getParent().toString(), leaderZkNodeParentVersion));
ops.add(Op.delete(leaderPath, -1));
zkClient.multi(ops, true);
} catch (KeeperException.NoNodeException nne) {
// no problem
log.info("No leader registration node found to remove: {}", leaderPath);
log.debug("No leader registration node found to remove: {}", leaderPath);
} catch (KeeperException.BadVersionException bve) {
log.info("Cannot remove leader registration node because the current registered node is not ours: {}", leaderPath);
// no problem
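
For readers unfamiliar with the idiom described in the comment above, here is a minimal sketch of the conditional delete as one method, with leaderPath, leaderZkNodeParentVersion, and zkClient standing in for the fields used in the hunk. The multi succeeds only if the parent znode's version still matches the one recorded at registration, so a node registered by a later leader is never removed:

    // Sketch: atomically delete leaderPath only if its parent still has the expected version.
    void removeLeaderNode(SolrZkClient zkClient, String leaderPath, int leaderZkNodeParentVersion)
        throws KeeperException, InterruptedException {
      List<Op> ops = new ArrayList<>(2);
      ops.add(Op.check(new Path(leaderPath).getParent().toString(), leaderZkNodeParentVersion));
      ops.add(Op.delete(leaderPath, -1)); // -1 matches any version of the node itself
      zkClient.multi(ops, true);          // both ops apply atomically, or neither does
    }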
@@ -181,7 +181,7 @@ class ShardLeaderElectionContextBase extends ElectionContext {
try {
RetryUtil.retryOnThrowable(NodeExistsException.class, 60000, 5000, () -> {
synchronized (lock) {
log.info("Creating leader registration node {} after winning as {}", leaderPath, leaderSeqPath);
log.debug("Creating leader registration node {} after winning as {}", leaderPath, leaderSeqPath);
List<Op> ops = new ArrayList<>(2);
// We use a multi operation to get the parent nodes version, which will
@@ -306,7 +306,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
int leaderVoteWait = cc.getZkController().getLeaderVoteWait();
log.info("Running the leader process for shard={} and weAreReplacement={} and leaderVoteWait={}", shardId, weAreReplacement, leaderVoteWait);
log.debug("Running the leader process for shard={} and weAreReplacement={} and leaderVoteWait={}", shardId, weAreReplacement, leaderVoteWait);
// clear the leader in clusterstate
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.LEADER.toLower(),
ZkStateReader.SHARD_ID_PROP, shardId, ZkStateReader.COLLECTION_PROP, collection);
@@ -471,7 +471,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
: clusterState.getReplica(collection, leaderProps.getStr(ZkStateReader.CORE_NODE_NAME_PROP));
if (rep != null && rep.getState() != Replica.State.ACTIVE
&& rep.getState() != Replica.State.RECOVERING) {
log.info("We have become the leader after core registration but are not in an ACTIVE state - publishing ACTIVE");
log.debug("We have become the leader after core registration but are not in an ACTIVE state - publishing ACTIVE");
zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
}
}
@@ -643,7 +643,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
}
if (found >= slices.getReplicasMap().size()) {
log.info("All replicas are ready to participate in election.");
log.debug("All replicas are ready to participate in election.");
return true;
}
@@ -660,7 +660,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
throws InterruptedException, KeeperException, IOException {
// remove our ephemeral and rejoin the election
if (cc.isShutDown()) {
log.info("Not rejoining election because CoreContainer is closed");
log.debug("Not rejoining election because CoreContainer is closed");
return;
}
@@ -674,10 +674,10 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
}
private boolean shouldIBeLeader(ZkNodeProps leaderProps, SolrCore core, boolean weAreReplacement) {
log.info("Checking if I should try and be the leader.");
log.debug("Checking if I should try and be the leader.");
if (isClosed) {
log.info("Bailing on leader process because we have been closed");
log.debug("Bailing on leader process because we have been closed");
return false;
}
@@ -688,10 +688,10 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
}
if (core.getCoreDescriptor().getCloudDescriptor().getLastPublished() == Replica.State.ACTIVE) {
log.info("My last published State was Active, it's okay to be the leader.");
log.debug("My last published State was Active, it's okay to be the leader.");
return true;
}
log.info("My last published State was "
log.debug("My last published State was "
+ core.getCoreDescriptor().getCloudDescriptor().getLastPublished()
+ ", I won't be the leader.");
// TODO: and if no one is a good candidate?

Overseer.java

@@ -227,7 +227,7 @@ public class Overseer implements Closeable {
while (workQueue.poll() != null);
} catch (KeeperException.BadVersionException bve) {
log.warn("Bad version writing to ZK using compare-and-set, will force refresh cluster state", bve);
log.warn("Bad version writing to ZK using compare-and-set, will force refresh cluster state: {}", bve.getMessage());
refreshClusterState = true;
} catch (KeeperException e) {
if (e.code() == KeeperException.Code.SESSIONEXPIRED) {
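
One behavioural note on the warn-level change above: SLF4J treats a Throwable passed as the final argument specially and appends its full stack trace, so switching to bve.getMessage() deliberately trades the trace for a one-line entry. A sketch of the two forms:

    log.warn("CAS write to ZK failed", bve);                  // message plus full stack trace
    log.warn("CAS write to ZK failed: {}", bve.getMessage()); // single line, no trace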

OverseerTaskQueue.java

@@ -134,7 +134,7 @@ public class OverseerTaskQueue extends DistributedQueue {
return;
}
// If latchEventType is not null, only fire if the type matches
LOG.info("{} fired on path {} state {} latchEventType {}", event.getType(), event.getPath(), event.getState(), latchEventType);
LOG.debug("{} fired on path {} state {} latchEventType {}", event.getType(), event.getPath(), event.getState(), latchEventType);
if (latchEventType == null || event.getType() == latchEventType) {
synchronized (lock) {
this.event = event;
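
The watcher in this hunk fires for every ZooKeeper event on the node and then filters by type. A minimal sketch of that latch pattern, assuming only the plain ZooKeeper client API (the names here are illustrative, not the class's actual fields):

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;

    class LatchWatcher implements Watcher {
      private final Event.EventType latchEventType; // null means "wake on any node event"
      private final CountDownLatch latch = new CountDownLatch(1);

      LatchWatcher(Event.EventType latchEventType) {
        this.latchEventType = latchEventType;
      }

      @Override
      public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.None) return; // connection-state change, not a node event
        if (latchEventType == null || event.getType() == latchEventType) {
          latch.countDown(); // wake the thread blocked in await()
        }
      }

      void await() throws InterruptedException {
        latch.await();
      }
    }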

RecoveryStrategy.java

@@ -174,7 +174,7 @@ public class RecoveryStrategy extends Thread implements Closeable {
+ " from "
+ leaderUrl
+ " gen:"
+ core.getDeletionPolicy().getLatestCommit() != null ? "null" : core.getDeletionPolicy().getLatestCommit().getGeneration()
+ (core.getDeletionPolicy().getLatestCommit() == null ? "null" : core.getDeletionPolicy().getLatestCommit().getGeneration())
+ " data:" + core.getDataDir()
+ " index:" + core.getIndexDir()
+ " newIndex:" + core.getNewIndexDir()

ZkController.java

@@ -2515,7 +2515,7 @@ public class ZkController {
* @param nodeName to operate on
*/
public void publishNodeAsDown(String nodeName) {
log.info("Publish node={} as DOWN", nodeName);
log.debug("Publish node={} as DOWN", nodeName);
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.DOWNNODE.toLower(),
ZkStateReader.NODE_NAME_PROP, nodeName);
try {

NodeMutator.java

@@ -46,7 +46,7 @@ public class NodeMutator {
List<ZkWriteCommand> zkWriteCommands = new ArrayList<ZkWriteCommand>();
String nodeName = message.getStr(ZkStateReader.NODE_NAME_PROP);
log.info("DownNode state invoked for node: " + nodeName);
log.debug("DownNode state invoked for node: " + nodeName);
Map<String, DocCollection> collections = clusterState.getCollectionsMap();
for (Map.Entry<String, DocCollection> entry : collections.entrySet()) {
@@ -62,7 +62,7 @@ public class NodeMutator {
Map<String,Object> props = replica.shallowCopy();
String rNodeName = replica.getNodeName();
if (rNodeName.equals(nodeName)) {
log.info("Update replica state for " + replica + " to " + Replica.State.DOWN.toString());
log.debug("Update replica state for " + replica + " to " + Replica.State.DOWN.toString());
props.put(ZkStateReader.STATE_PROP, Replica.State.DOWN.toString());
}
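
The shallow copy above follows the cluster-state convention that Replica, Slice, and DocCollection are treated as immutable: the mutator edits a copy of the properties and rebuilds fresh objects rather than touching shared state. A rough sketch of the idiom, with downedNodeName standing in for the nodeName in the hunk:

    Map<String, Object> props = replica.shallowCopy();        // copy, never mutate the live Replica
    if (replica.getNodeName().equals(downedNodeName)) {
      props.put(ZkStateReader.STATE_PROP, Replica.State.DOWN.toString());
    }
    Replica updated = new Replica(replica.getName(), props);  // a fresh object carries the new state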

CachingDirectoryFactory.java

@@ -156,7 +156,7 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
@Override
public void close() throws IOException {
synchronized (this) {
log.info("Closing " + this.getClass().getSimpleName() + " - " + byDirectoryCache.size() + " directories currently being tracked");
log.debug("Closing {} - {} directories currently being tracked", this.getClass().getSimpleName(), byDirectoryCache.size());
this.closed = true;
Collection<CacheValue> values = byDirectoryCache.values();
for (CacheValue val : values) {
@@ -201,7 +201,7 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
}
for (CacheValue val : removeEntries) {
log.info("Removing directory after core close: " + val.path);
log.debug("Removing directory after core close: " + val.path);
try {
removeDirectory(val);
} catch (Exception e) {
@@ -224,7 +224,7 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
// be sure this is called with the this sync lock
// returns true if we closed the cacheValue, false if it will be closed later
private boolean closeCacheValue(CacheValue cacheValue) {
log.info("looking to close " + cacheValue.path + " " + cacheValue.closeEntries.toString());
log.debug("looking to close {} {}", cacheValue.path, cacheValue.closeEntries.toString());
List<CloseListener> listeners = closeListeners.remove(cacheValue.directory);
if (listeners != null) {
for (CloseListener listener : listeners) {
@@ -268,11 +268,11 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
for (CacheValue val : cacheValue.removeEntries) {
if (!val.deleteAfterCoreClose) {
log.info("Removing directory before core close: " + val.path);
log.debug("Removing directory before core close: " + val.path);
try {
removeDirectory(val);
} catch (Exception e) {
SolrException.log(log, "Error removing directory", e);
SolrException.log(log, "Error removing directory " + val.path + " before core close", e);
}
} else {
removeEntries.add(val);
@@ -292,13 +292,13 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
}
private void close(CacheValue val) {
log.info("Closing directory, CoreContainer#isShutdown={}", coreContainer != null ? coreContainer.isShutDown() : "null");
log.debug("Closing directory, CoreContainer#isShutdown={}", coreContainer != null ? coreContainer.isShutDown() : "null");
try {
if (coreContainer != null && coreContainer.isShutDown() && val.directory instanceof ShutdownAwareDirectory) {
log.info("Closing directory on shutdown: " + val.path);
log.debug("Closing directory on shutdown: " + val.path);
((ShutdownAwareDirectory) val.directory).closeOnShutdown();
} else {
log.info("Closing directory: " + val.path);
log.debug("Closing directory: " + val.path);
val.directory.close();
}
assert ObjectReleaseTracker.release(val.directory);
@@ -350,7 +350,7 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
CacheValue newCacheValue = new CacheValue(fullPath, directory);
byDirectoryCache.put(directory, newCacheValue);
byPathCache.put(fullPath, newCacheValue);
log.debug("return new directory for " + fullPath);
log.debug("return new directory for {}", fullPath);
success = true;
} finally {
if (!success) {
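
One detail worth noting in the closeCacheValue hunk above: the second argument is passed as cacheValue.closeEntries.toString(). With parameterized logging, SLF4J only renders arguments when the level is enabled, so passing the object itself keeps the rendering lazy, while an explicit toString() pays the cost up front even when DEBUG is off. A sketch of the two forms:

    log.debug("looking to close {} {}", cacheValue.path, cacheValue.closeEntries);            // toString() deferred
    log.debug("looking to close {} {}", cacheValue.path, cacheValue.closeEntries.toString()); // rendered eagerly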

SolrCore.java

@@ -2068,7 +2068,7 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
public void closeSearcher() {
log.info(logid+"Closing main searcher on request.");
log.debug(logid+"Closing main searcher on request.");
synchronized (searcherLock) {
if (realtimeSearcher != null) {
realtimeSearcher.decref();

SolrDeletionPolicy.java

@@ -77,14 +77,10 @@ public class SolrDeletionPolicy extends IndexDeletionPolicy implements NamedList
*/
@Override
public void onInit(List<? extends IndexCommit> commits) throws IOException {
// SOLR-4547: log basic data at INFO, add filenames at DEBUG.
if (commits.isEmpty()) {
return;
}
log.info("SolrDeletionPolicy.onInit: commits: {}",
new CommitsLoggingInfo(commits));
log.debug("SolrDeletionPolicy.onInit: commits: {}",
new CommitsLoggingDebug(commits));
log.debug("SolrDeletionPolicy.onInit: commits: {}", new CommitsLoggingDebug(commits));
updateCommits(commits);
}
@@ -93,11 +89,7 @@ public class SolrDeletionPolicy extends IndexDeletionPolicy implements NamedList
*/
@Override
public void onCommit(List<? extends IndexCommit> commits) throws IOException {
// SOLR-4547: log basic data at INFO, add filenames at DEBUG.
log.info("SolrDeletionPolicy.onCommit: commits: {}",
new CommitsLoggingInfo(commits));
log.debug("SolrDeletionPolicy.onCommit: commits: {}",
new CommitsLoggingDebug(commits));
log.debug("SolrDeletionPolicy.onCommit: commits: {}", new CommitsLoggingDebug(commits));
updateCommits(commits);
}
@@ -158,10 +150,7 @@ public class SolrDeletionPolicy extends IndexDeletionPolicy implements NamedList
synchronized (this) {
long maxCommitAgeTimeStamp = -1L;
IndexCommit newest = commits.get(commits.size() - 1);
// SOLR-4547: Removed the filenames from this log entry because this
// method is only called from methods that have just logged them
// at DEBUG.
log.info("newest commit generation = " + newest.getGeneration());
log.debug("newest commit generation = " + newest.getGeneration());
int singleSegKept = (newest.getSegmentCount() == 1) ? 1 : 0;
int totalKept = 1;
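
The CommitsLoggingInfo/CommitsLoggingDebug helpers being consolidated above rely on a useful idiom: wrap expensive rendering in an object whose toString() does the work, so the cost is only paid when the logger actually formats the message at an enabled level. A minimal sketch of the idea, with CommitsSummary as a hypothetical stand-in for those helpers:

    class CommitsSummary {
      private final List<? extends IndexCommit> commits;
      CommitsSummary(List<? extends IndexCommit> commits) { this.commits = commits; }
      @Override
      public String toString() { // only invoked if the log level is enabled
        StringBuilder sb = new StringBuilder("num=").append(commits.size());
        for (IndexCommit c : commits) {
          sb.append("\n\tcommit{generation=").append(c.getGeneration()).append('}');
        }
        return sb.toString();
      }
    }
    // usage: log.debug("SolrDeletionPolicy.onCommit: commits: {}", new CommitsSummary(commits));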

DefaultSolrCoreState.java

@@ -82,12 +82,12 @@ public final class DefaultSolrCoreState extends SolrCoreState implements Recover
private void closeIndexWriter(IndexWriterCloser closer) {
try {
log.info("SolrCoreState ref count has reached 0 - closing IndexWriter");
log.debug("SolrCoreState ref count has reached 0 - closing IndexWriter");
if (closer != null) {
log.info("closing IndexWriter with IndexWriterCloser");
log.debug("closing IndexWriter with IndexWriterCloser");
closer.closeWriter(indexWriter);
} else if (indexWriter != null) {
log.info("closing IndexWriter...");
log.debug("closing IndexWriter...");
indexWriter.close();
}
indexWriter = null;
@@ -184,14 +184,14 @@ public final class DefaultSolrCoreState extends SolrCoreState implements Recover
if (iw != null) {
if (!rollback) {
try {
log.info("Closing old IndexWriter... core=" + coreName);
log.debug("Closing old IndexWriter... core=" + coreName);
iw.close();
} catch (Exception e) {
SolrException.log(log, "Error closing old IndexWriter. core=" + coreName, e);
}
} else {
try {
log.info("Rollback old IndexWriter... core=" + coreName);
log.debug("Rollback old IndexWriter... core=" + coreName);
iw.rollback();
} catch (Exception e) {
SolrException.log(log, "Error rolling back old IndexWriter. core=" + coreName, e);
@@ -362,12 +362,15 @@ public final class DefaultSolrCoreState extends SolrCoreState implements Recover
/** called from recoveryStrat on a successful recovery */
@Override
public void recovered() {
recoveryStrat = null;
recoveringAfterStartup = false; // once we have successfully recovered, we no longer need to act as if we are recovering after startup
}
/** called from recoveryStrat on a failed recovery */
@Override
public void failed() {}
public void failed() {
recoveryStrat = null;
}
@Override
public synchronized void close(IndexWriterCloser closer) {

DirectUpdateHandler2.java

@@ -729,7 +729,7 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState
@Override
public void close() throws IOException {
log.info("closing " + this);
log.debug("closing " + this);
commitTracker.close();
softCommitTracker.close();

SolrCoreState.java

@@ -70,7 +70,7 @@ public abstract class SolrCoreState {
if (close) {
try {
log.info("Closing SolrCoreState");
log.debug("Closing SolrCoreState");
close(closer);
} catch (Exception e) {
log.error("Error closing SolrCoreState", e);