LUCENE-7788: fail precommit on unparameterised log messages and examine for wasted work/objects

Erick Erickson 2020-04-27 08:34:10 -04:00
parent b25eabe6a9
commit ff4363675e
20 changed files with 332 additions and 171 deletions
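
The core pattern this commit enforces, as a quick illustration (a hypothetical class, not part of the patch): with string concatenation the message is built on every call, even when the log level is disabled, while SLF4J's {} placeholders defer all formatting until the message is actually emitted.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ParameterizedLoggingDemo {
  private static final Logger log = LoggerFactory.getLogger(ParameterizedLoggingDemo.class);

  void demo(String coreName, long version) {
    // Unparameterised: concatenation runs unconditionally -- wasted work and
    // garbage whenever INFO is disabled. The new precommit check flags this.
    log.info("Loading core " + coreName + " version " + version);

    // Parameterised: the message is only rendered if INFO is enabled.
    log.info("Loading core {} version {}", coreName, version);
  }
}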


@@ -58,7 +58,7 @@ class ValidateLogCallsTask extends DefaultTask {
   Set<String> dirsToCheck = ["solr/core/src/java/org/apache/solr/analysis"
       , "solr/core/src/java/org/apache/solr/api"
       , "solr/core/src/java/org/apache/solr/client"
-      // , "solr/core/src/java/org/apache/solr/cloud" // 120
+      , "solr/core/src/java/org/apache/solr/cloud" // 120
       , "solr/core/src/java/org/apache/solr/cloud/api"
       , "solr/core/src/java/org/apache/solr/cloud/autoscaling"
       , "solr/core/src/java/org/apache/solr/cloud/cdcr"
@@ -115,11 +115,19 @@ class ValidateLogCallsTask extends DefaultTask {
       , "Suggester.java" : [147, 181]
       , "UpdateLog.java" : [1976]
       , "V2HttpCall.java" : [158]
+      // checking against 8x in master, take these out usually.
+      // , "CoreContainer.java" : [1096]
+      // , "ConcurrentLFUCache.java" : [700, 911]
+      // , "ConcurrentLRUCache.java" : [911]
+      // , "DirectUpdateHandler2.java" : [844, 865]
+      // , "PeerSync.java" : [697]
+      // , "SolrIndexWriter.java" : [356]
+      // , "UpdateLog.java" : [1973]
   ]
   def logLevels = ["log.trace", "log.debug", "log.info", "log.warn", "log.error", "log.fatal"]
   def errsFound = 0;
-  def violations = new TreeSet();
+  def violations = new ArrayList();
   def reportViolation(String msg) {
     violations.add(System.lineSeparator + msg);
@@ -143,7 +151,7 @@ class ValidateLogCallsTask extends DefaultTask {
     // If the line has been explicitly checked, skip it.
     if (line.replaceAll("\\s", "").toLowerCase().contains("//logok")) {
-      return
+      return violation
     }
     // Strip all of the comments, things in quotes and the like.
     def level = ""
@@ -199,14 +207,18 @@ class ValidateLogCallsTask extends DefaultTask {
       violation = true
     }
     if (violation) {
-      reportViolation(String.format("Suspicious logging call File: '%s' line: '%d' log message: '%s' parent path: '%s'. Parameterize or surround with 'if (log.is*Enabled) {... stripped: '%s'"
-          , file.name
-          , lineNumber
-          , line
-          , file.getParentFile().getAbsolutePath()
-          , stripped
-      ))
+      reportViolation(String.format("Suspicious logging call, Parameterize or surround with 'if (log.is*Enabled) {..%s %s:%d"
+          , System.lineSeparator, file.getAbsolutePath(), lineNumber))
+      // reportViolation(String.format("Suspicious logging call File: '%s' line: '%d' log message: '%s' parent path: '%s'. Parameterize or surround with 'if (log.is*Enabled) {... stripped: '%s'"
+      //     , file.name
+      //     , lineNumber
+      //     , line
+      //     , file.getParentFile().getAbsolutePath()
+      //     , stripped
+      // ))
+      // reportViolation(String.format("%s:%d", file.getAbsolutePath(), lineNumber))
     }
+    return violation
   }
@@ -235,6 +247,7 @@ class ValidateLogCallsTask extends DefaultTask {
     int state = 0 // 0 == not collecting a log line, 1 == collecting a log line, 2 == just collected the last.
     int lineNumber = 0
     StringBuilder sb = new StringBuilder();
+    boolean foundViolation = false
     // We have to assemble line-by-line to get the full log statement. We require that all loggers (except requestLog
     // and slowLog) be exactly "log". That will be checked as well.
@@ -275,26 +288,44 @@ class ValidateLogCallsTask extends DefaultTask {
         case 1:
           break;
         case 2:
-          checkLogLine(file, sb.toString(), lineNumber, prevLine)
+          foundViolation = (checkLogLine(file, sb.toString(), lineNumber, prevLine)) ? true : foundViolation
           state = 0
           break;
         default:
           break;
       }
     }
+    return foundViolation
   }
   @TaskAction
   def checkLogLines() {
     dirsToCheck.add(srcDir)
+    //nocommit. This is here to check 8x on another branch since I can't run Gradle
+    // over 8x. Used periodically as a sanity check.
+    // new File("/Users/Erick/apache/solrJiras/master").traverse(type: groovy.io.FileType.FILES, nameFilter: ~/.*\.java/) { File it ->
+    //   if (dirsToCheck.any { dir ->
+    //     it.getCanonicalPath().contains(dir)
+    //   }) {
+    //     if (checkFile(it)) {
+    //       println(it.getAbsolutePath())
+    //       // nocommit. This just makes it much easier to get to the files during this mass migration!
+    //     }
+    //   }
+    // }
     project.sourceSets.each { srcSet ->
       srcSet.java.each { f ->
         if (dirsToCheck.any {
           f.getCanonicalPath().contains(it)
         }) {
-          checkFile(f)
+          if (checkFile(f)) {
+            // nocommit. This just makes it much easier to get to the files during this mass migration!
+            println(f.getAbsolutePath())
+          }
         }
-        //nocommit move this to project based whenever
+        // nocommit move
+        // this to project based whenever
         // if (srcDir != null && f.getCanonicalPath().contains(srcDir)) {
         //   checkFile(f)
         // }

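The violation message above names the two accepted remedies: parameterise the call, or wrap it in an is*Enabled() guard. A minimal sketch of both (hypothetical names; expensiveSummary() stands in for costly argument computation such as listing index files):

import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogCheckRemedies {
  private static final Logger log = LoggerFactory.getLogger(LogCheckRemedies.class);

  void remedies(List<String> tasks) {
    // Remedy 1: parameterise; SLF4J skips formatting when DEBUG is off.
    log.debug("Got {} tasks from work-queue : [{}]", tasks.size(), tasks);

    // Remedy 2: guard the call when computing an argument is itself expensive,
    // since arguments are still evaluated before a parameterised call.
    if (log.isDebugEnabled()) {
      log.debug("Task summary: {}", expensiveSummary(tasks));
    }
  }

  private String expensiveSummary(List<String> tasks) {
    return String.join(",", tasks); // placeholder for genuinely costly work
  }
}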

@@ -115,11 +115,16 @@ public class ActiveReplicaWatcher implements CollectionStateWatcher {
   // synchronized due to SOLR-11535
   @Override
   public synchronized boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
-    log.debug("-- onStateChanged@" + Long.toHexString(hashCode()) + ": replicaIds=" + replicaIds + ", solrCoreNames=" + solrCoreNames +
-        (latch != null ? "\nlatch count=" + latch.getCount() : "") +
-        "\ncollectionState=" + collectionState);
+    if (log.isDebugEnabled()) {
+      log.debug("-- onStateChanged@{}: replicaIds={}, solrCoreNames={} {}\ncollectionState {}"
+          , Long.toHexString(hashCode()), replicaIds, solrCoreNames
+          , (latch != null ? "\nlatch count=" + latch.getCount() : "")
+          , collectionState); // logOk
+    }
     if (collectionState == null) { // collection has been deleted - don't wait
-      log.debug("-- collection deleted, decrementing latch by " + replicaIds.size() + solrCoreNames.size());
+      if (log.isDebugEnabled()) {
+        log.debug("-- collection deleted, decrementing latch by {} ", replicaIds.size() + solrCoreNames.size()); // logOk
+      }
       if (latch != null) {
         for (int i = 0; i < replicaIds.size() + solrCoreNames.size(); i++) {
           latch.countDown();
@@ -134,7 +139,7 @@ public class ActiveReplicaWatcher implements CollectionStateWatcher {
       return true;
     }
     if (collectionState.getZNodeVersion() == lastZkVersion) {
-      log.debug("-- spurious call with already seen zkVersion=" + lastZkVersion + ", ignoring...");
+      log.debug("-- spurious call with already seen zkVersion= {}, ignoring...", lastZkVersion);
       return false;
     }
     lastZkVersion = collectionState.getZNodeVersion();
@@ -160,7 +165,9 @@ public class ActiveReplicaWatcher implements CollectionStateWatcher {
         }
       }
     }
-    log.debug("-- " + Long.toHexString(hashCode()) + " now latch count=" + latch.getCount());
+    if (log.isDebugEnabled()) {
+      log.debug("-- {} now latchcount={}", Long.toHexString(hashCode()), latch.getCount());
+    }
     if (replicaIds.isEmpty() && solrCoreNames.isEmpty()) {
       return true;
     } else {

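The trailing // logOk comments above are the escape hatch for reviewed calls: checkLogLine strips whitespace and lower-cases each line before testing for "//logok", so the marker is whitespace- and case-insensitive. A hypothetical example of a call that keeps deliberate concatenation inside a guard and opts out of the check:

import java.util.concurrent.CountDownLatch;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogOkDemo {
  private static final Logger log = LoggerFactory.getLogger(LogOkDemo.class);

  void report(CountDownLatch latch) {
    if (log.isDebugEnabled()) {
      // The ternary still concatenates, but only inside the guard;
      // the marker tells ValidateLogCallsTask to skip this line.
      log.debug("latch {}", (latch != null ? "count=" + latch.getCount() : "none")); // logOk
    }
  }
}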

@@ -94,7 +94,7 @@ public class CloudConfigSetService extends ConfigSetService {
     try {
       stat = zkController.getZkClient().exists(zkPath, null, true);
     } catch (KeeperException e) {
-      log.warn("Unexpected exception when getting modification time of " + zkPath, e);
+      log.warn("Unexpected exception when getting modification time of {}", zkPath, e);
       return null; // debatable; we'll see an error soon if there's a real problem
     } catch (InterruptedException e) {
       Thread.currentThread().interrupt();

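The change above leans on a standard SLF4J behavior: when the final argument is a Throwable with no matching placeholder, it is treated as the exception and its stack trace is printed. A small sketch (hypothetical class):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class WarnWithCause {
  private static final Logger log = LoggerFactory.getLogger(WarnWithCause.class);

  void onError(String zkPath, Exception e) {
    // One {} placeholder, two trailing arguments: zkPath fills the placeholder,
    // and the Throwable's stack trace is appended -- no concatenation needed.
    log.warn("Unexpected exception when getting modification time of {}", zkPath, e);
  }
}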

@@ -91,9 +91,9 @@ public class CloudUtil {
       SolrException.log(log, "Failed to delete instance dir for core:"
           + desc.getName() + " dir:" + desc.getInstanceDir());
     }
-    log.error("", new SolrException(ErrorCode.SERVER_ERROR,
-        "Will not load SolrCore " + desc.getName()
-        + " because it has been replaced due to failover."));
+    log.error("{}",
+        new SolrException(ErrorCode.SERVER_ERROR, "Will not load SolrCore " + desc.getName()
+            + " because it has been replaced due to failover.")); // logOk
     throw new SolrException(ErrorCode.SERVER_ERROR,
         "Will not load SolrCore " + desc.getName()
         + " because it has been replaced due to failover.");
@@ -249,7 +249,9 @@ public class CloudUtil {
     }
     Collection<Slice> slices = withInactive ? collectionState.getSlices() : collectionState.getActiveSlices();
     if (slices.size() != expectedShards) {
-      log.debug("-- wrong number of slices for collection {}, expected={}, found={}: {}", collectionState.getName(), expectedShards, collectionState.getSlices().size(), collectionState.getSlices());
+      if (log.isDebugEnabled()) {
+        log.debug("-- wrong number of slices for collection {}, expected={}, found={}: {}", collectionState.getName(), expectedShards, collectionState.getSlices().size(), collectionState.getSlices());
+      }
       return false;
     }
     Set<String> leaderless = new HashSet<>();
@@ -268,7 +270,9 @@ public class CloudUtil {
           activeReplicas++;
       }
       if (activeReplicas != expectedReplicas) {
-        log.debug("-- wrong number of active replicas for collection {} in slice {}, expected={}, found={}", collectionState.getName(), slice.getName(), expectedReplicas, activeReplicas);
+        if (log.isDebugEnabled()) {
+          log.debug("-- wrong number of active replicas for collection {} in slice {}, expected={}, found={}", collectionState.getName(), slice.getName(), expectedReplicas, activeReplicas);
+        }
         return false;
       }
     }


@@ -436,8 +436,10 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
           RefCounted<SolrIndexSearcher> searchHolder = core.getNewestSearcher(false);
           SolrIndexSearcher searcher = searchHolder.get();
           try {
-            log.debug(core.getCoreContainer().getZkController().getNodeName() + " synched "
-                + searcher.count(new MatchAllDocsQuery()));
+            if (log.isDebugEnabled()) {
+              log.debug("{} synched {}", core.getCoreContainer().getZkController().getNodeName()
+                  , searcher.count(new MatchAllDocsQuery()));
+            }
           } finally {
             searchHolder.decref();
           }
@@ -472,8 +474,8 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
       }
       // in case of leaderVoteWait timeout, a replica with lower term can win the election
       if (setTermToMax) {
-        log.error("WARNING: Potential data loss -- Replica {} became leader after timeout (leaderVoteWait) " +
-            "without being up-to-date with the previous leader", coreNodeName);
+        log.error("WARNING: Potential data loss -- Replica {} became leader after timeout (leaderVoteWait) {}"
+            , "without being up-to-date with the previous leader", coreNodeName);
         zkController.getShardTerms(collection, shardId).setTermEqualsToLeader(coreNodeName);
       }
       super.runLeaderProcess(weAreReplacement, 0);
@@ -485,7 +487,9 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
           return;
         }
       }
-      log.info("I am the new leader: " + ZkCoreNodeProps.getCoreUrl(leaderProps) + " " + shardId);
+      if (log.isInfoEnabled()) {
+        log.info("I am the new leader: {} {}", ZkCoreNodeProps.getCoreUrl(leaderProps), shardId);
+      }
       // we made it as leader - send any recovery requests we need to
       syncStrategy.requestRecoveries();
@@ -500,7 +504,9 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
     try (SolrCore core = cc.getCore(coreName)) {
       if (core == null) {
-        log.debug("SolrCore not found:" + coreName + " in " + cc.getLoadedCoreNames());
+        if (log.isDebugEnabled()) {
+          log.debug("SolrCore not found: {} in {}", coreName, cc.getLoadedCoreNames());
+        }
         return;
       }
@@ -532,8 +538,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
     long timeoutAt = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeout, TimeUnit.MILLISECONDS);
     while (!isClosed && !cc.isShutDown()) {
       if (System.nanoTime() > timeoutAt) {
-        log.warn("After waiting for {}ms, no other potential leader was found, {} try to become leader anyway (" +
-            "core_term:{}, highest_term:{})",
+        log.warn("After waiting for {}ms, no other potential leader was found, {} try to become leader anyway (core_term:{}, highest_term:{})",
             timeout, coreNodeName, zkShardTerms.getTerm(coreNodeName), zkShardTerms.getHighestTerm());
         return true;
       }
@@ -625,11 +630,11 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
             return true;
           } else {
             if (cnt % 40 == 0) {
-              log.info("Waiting until we see more replicas up for shard {}: total={}"
-                  + " found={}"
-                  + " timeoutin={}ms",
-                  shardId, slices.getReplicas(EnumSet.of(Replica.Type.TLOG, Replica.Type.NRT)).size(), found,
-                  TimeUnit.MILLISECONDS.convert(timeoutAt - System.nanoTime(), TimeUnit.NANOSECONDS));
+              if (log.isInfoEnabled()) {
+                log.info("Waiting until we see more replicas up for shard {}: total={} found={} timeoute in={}ms"
+                    , shardId, slices.getReplicas(EnumSet.of(Replica.Type.TLOG, Replica.Type.NRT)).size(), found,
+                    TimeUnit.MILLISECONDS.convert(timeoutAt - System.nanoTime(), TimeUnit.NANOSECONDS));
+              }
             }
           }
@@ -638,7 +643,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
           return false;
         }
       } else {
-        log.warn("Shard not found: " + shardId + " for collection " + collection);
+        log.warn("Shard not found: {} for collection {}", shardId, collection);
         return false;
@@ -676,13 +681,10 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
           log.debug("All replicas are ready to participate in election.");
           return true;
         }
       } else {
-        log.warn("Shard not found: " + shardId + " for collection " + collection);
+        log.warn("Shard not found: {} for collection {}", shardId, collection);
         return false;
       }
       return false;
     }


@@ -279,12 +279,16 @@ class ExclusiveSliceProperty {
   }
   private void removeProp(Slice origSlice, String replicaName) {
-    log.debug("Removing property {} from slice {}, replica {}", property, origSlice.getName(), replicaName);
+    if (log.isDebugEnabled()) {
+      log.debug("Removing property {} from slice {}, replica {}", property, origSlice.getName(), replicaName);
+    }
     getReplicaFromChanged(origSlice, replicaName).getProperties().remove(property);
   }
   private void addProp(Slice origSlice, String replicaName) {
-    log.debug("Adding property {} to slice {}, replica {}", property, origSlice.getName(), replicaName);
+    if (log.isDebugEnabled()) {
+      log.debug("Adding property {} to slice {}, replica {}", property, origSlice.getName(), replicaName);
+    }
     getReplicaFromChanged(origSlice, replicaName).getProperties().put(property, "true");
   }


@@ -341,7 +341,7 @@ public class LeaderElector {
     } catch (KeeperException.NoNodeException nne) {
       // expected . don't do anything
     } catch (Exception e) {
-      log.warn("My watched node still exists and can't remove " + myNode, e);
+      log.warn("My watched node still exists and can't remove {}", myNode, e);
     }
     return;
   }


@@ -141,7 +141,7 @@ public class LockTree {
   void unlock(LockImpl lockObject) {
     if (myLock == lockObject) myLock = null;
     else {
-      log.info("Unlocked multiple times : {}", lockObject.toString());
+      log.info("Unlocked multiple times : {}", lockObject);
     }
   }
@@ -171,7 +171,7 @@ public class LockTree {
   void clear() {
     if (myLock != null) {
-      log.warn("lock_is_leaked at" + constructPath(new LinkedList<>()));
+      log.warn("lock_is_leaked at {}", constructPath(new LinkedList<>()));
       myLock = null;
     }
     for (Node node : children.values()) node.clear();

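Dropping the explicit toString() calls, as in the LockTree change above, matters because SLF4J invokes toString() on a {} argument only when the message is actually rendered. A minimal sketch (hypothetical class):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LazyToString {
  private static final Logger log = LoggerFactory.getLogger(LazyToString.class);

  void unlock(Object lockObject) {
    // log.info("... {}", lockObject.toString()) would build the String even
    // when INFO is disabled; passing the object defers toString() until the
    // message is rendered.
    log.info("Unlocked multiple times : {}", lockObject);
  }
}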

@@ -149,7 +149,9 @@ public class Overseer implements SolrCloseable {
         isLeader = amILeader(); // not a no, not a yes, try ask again
       }
-      log.info("Starting to work on the main queue : {}", LeaderElector.getNodeName(myId));
+      if (log.isInfoEnabled()) {
+        log.info("Starting to work on the main queue : {}", LeaderElector.getNodeName(myId));
+      }
       try {
         ZkStateWriter zkStateWriter = null;
         ClusterState clusterState = null;
@@ -182,7 +184,9 @@ public class Overseer implements SolrCloseable {
             byte[] data = fallbackQueue.peek();
             while (fallbackQueueSize > 0 && data != null) {
               final ZkNodeProps message = ZkNodeProps.load(data);
-              log.debug("processMessage: fallbackQueueSize: {}, message = {}", fallbackQueue.getZkStats().getQueueLength(), message);
+              if (log.isDebugEnabled()) {
+                log.debug("processMessage: fallbackQueueSize: {}, message = {}", fallbackQueue.getZkStats().getQueueLength(), message);
+              }
               // force flush to ZK after each message because there is no fallback if workQueue items
               // are removed from workQueue but fail to be written to ZK
               try {
@@ -239,7 +243,9 @@ public class Overseer implements SolrCloseable {
           for (Pair<String, byte[]> head : queue) {
             byte[] data = head.second();
             final ZkNodeProps message = ZkNodeProps.load(data);
-            log.debug("processMessage: queueSize: {}, message = {} current state version: {}", stateUpdateQueue.getZkStats().getQueueLength(), message, clusterState.getZkClusterStateVersion());
+            if (log.isDebugEnabled()) {
+              log.debug("processMessage: queueSize: {}, message = {} current state version: {}", stateUpdateQueue.getZkStats().getQueueLength(), message, clusterState.getZkClusterStateVersion());
+            }
             processedNodes.add(head.first());
             fallbackQueueSize = processedNodes.size();
@@ -274,7 +280,9 @@ public class Overseer implements SolrCloseable {
           }
         }
       } finally {
-        log.info("Overseer Loop exiting : {}", LeaderElector.getNodeName(myId));
+        if (log.isInfoEnabled()) {
+          log.info("Overseer Loop exiting : {}", LeaderElector.getNodeName(myId));
+        }
         //do this in a separate thread because any wait is interrupted in this main thread
         new Thread(this::checkIfIamStillLeader, "OverseerExitThread").start();
       }
@@ -306,7 +314,7 @@ public class Overseer implements SolrCloseable {
         // ZooKeeper in which case another Overseer should take over
         // TODO: if ordering for the message is not important, we could
         // track retries and put it back on the end of the queue
-        log.error("Overseer could not process the current clusterstate state update message, skipping the message: " + message, e);
+        log.error("Overseer could not process the current clusterstate state update message, skipping the message: {}", message, e);
         stats.error(operation);
       } finally {
         timerContext.stop();
@@ -346,7 +354,7 @@ public class Overseer implements SolrCloseable {
       } catch (KeeperException.BadVersionException e) {
         //no problem ignore it some other Overseer has already taken over
       } catch (Exception e) {
-        log.error("Could not delete my leader node "+path, e);
+        log.error("Could not delete my leader node {}", path, e);
       }
     } else{
@@ -419,7 +427,9 @@ public class Overseer implements SolrCloseable {
         return Collections.singletonList(new SliceMutator(getSolrCloudManager()).updateShardState(clusterState, message));
       case QUIT:
         if (myId.equals(message.get(ID))) {
-          log.info("Quit command received {} {}", message, LeaderElector.getNodeName(myId));
+          if (log.isInfoEnabled()) {
+            log.info("Quit command received {} {}", message, LeaderElector.getNodeName(myId));
+          }
           overseerCollectionConfigSetProcessor.close();
           close();
         } else {
@@ -562,7 +572,7 @@ public class Overseer implements SolrCloseable {
     closed = false;
     doClose();
     stats = new Stats();
-    log.info("Overseer (id=" + id + ") starting");
+    log.info("Overseer (id={}) starting", id);
     createOverseerNode(reader.getZkClient());
     //launch cluster state updater thread
     ThreadGroup tg = new ThreadGroup("Overseer state updater.");
@@ -745,7 +755,7 @@ public class Overseer implements SolrCloseable {
   public synchronized void close() {
     if (this.id != null) {
-      log.info("Overseer (id=" + id + ") closing");
+      log.info("Overseer (id={}) closing", id);
     }
     this.closed = true;
     doClose();


@@ -98,7 +98,7 @@ public class OverseerConfigSetMessageHandler implements OverseerMessageHandler {
           + " expected: " + CONFIGSETS_ACTION_PREFIX);
     }
     operation = operation.substring(CONFIGSETS_ACTION_PREFIX.length());
-    log.info("OverseerConfigSetMessageHandler.processMessage : " + operation + " , " + message.toString());
+    log.info("OverseerConfigSetMessageHandler.processMessage : {}, {}", operation, message);
     ConfigSetParams.ConfigSetAction action = ConfigSetParams.ConfigSetAction.get(operation);
     if (action == null) {


@@ -92,7 +92,9 @@ public class OverseerNodePrioritizer {
     if(!designateNodeId.equals( electionNodes.get(1))) { //checking if it is already at no:1
       log.info("asking node {} to come join election at head", designateNodeId);
       invokeOverseerOp(designateNodeId, "rejoinAtHead"); //ask designate to come first
-      log.info("asking the old first in line {} to rejoin election ",electionNodes.get(1) );
+      if (log.isInfoEnabled()) {
+        log.info("asking the old first in line {} to rejoin election ", electionNodes.get(1));
+      }
       invokeOverseerOp(electionNodes.get(1), "rejoin");//ask second inline to go behind
     }
     //now ask the current leader to QUIT , so that the designate can takeover


@@ -208,7 +208,9 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
           continue; // not a no, not a yes, try asking again
         }
-        log.debug("Cleaning up work-queue. #Running tasks: {} #Completed tasks: {}", runningTasksSize(), completedTasks.size());
+        if (log.isDebugEnabled()) {
+          log.debug("Cleaning up work-queue. #Running tasks: {} #Completed tasks: {}", runningTasksSize(), completedTasks.size());
+        }
         cleanUpWorkQueue();
         printTrackingMaps();
@@ -236,7 +238,9 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
             //instead of reading MAX_PARALLEL_TASKS items always, we should only fetch as much as we can execute
             int toFetch = Math.min(MAX_BLOCKED_TASKS - heads.size(), MAX_PARALLEL_TASKS - runningTasksSize());
             List<QueueEvent> newTasks = workQueue.peekTopN(toFetch, excludedTasks, 2000L);
-            log.debug("Got {} tasks from work-queue : [{}]", newTasks.size(), newTasks);
+            if (log.isDebugEnabled()) {
+              log.debug("Got {} tasks from work-queue : [{}]", newTasks.size(), newTasks);
+            }
             heads.addAll(newTasks);
           } else {
             // Prevent free-spinning this loop.
@@ -281,14 +285,16 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
           }
           String operation = message.getStr(Overseer.QUEUE_OPERATION);
           if (operation == null) {
-            log.error("Msg does not have required " + Overseer.QUEUE_OPERATION + ": {}", message);
+            log.error("Msg does not have required {} : {}", Overseer.QUEUE_OPERATION, message);
             workQueue.remove(head);
             continue;
           }
           OverseerMessageHandler messageHandler = selector.selectOverseerMessageHandler(message);
           OverseerMessageHandler.Lock lock = messageHandler.lockTask(message, taskBatch);
           if (lock == null) {
-            log.debug("Exclusivity check failed for [{}]", message.toString());
+            if (log.isDebugEnabled()) {
+              log.debug("Exclusivity check failed for [{}]", message);
+            }
             //we may end crossing the size of the MAX_BLOCKED_TASKS. They are fine
             if (blockedTasks.size() < MAX_BLOCKED_TASKS)
               blockedTasks.put(head.getId(), head);
@@ -296,7 +302,9 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
           }
           try {
             markTaskAsRunning(head, asyncId);
-            log.debug("Marked task [{}] as running", head.getId());
+            if (log.isDebugEnabled()) {
+              log.debug("Marked task [{}] as running", head.getId());
+            }
           } catch (KeeperException.NodeExistsException e) {
             lock.unlock();
             // This should never happen
@@ -308,7 +316,9 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
             Thread.currentThread().interrupt();
             continue;
           }
-          log.debug(messageHandler.getName() + ": Get the message id:" + head.getId() + " message:" + message.toString());
+          if (log.isDebugEnabled()) {
+            log.debug("{}: Get the message id: {} message: {}", messageHandler.getName(), head.getId(), message);
+          }
           Runner runner = new Runner(messageHandler, message,
               operation, head, lock);
           tpe.execute(runner);
@@ -500,7 +510,9 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
       try {
         try {
-          log.debug("Runner processing {}", head.getId());
+          if (log.isDebugEnabled()) {
+            log.debug("Runner processing {}", head.getId());
+          }
           response = messageHandler.processMessage(message, operation);
         } finally {
           timerContext.stop();
@@ -511,22 +523,31 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
           if (response != null && (response.getResponse().get("failure") != null
               || response.getResponse().get("exception") != null)) {
             failureMap.put(asyncId, OverseerSolrResponseSerializer.serialize(response));
-            log.debug("Updated failed map for task with zkid:[{}]", head.getId());
+            if (log.isDebugEnabled()) {
+              log.debug("Updated failed map for task with zkid:[{}]", head.getId());
+            }
           } else {
             completedMap.put(asyncId, OverseerSolrResponseSerializer.serialize(response));
-            log.debug("Updated completed map for task with zkid:[{}]", head.getId());
+            if (log.isDebugEnabled()) {
+              log.debug("Updated completed map for task with zkid:[{}]", head.getId());
+            }
           }
         } else {
           head.setBytes(OverseerSolrResponseSerializer.serialize(response));
-          log.debug("Completed task:[{}]", head.getId());
+          if (log.isDebugEnabled()) {
+            log.debug("Completed task:[{}]", head.getId());
+          }
         }
         markTaskComplete(head.getId(), asyncId);
-        log.debug("Marked task [{}] as completed.", head.getId());
+        if (log.isDebugEnabled()) {
+          log.debug("Marked task [{}] as completed.", head.getId());
+        }
         printTrackingMaps();
-        log.debug(messageHandler.getName() + ": Message id:" + head.getId() +
-            " complete, response:" + response.getResponse().toString());
+        if (log.isDebugEnabled()) {
+          log.debug("{}: Message id: {} complete, response: {}", messageHandler.getName(), head.getId(), response.getResponse());
+        }
         success = true;
       } catch (AlreadyClosedException e) {
@@ -565,7 +586,7 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
         if (asyncId != null) {
           if (!runningMap.remove(asyncId)) {
-            log.warn("Could not find and remove async call [" + asyncId + "] from the running map.");
+            log.warn("Could not find and remove async call [{}] from the running map.", asyncId);
           }
         }
@@ -577,7 +598,7 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
       try {
         if (asyncId != null) {
           if (!runningMap.remove(asyncId)) {
-            log.warn("Could not find and remove async call [" + asyncId + "] from the running map.");
+            log.warn("Could not find and remove async call [{}] from the running map.", asyncId);
           }
         }
@@ -612,14 +633,18 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
   private void printTrackingMaps() {
     if (log.isDebugEnabled()) {
       synchronized (runningTasks) {
-        log.debug("RunningTasks: {}", runningTasks.toString());
+        log.debug("RunningTasks: {}", runningTasks);
+      }
+      if (log.isDebugEnabled()) {
+        log.debug("BlockedTasks: {}", blockedTasks.keySet());
       }
-      log.debug("BlockedTasks: {}", blockedTasks.keySet().toString());
       synchronized (completedTasks) {
-        log.debug("CompletedTasks: {}", completedTasks.keySet().toString());
+        if (log.isDebugEnabled()) {
+          log.debug("CompletedTasks: {}", completedTasks.keySet());
+        }
       }
       synchronized (runningZKTasks) {
-        log.info("RunningZKTasks: {}", runningZKTasks.toString());
+        log.info("RunningZKTasks: {}", runningZKTasks);
       }
     }
   }

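As printTrackingMaps shows, a single isDebugEnabled() check can cover a whole block of debug statements; the inner calls then cost nothing when DEBUG is off. A compact sketch of that shape (hypothetical class and fields):

import java.util.Map;
import java.util.Set;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TrackingMapsDemo {
  private static final Logger log = LoggerFactory.getLogger(TrackingMapsDemo.class);

  void printTrackingMaps(Set<String> runningTasks, Map<String, Object> blockedTasks) {
    // One guard, several debug lines: nothing below executes unless DEBUG is on.
    if (log.isDebugEnabled()) {
      log.debug("RunningTasks: {}", runningTasks);
      log.debug("BlockedTasks: {}", blockedTasks.keySet());
    }
  }
}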

@@ -88,7 +88,9 @@ public class OverseerTaskQueue extends ZkDistributedQueue {
       if (data != null) {
         ZkNodeProps message = ZkNodeProps.load(data);
         if (message.containsKey(requestIdKey)) {
-          log.debug("Looking for {}, found {}", message.get(requestIdKey), requestId);
+          if (log.isDebugEnabled()) {
+            log.debug("Looking for {}, found {}", message.get(requestIdKey), requestId);
+          }
           if(message.get(requestIdKey).equals(requestId)) return true;
         }
       }
@@ -116,8 +118,7 @@ public class OverseerTaskQueue extends ZkDistributedQueue {
       zookeeper.setData(responsePath, event.getBytes(), true);
     } catch (KeeperException.NoNodeException ignored) {
       // we must handle the race case where the node no longer exists
-      log.info("Response ZK path: " + responsePath + " doesn't exist."
-          + " Requestor may have disconnected from ZooKeeper");
+      log.info("Response ZK path: {} doesn't exist. Requestor may have disconnected from ZooKeeper", responsePath);
     }
     try {
       zookeeper.delete(path, -1, true);
@@ -156,7 +157,9 @@ public class OverseerTaskQueue extends ZkDistributedQueue {
         return;
       }
       // If latchEventType is not null, only fire if the type matches
-      log.debug("{} fired on path {} state {} latchEventType {}", event.getType(), event.getPath(), event.getState(), latchEventType);
+      if (log.isDebugEnabled()) {
+        log.debug("{} fired on path {} state {} latchEventType {}", event.getType(), event.getPath(), event.getState(), latchEventType);
+      }
       if (latchEventType == null || event.getType() == latchEventType) {
         lock.lock();
         try {
@@ -283,7 +286,7 @@ public class OverseerTaskQueue extends ZkDistributedQueue {
         sb.append(queueEvent.getId()).append(", ");
       }
       sb.append("]");
-      log.debug("Returning topN elements: {}", sb.toString());
+      log.debug("Returning topN elements: {}", sb);
     }
   }


@@ -62,7 +62,9 @@ public class RecoveringCoreTermWatcher implements ZkShardTerms.CoreTermWatcher {
         solrCore.getUpdateHandler().getSolrCoreState().doRecovery(solrCore.getCoreContainer(), solrCore.getCoreDescriptor());
       }
     } catch (Exception e) {
-      log.info("Failed to watch term of core {}", coreDescriptor.getName(), e);
+      if (log.isInfoEnabled()) {
+        log.info("Failed to watch term of core {}", coreDescriptor.getName(), e);
+      }
       return false;
     }


@@ -262,18 +262,17 @@ public class RecoveryStrategy implements Runnable, Closeable {
         Directory dir = core.getDirectoryFactory().get(core.getIndexDir(), DirContext.META_DATA, null);
         try {
           final IndexCommit commit = core.getDeletionPolicy().getLatestCommit();
-          log.debug(core.getCoreContainer()
-              .getZkController().getNodeName()
-              + " replicated "
-              + searcher.count(new MatchAllDocsQuery())
-              + " from "
-              + leaderUrl
-              + " gen:"
-              + (null == commit ? "null" : commit.getGeneration())
-              + " data:" + core.getDataDir()
-              + " index:" + core.getIndexDir()
-              + " newIndex:" + core.getNewIndexDir()
-              + " files:" + Arrays.asList(dir.listAll()));
+          if (log.isDebugEnabled()) {
+            log.debug("{} replicated {} from {} gen: {} data: {} index: {} newIndex: {} files: {}"
+                , core.getCoreContainer().getZkController().getNodeName()
+                , searcher.count(new MatchAllDocsQuery())
+                , leaderUrl
+                , (null == commit ? "null" : commit.getGeneration())
+                , core.getDataDir()
+                , core.getIndexDir()
+                , core.getNewIndexDir()
+                , Arrays.asList(dir.listAll()));
+          }
         } finally {
           core.getDirectoryFactory().release(dir);
           searchHolder.decref();
@@ -310,7 +309,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
       return;
     }
-    log.info("Starting recovery process. recoveringAfterStartup=" + recoveringAfterStartup);
+    log.info("Starting recovery process. recoveringAfterStartup={}", recoveringAfterStartup);
     try {
       doRecovery(core);
@@ -373,12 +372,16 @@ public class RecoveryStrategy implements Runnable, Closeable {
         return;
       }
-      log.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(), leaderUrl,
-          ourUrl);
+      if (log.isInfoEnabled()) {
+        log.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(), leaderUrl,
+            ourUrl);
+      }
       zkController.publish(this.coreDescriptor, Replica.State.RECOVERING);
       if (isClosed()) {
-        log.info("Recovery for core {} has been closed", core.getName());
+        if (log.isInfoEnabled()) {
+          log.info("Recovery for core {} has been closed", core.getName());
+        }
         break;
       }
       log.info("Starting Replication Recovery.");
@@ -389,7 +392,9 @@ public class RecoveryStrategy implements Runnable, Closeable {
         replicate(zkController.getNodeName(), core, leaderprops);
         if (isClosed()) {
-          log.info("Recovery for core {} has been closed", core.getName());
+          if (log.isInfoEnabled()) {
+            log.info("Recovery for core {} has been closed", core.getName());
+          }
           break;
         }
@@ -427,11 +432,13 @@ public class RecoveryStrategy implements Runnable, Closeable {
       try {
         if (isClosed()) {
-          log.info("Recovery for core {} has been closed", core.getName());
+          if (log.isInfoEnabled()) {
+            log.info("Recovery for core {} has been closed", core.getName());
+          }
           break;
         }
-        log.error("Recovery failed - trying again... (" + retries + ")");
+        log.error("Recovery failed - trying again... ({})", retries);
         retries++;
         if (retries >= maxRetries) {
@@ -453,11 +460,15 @@ public class RecoveryStrategy implements Runnable, Closeable {
           // will always be the minimum of the two (12). Since we sleep at 5 seconds sub-intervals in
           // order to check if we were closed, 12 is chosen as the maximum loopCount (5s * 12 = 1m).
           int loopCount = retries < 4 ? (int) Math.min(Math.pow(2, retries), 12) : 12;
-          log.info("Wait [{}] seconds before trying to recover again (attempt={})",
-              TimeUnit.MILLISECONDS.toSeconds(loopCount * startingRecoveryDelayMilliSeconds), retries);
+          if (log.isInfoEnabled()) {
+            log.info("Wait [{}] seconds before trying to recover again (attempt={})",
+                TimeUnit.MILLISECONDS.toSeconds(loopCount * startingRecoveryDelayMilliSeconds), retries);
+          }
           for (int i = 0; i < loopCount; i++) {
             if (isClosed()) {
-              log.info("Recovery for core {} has been closed", core.getName());
+              if (log.isInfoEnabled()) {
+                log.info("Recovery for core {} has been closed", core.getName());
+              }
               break; // check if someone closed us
             }
             Thread.sleep(startingRecoveryDelayMilliSeconds);
@@ -471,7 +482,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
     }
     // We skip core.seedVersionBuckets(); We don't have a transaction log
-    log.info("Finished recovery process, successful=[{}]", Boolean.toString(successfulRecovery));
+    log.info("Finished recovery process, successful=[{}]", successfulRecovery);
   }
   // TODO: perhaps make this grab a new core each time through the loop to handle core reloads?
@@ -511,15 +522,19 @@ public class RecoveryStrategy implements Runnable, Closeable {
       if (oldIdx > 0) {
         log.info("Found new versions added after startup: num=[{}]", oldIdx);
-        log.info("currentVersions size={} range=[{} to {}]", recentVersions.size(), recentVersions.get(0),
-            recentVersions.get(recentVersions.size() - 1));
+        if (log.isInfoEnabled()) {
+          log.info("currentVersions size={} range=[{} to {}]", recentVersions.size(), recentVersions.get(0),
+              recentVersions.get(recentVersions.size() - 1));
+        }
       }
       if (startingVersions.isEmpty()) {
         log.info("startupVersions is empty");
       } else {
-        log.info("startupVersions size={} range=[{} to {}]", startingVersions.size(), startingVersions.get(0),
-            startingVersions.get(startingVersions.size() - 1));
+        if (log.isInfoEnabled()) {
+          log.info("startupVersions size={} range=[{} to {}]", startingVersions.size(), startingVersions.get(0),
+              startingVersions.get(startingVersions.size() - 1));
+        }
       }
     } catch (Exception e) {
       SolrException.log(log, "Error getting recent versions.", e);
@@ -578,9 +593,11 @@ public class RecoveryStrategy implements Runnable, Closeable {
       // recalling buffer updates will drop the old buffer tlog
       ulog.bufferUpdates();
-      log.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(),
-          leader.getCoreUrl(),
-          ourUrl);
+      if (log.isInfoEnabled()) {
+        log.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(),
+            leader.getCoreUrl(),
+            ourUrl);
+      }
       zkController.publish(this.coreDescriptor, Replica.State.RECOVERING);
       final Slice slice = zkStateReader.getClusterState().getCollection(cloudDesc.getCollectionName())
@@ -618,8 +635,10 @@ public class RecoveryStrategy implements Runnable, Closeable {
       // first thing we just try to sync
       if (firstTime) {
         firstTime = false; // only try sync the first time through the loop
-        log.info("Attempting to PeerSync from [{}] - recoveringAfterStartup=[{}]", leader.getCoreUrl(),
-            recoveringAfterStartup);
+        if (log.isInfoEnabled()) {
+          log.info("Attempting to PeerSync from [{}] - recoveringAfterStartup=[{}]", leader.getCoreUrl(),
+              recoveringAfterStartup);
+        }
         // System.out.println("Attempting to PeerSync from " + leaderUrl
         // + " i am:" + zkController.getNodeName());
         PeerSyncWithLeader peerSyncWithLeader = new PeerSyncWithLeader(core,
@@ -713,7 +732,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
         break;
       }
-      log.error("Recovery failed - trying again... (" + retries + ")");
+      log.error("Recovery failed - trying again... ({})", retries);
       retries++;
       if (retries >= maxRetries) {
@@ -759,7 +778,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
       core.seedVersionBuckets();
     }
-    log.info("Finished recovery process, successful=[{}]", Boolean.toString(successfulRecovery));
+    log.info("Finished recovery process, successful=[{}]", successfulRecovery);
   }
   private final Replica pingLeader(String ourUrl, CoreDescriptor coreDesc, boolean mayPutReplicaAsDown)
@@ -894,7 +913,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
     HttpUriRequestResponse mrr = client.httpUriRequest(prepCmd);
     prevSendPreRecoveryHttpUriRequest = mrr.httpUriRequest;
-    log.info("Sending prep recovery command to [{}]; [{}]", leaderBaseUrl, prepCmd.toString());
+    log.info("Sending prep recovery command to [{}]; [{}]", leaderBaseUrl, prepCmd);
     mrr.future.get();
   }


@@ -129,9 +129,13 @@ public class SolrZkServer {
     };
     if (zkProps.getServers().size() > 1) {
-      log.info("STARTING EMBEDDED ENSEMBLE ZOOKEEPER SERVER at port " + zkProps.getClientPortAddress().getPort());
+      if (log.isInfoEnabled()) {
+        log.info("STARTING EMBEDDED ENSEMBLE ZOOKEEPER SERVER at port {}", zkProps.getClientPortAddress().getPort());
+      }
     } else {
-      log.info("STARTING EMBEDDED STANDALONE ZOOKEEPER SERVER at port " + zkProps.getClientPortAddress().getPort());
+      if (log.isInfoEnabled()) {
+        log.info("STARTING EMBEDDED STANDALONE ZOOKEEPER SERVER at port {}", zkProps.getClientPortAddress().getPort());
+      }
     }
     log.warn("Embedded Zookeeper is not recommended in production environments. See Reference Guide for details.");
@@ -171,7 +175,7 @@ class SolrZkServerProps extends QuorumPeerConfig {
   public static Properties getProperties(String path) throws ConfigException {
     File configFile = new File(path);
-    log.info("Reading configuration from: " + configFile);
+    log.info("Reading configuration from: {}", configFile);
     try {
       if (!configFile.exists()) {


@@ -96,7 +96,9 @@ public class SyncStrategy {
     recoveryRequests.clear();
-    log.info("Sync replicas to " + ZkCoreNodeProps.getCoreUrl(leaderProps));
+    if (log.isInfoEnabled()) {
+      log.info("Sync replicas to {}", ZkCoreNodeProps.getCoreUrl(leaderProps));
+    }
     if (core.getUpdateHandler().getUpdateLog() == null) {
       log.error("No UpdateLog found - cannot sync");
@@ -197,14 +199,18 @@ public class SyncStrategy {
         .getReplicaProps(collection, shardId,
             cd.getCloudDescriptor().getCoreNodeName());
     if (nodes == null) {
-      log.info(ZkCoreNodeProps.getCoreUrl(leaderProps) + " has no replicas");
+      if (log.isInfoEnabled()) {
+        log.info("{} has no replicas", ZkCoreNodeProps.getCoreUrl(leaderProps));
+      }
       return;
     }
     ZkCoreNodeProps zkLeader = new ZkCoreNodeProps(leaderProps);
     for (ZkCoreNodeProps node : nodes) {
       try {
-        log.info(ZkCoreNodeProps.getCoreUrl(leaderProps) + ": try and ask " + node.getCoreUrl() + " to sync");
+        if (log.isInfoEnabled()) {
+          log.info("{}: try and ask {} to sync", ZkCoreNodeProps.getCoreUrl(leaderProps), node.getCoreUrl());
+        }
         requestSync(node.getBaseUrl(), node.getCoreUrl(), zkLeader.getCoreUrl(), node.getCoreName(), nUpdates);
@@ -223,8 +229,10 @@ public class SyncStrategy {
       }
       if (!success) {
-        log.info(ZkCoreNodeProps.getCoreUrl(leaderProps) + ": Sync failed - we will ask replica (" + srsp.getShardAddress()
-            + ") to recover.");
+        if (log.isInfoEnabled()) {
+          log.info("{}: Sync failed - we will ask replica ({}) to recover."
+              , ZkCoreNodeProps.getCoreUrl(leaderProps), srsp.getShardAddress());
+        }
         if (isClosed) {
           log.info("We have been closed, don't request that a replica recover");
         } else {
@@ -235,7 +243,9 @@ public class SyncStrategy {
           recoveryRequests.add(rr);
         }
       } else {
-        log.info(ZkCoreNodeProps.getCoreUrl(leaderProps) + ": " + " sync completed with " + srsp.getShardAddress());
+        if (log.isInfoEnabled()) {
+          log.info("{}: sync completed with {}", ZkCoreNodeProps.getCoreUrl(leaderProps), srsp.getShardAddress());
+        }
       }
     }


@ -266,7 +266,9 @@ public class ZkController implements Closeable {
} }
public Object call() throws Exception { public Object call() throws Exception {
log.info("Registering core {} afterExpiration? {}", descriptor.getName(), afterExpiration); if (log.isInfoEnabled()) {
log.info("Registering core {} afterExpiration? {}", descriptor.getName(), afterExpiration);
}
register(descriptor.getName(), descriptor, recoverReloadedCores, afterExpiration, false); register(descriptor.getName(), descriptor, recoverReloadedCores, afterExpiration, false);
return descriptor; return descriptor;
} }
@ -432,7 +434,7 @@ public class ZkController implements Closeable {
} }
} catch (Exception exc) { } catch (Exception exc) {
// not much we can do here other than warn in the log // not much we can do here other than warn in the log
log.warn("Error when notifying OnReconnect listener " + listener + " after session re-connected.", exc); log.warn("Error when notifying OnReconnect listener {} after session re-connected.", listener, exc);
} }
} }
} catch (InterruptedException e) { } catch (InterruptedException e) {
@ -767,7 +769,7 @@ public class ZkController implements Closeable {
String zkPath = ZkConfigManager.CONFIGS_ZKNODE + "/" + zkConfigName + "/" + fileName; String zkPath = ZkConfigManager.CONFIGS_ZKNODE + "/" + zkConfigName + "/" + fileName;
byte[] bytes = zkClient.getData(zkPath, null, null, true); byte[] bytes = zkClient.getData(zkPath, null, null, true);
if (bytes == null) { if (bytes == null) {
log.error("Config file contains no data:" + zkPath); log.error("Config file contains no data:{}", zkPath);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
"Config file contains no data:" + zkPath); "Config file contains no data:" + zkPath);
} }
@ -868,8 +870,9 @@ public class ZkController implements Closeable {
if (zkClient.exists("/configs/_default", true) == false) { if (zkClient.exists("/configs/_default", true) == false) {
String configDirPath = getDefaultConfigDirPath(); String configDirPath = getDefaultConfigDirPath();
if (configDirPath == null) { if (configDirPath == null) {
log.warn("The _default configset could not be uploaded. Please provide 'solr.default.confdir' parameter that points to a configset" + log.warn("The _default configset could not be uploaded. Please provide 'solr.default.confdir' parameter that points to a configset {} {}"
" intended to be the default. Current 'solr.default.confdir' value: {}", System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE)); , "intended to be the default. Current 'solr.default.confdir' value:"
, System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE));
} else { } else {
ZkMaintenanceUtils.upConfig(zkClient, Paths.get(configDirPath), ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME); ZkMaintenanceUtils.upConfig(zkClient, Paths.get(configDirPath), ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
} }
@ -1019,7 +1022,7 @@ public class ZkController implements Closeable {
} catch (KeeperException.NodeExistsException e) { } catch (KeeperException.NodeExistsException e) {
// someone else already created this node - ignore // someone else already created this node - ignore
} catch (KeeperException | InterruptedException e1) { } catch (KeeperException | InterruptedException e1) {
log.warn("Unable to register nodeLost path for " + n, e1); log.warn("Unable to register nodeLost path for {}", n, e1);
} }
} }
} }
@ -1109,7 +1112,7 @@ public class ZkController implements Closeable {
String nodeName = getNodeName(); String nodeName = getNodeName();
String nodePath = ZkStateReader.LIVE_NODES_ZKNODE + "/" + nodeName; String nodePath = ZkStateReader.LIVE_NODES_ZKNODE + "/" + nodeName;
String nodeAddedPath = ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH + "/" + nodeName; String nodeAddedPath = ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH + "/" + nodeName;
log.info("Register node as live in ZooKeeper:" + nodePath); log.info("Register node as live in ZooKeeper:{}", nodePath);
List<Op> ops = new ArrayList<>(2); List<Op> ops = new ArrayList<>(2);
ops.add(Op.create(nodePath, null, zkClient.getZkACLProvider().getACLsToAdd(nodePath), CreateMode.EPHEMERAL)); ops.add(Op.create(nodePath, null, zkClient.getZkACLProvider().getACLsToAdd(nodePath), CreateMode.EPHEMERAL));
// if there are nodeAdded triggers don't create nodeAdded markers // if there are nodeAdded triggers don't create nodeAdded markers
@ -1130,7 +1133,7 @@ public class ZkController implements Closeable {
String nodeName = getNodeName(); String nodeName = getNodeName();
String nodePath = ZkStateReader.LIVE_NODES_ZKNODE + "/" + nodeName; String nodePath = ZkStateReader.LIVE_NODES_ZKNODE + "/" + nodeName;
String nodeAddedPath = ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH + "/" + nodeName; String nodeAddedPath = ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH + "/" + nodeName;
log.info("Remove node as live in ZooKeeper:" + nodePath); log.info("Remove node as live in ZooKeeper:{}", nodePath);
List<Op> ops = new ArrayList<>(2); List<Op> ops = new ArrayList<>(2);
ops.add(Op.delete(nodePath, -1)); ops.add(Op.delete(nodePath, -1));
ops.add(Op.delete(nodeAddedPath, -1)); ops.add(Op.delete(nodeAddedPath, -1));
@@ -1230,7 +1233,7 @@ public class ZkController implements Closeable {
 String leaderUrl = getLeader(cloudDesc, leaderVoteWait + 600000);
 String ourUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, coreName);
-log.debug("We are " + ourUrl + " and leader is " + leaderUrl);
+log.debug("We are {} and leader is {}", ourUrl, leaderUrl);
 boolean isLeader = leaderUrl.equals(ourUrl);
 assert !(isLeader && replica.getType() == Type.PULL) : "Pull replica became leader!";
@@ -1261,13 +1264,15 @@ public class ZkController implements Closeable {
 if (slice.getState() != Slice.State.CONSTRUCTION || !isLeader) {
 Future<UpdateLog.RecoveryInfo> recoveryFuture = core.getUpdateHandler().getUpdateLog().recoverFromLog();
 if (recoveryFuture != null) {
-log.info("Replaying tlog for " + ourUrl + " during startup... NOTE: This can take a while.");
+log.info("Replaying tlog for {} during startup... NOTE: This can take a while.", ourUrl);
 recoveryFuture.get(); // NOTE: this could potentially block for
 // minutes or more!
 // TODO: public as recovering in the mean time?
 // TODO: in the future we could do peersync in parallel with recoverFromLog
 } else {
-log.debug("No LogReplay needed for core={} baseURL={}", core.getName(), baseUrl);
+if (log.isDebugEnabled()) {
+  log.debug("No LogReplay needed for core={} baseURL={}", core.getName(), baseUrl);
+}
 }
 }
 }
@@ -1490,14 +1495,18 @@ public class ZkController implements Closeable {
 }
 if (doRecovery) {
-log.info("Core needs to recover:" + core.getName());
+if (log.isInfoEnabled()) {
+  log.info("Core needs to recover:{}", core.getName());
+}
 core.getUpdateHandler().getSolrCoreState().doRecovery(cc, core.getCoreDescriptor());
 return true;
 }
 ZkShardTerms zkShardTerms = getShardTerms(collection, shardId);
 if (zkShardTerms.registered(coreZkNodeName) && !zkShardTerms.canBecomeLeader(coreZkNodeName)) {
-log.info("Leader's term larger than core " + core.getName() + "; starting recovery process");
+if (log.isInfoEnabled()) {
+  log.info("Leader's term larger than core {}; starting recovery process", core.getName());
+}
 core.getUpdateHandler().getSolrCoreState().doRecovery(cc, core.getCoreDescriptor());
 return true;
 }
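Parameterizing only defers the formatting; the arguments themselves are still evaluated at the call site. That is why the recovery hunks above guard calls whose arguments invoke methods (core.getName()) with if (log.is*Enabled()) instead of relying on {} alone. A sketch of the guarded form, with expensiveSummary() as a hypothetical stand-in for a costly argument:

    // {} defers formatting, but expensiveSummary() would still run on every call.
    // The guard skips both the method call and the formatting when INFO is off.
    if (log.isInfoEnabled()) {
      log.info("Core needs to recover:{}", expensiveSummary());
    }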
@@ -1532,7 +1541,7 @@ public class ZkController implements Closeable {
 try {
 String collection = cd.getCloudDescriptor().getCollectionName();
-log.debug("publishing state={}", state.toString());
+log.debug("publishing state={}", state);
 // System.out.println(Thread.currentThread().getStackTrace()[3]);
 Integer numShards = cd.getCloudDescriptor().getNumShards();
 if (numShards == null) { // XXX sys prop hack
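Dropping the explicit state.toString() matters for the same reason: toString() in the argument list runs eagerly, whereas passing the object lets the logger stringify it only when the message is rendered. A self-contained demo (the counter class is illustrative, not from the commit):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LazyToStringDemo {
      private static final Logger log = LoggerFactory.getLogger(LazyToStringDemo.class);
      static int toStringCalls = 0;

      static final class State {
        @Override public String toString() { toStringCalls++; return "DOWN"; }
      }

      public static void main(String[] args) {
        State state = new State();
        log.debug("publishing state={}", state.toString()); // always builds the String
        log.debug("publishing state={}", state);            // builds it only at DEBUG
        // With DEBUG disabled this prints 1, not 2.
        System.out.println("toString() calls: " + toStringCalls);
      }
    }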
@@ -1581,7 +1590,9 @@ public class ZkController implements Closeable {
 }
 } catch (SolrCoreInitializationException ex) {
 // The core had failed to initialize (in a previous request, not this one), hence nothing to do here.
-log.info("The core '{}' had failed to initialize before.", cd.getName());
+if (log.isInfoEnabled()) {
+  log.info("The core '{}' had failed to initialize before.", cd.getName());
+}
 }
 // pull replicas are excluded because their terms are not considered
@@ -1719,7 +1730,9 @@ public class ZkController implements Closeable {
 }
 private void waitForShardId(CoreDescriptor cd) {
-log.debug("waiting to find shard id in clusterstate for " + cd.getName());
+if (log.isDebugEnabled()) {
+  log.debug("waiting to find shard id in clusterstate for {}", cd.getName());
+}
 int retryCount = 320;
 while (retryCount-- > 0) {
 final String shardId = zkStateReader.getClusterState().getShardId(cd.getCollectionName(), getNodeName(), cd.getName());
@@ -1908,11 +1921,13 @@ public class ZkController implements Closeable {
 boolean isLeader = leaderProps.getCoreUrl().equals(ourUrl);
 if (!isLeader && !SKIP_AUTO_RECOVERY) {
 if (!getShardTerms(collection, shard).canBecomeLeader(myCoreNodeName)) {
-log.debug("Term of replica " + myCoreNodeName +
-    " is already less than leader, so not waiting for leader to see down state.");
+log.debug("Term of replica {} is already less than leader, so not waiting for leader to see down state."
+    , myCoreNodeName);
 } else {
-log.info("replica={} is making a best effort attempt to wait for leader={} to see it's DOWN state.", myCoreNodeName, leaderProps.getCoreUrl());
+if (log.isInfoEnabled()) {
+  log.info("replica={} is making a best effort attempt to wait for leader={} to see its DOWN state.", myCoreNodeName, leaderProps.getCoreUrl());
+}
 try (HttpSolrClient client = new Builder(leaderBaseUrl)
 .withConnectionTimeout(8000) // short timeouts, we may be in a storm and this is best effort and maybe we should be the leader now
@@ -1975,7 +1990,7 @@ public class ZkController implements Closeable {
 public static void linkConfSet(SolrZkClient zkClient, String collection, String confSetName) throws KeeperException, InterruptedException {
 String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection;
-log.debug("Load collection config from:" + path);
+log.debug("Load collection config from:{}", path);
 byte[] data;
 try {
 data = zkClient.getData(path, null, null, true);
@@ -2024,7 +2039,9 @@ public class ZkController implements Closeable {
 //List<String> allCoreNames = cfg.getAllCoreNames();
 List<CoreDescriptor> cds = cc.getCoresLocator().discover(cc);
-log.info("bootstrapping config for " + cds.size() + " cores into ZooKeeper using solr.xml from " + cc.getSolrHome());
+if (log.isInfoEnabled()) {
+  log.info("bootstrapping config for {} cores into ZooKeeper using solr.xml from {}", cds.size(), cc.getSolrHome());
+}
 for (CoreDescriptor cd : cds) {
 String coreName = cd.getName();
@@ -2032,7 +2049,7 @@ public class ZkController implements Closeable {
 if (StringUtils.isEmpty(confName))
 confName = coreName;
 Path udir = cd.getInstanceDir().resolve("conf");
-log.info("Uploading directory " + udir + " with name " + confName + " for SolrCore " + coreName);
+log.info("Uploading directory {} with name {} for SolrCore {}", udir, confName, coreName);
 configManager.uploadConfigDir(udir, confName);
 }
 }
@@ -2078,7 +2095,7 @@ public class ZkController implements Closeable {
 try {
 return asyncIdsMap.putIfAbsent(asyncId, new byte[0]);
 } catch (InterruptedException e) {
-log.error("Could not claim asyncId=" + asyncId, e);
+log.error("Could not claim asyncId={}", asyncId, e);
 Thread.currentThread().interrupt();
 throw new RuntimeException(e);
 }
@@ -2094,7 +2111,7 @@ public class ZkController implements Closeable {
 try {
 return asyncIdsMap.remove(asyncId);
 } catch (InterruptedException e) {
-log.error("Could not release asyncId=" + asyncId, e);
+log.error("Could not release asyncId={}", asyncId, e);
 Thread.currentThread().interrupt();
 throw new RuntimeException(e);
 }
@@ -2265,7 +2282,7 @@ public class ZkController implements Closeable {
 if (listener != null) {
 synchronized (reconnectListeners) {
 reconnectListeners.add(listener);
-log.debug("Added new OnReconnect listener "+listener);
+log.debug("Added new OnReconnect listener {}", listener);
 }
 }
 }
@@ -2280,10 +2297,11 @@ public class ZkController implements Closeable {
 wasRemoved = reconnectListeners.remove(listener);
 }
 if (wasRemoved) {
-log.debug("Removed OnReconnect listener "+listener);
+log.debug("Removed OnReconnect listener {}", listener);
 } else {
-log.warn("Was asked to remove OnReconnect listener "+listener+
-    ", but remove operation did not find it in the list of registered listeners.");
+log.warn("Was asked to remove OnReconnect listener {}{}"
+    , listener
+    , ", but remove operation did not find it in the list of registered listeners.");
 }
 }
 }
@@ -2324,12 +2342,16 @@ public class ZkController implements Closeable {
 } catch (KeeperException.NodeExistsException nee) {
 try {
 Stat stat = zkClient.exists(resourceLocation, null, true);
-log.debug("failed to set data version in zk is {} and expected version is {} ", stat.getVersion(), znodeVersion);
+if (log.isDebugEnabled()) {
+  log.debug("failed to set data version in zk is {} and expected version is {} ", stat.getVersion(), znodeVersion);
+}
 } catch (Exception e1) {
 log.warn("could not get stat");
 }
-log.info(StrUtils.formatString(errMsg, resourceLocation, znodeVersion));
+if (log.isInfoEnabled()) {
+  log.info(StrUtils.formatString(errMsg, resourceLocation, znodeVersion));
+}
 throw new ResourceModifiedInZkException(ErrorCode.CONFLICT, StrUtils.formatString(errMsg, resourceLocation, znodeVersion) + ", retry.");
 }
 }
@@ -2344,7 +2366,9 @@ public class ZkController implements Closeable {
 log.error(e.getMessage());
 }
-log.info(StrUtils.formatString(errMsg + " zkVersion= " + v, resourceLocation, znodeVersion));
+if (log.isInfoEnabled()) {
+  log.info(StrUtils.formatString(errMsg + " zkVersion= " + v, resourceLocation, znodeVersion));
+}
 throw new ResourceModifiedInZkException(ErrorCode.CONFLICT, StrUtils.formatString(errMsg, resourceLocation, znodeVersion) + ", retry.");
 } catch (ResourceModifiedInZkException e) {
 throw e;
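Format strings and argument lists drift apart easily once a call is rewritten, and the failure modes differ by API: String.format throws when a specifier has no matching argument, while slf4j quietly prints the surplus {} into the message, so the mismatch only shows up in the log text. A small demo of both behaviors (values are illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class FormatMismatchDemo {
      private static final Logger log = LoggerFactory.getLogger(FormatMismatchDemo.class);

      public static void main(String[] args) {
        // slf4j is lenient: the unfilled {} is emitted literally.
        log.info("resource={} zkVersion={} {}", "/configs/_default", 3);

        // String.format is strict: a fourth specifier with only three
        // arguments fails at runtime.
        try {
          String.format("%s zkVersion= %d %s %d", "err", 3, "path");
        } catch (java.util.MissingFormatArgumentException e) {
          System.out.println("format/argument mismatch: " + e.getMessage());
        }
      }
    }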
@@ -2384,7 +2408,7 @@ public class ZkController implements Closeable {
 synchronized (confDirectoryListeners) {
 final Set<Runnable> listeners = confDirectoryListeners.get(confDir);
 if (listeners == null) {
-log.warn(confDir + " has no more registered listeners, but a live one attempted to unregister!");
+log.warn("{} has no more registered listeners, but a live one attempted to unregister!", confDir);
 return;
 }
 if (listeners.remove(listener)) {
@@ -2627,7 +2651,7 @@ public class ZkController implements Closeable {
 Thread.currentThread().interrupt();
 log.debug("Publish node as down was interrupted.");
 } catch (KeeperException e) {
-log.warn("Could not publish node as down: " + e.getMessage());
+log.warn("Could not publish node as down: {}", e.getMessage());
 }
 }
@@ -2638,31 +2662,41 @@ public class ZkController implements Closeable {
 if (!core.getSolrConfig().useColdSearcher) {
 RefCounted<SolrIndexSearcher> registeredSearcher = core.getRegisteredSearcher();
 if (registeredSearcher != null) {
-log.debug("Found a registered searcher: {} for core: {}", registeredSearcher.get(), core);
+if (log.isDebugEnabled()) {
+  log.debug("Found a registered searcher: {} for core: {}", registeredSearcher.get(), core);
+}
 registeredSearcher.decref();
 } else {
 Future[] waitSearcher = new Future[1];
-log.info("No registered searcher found for core: {}, waiting until a searcher is registered before publishing as active", core.getName());
+if (log.isInfoEnabled()) {
+  log.info("No registered searcher found for core: {}, waiting until a searcher is registered before publishing as active", core.getName());
+}
 final RTimer timer = new RTimer();
 RefCounted<SolrIndexSearcher> searcher = null;
 try {
 searcher = core.getSearcher(false, true, waitSearcher, true);
 boolean success = true;
 if (waitSearcher[0] != null) {
-log.debug("Waiting for first searcher of core {}, id: {} to be registered", core.getName(), core);
+if (log.isDebugEnabled()) {
+  log.debug("Waiting for first searcher of core {}, id: {} to be registered", core.getName(), core);
+}
 try {
 waitSearcher[0].get();
 } catch (ExecutionException e) {
-log.warn("Wait for a searcher to be registered for core " + core.getName() + ",id: " + core + " failed due to: " + e, e);
+log.warn("Wait for a searcher to be registered for core {}, id: {} failed due to: {}", core.getName(), core, e, e);
 success = false;
 }
 }
 if (success) {
 if (searcher == null) {
 // should never happen
-log.debug("Did not find a searcher even after the future callback for core: {}, id: {}!!!", core.getName(), core);
+if (log.isDebugEnabled()) {
+  log.debug("Did not find a searcher even after the future callback for core: {}, id: {}!!!", core.getName(), core);
+}
 } else {
-log.info("Found a registered searcher: {}, took: {} ms for core: {}, id: {}", searcher.get(), timer.getTime(), core.getName(), core);
+if (log.isInfoEnabled()) {
+  log.info("Found a registered searcher: {}, took: {} ms for core: {}, id: {}", searcher.get(), timer.getTime(), core.getName(), core);
+}
 }
 }
 } finally {
@@ -2673,7 +2707,9 @@ public class ZkController implements Closeable {
 }
 RefCounted<SolrIndexSearcher> newestSearcher = core.getNewestSearcher(false);
 if (newestSearcher != null) {
-log.debug("Found newest searcher: {} for core: {}, id: {}", newestSearcher.get(), core.getName(), core);
+if (log.isDebugEnabled()) {
+  log.debug("Found newest searcher: {} for core: {}, id: {}", newestSearcher.get(), core.getName(), core);
+}
 newestSearcher.decref();
 }
 }
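One detail in the searcher hunk above is easy to misread: the rewritten warn call passes e twice. slf4j treats a trailing Throwable that is left over after all {} are filled as the exception to log, stack trace included; an e that is consumed by a placeholder contributes only its toString(). A demo of the convention (names are illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ThrowableArgDemo {
      private static final Logger log = LoggerFactory.getLogger(ThrowableArgDemo.class);

      public static void main(String[] args) {
        Exception e = new IllegalStateException("boom");

        // Two placeholders, three arguments: the trailing e is the Throwable,
        // so the message gets e's toString() AND the stack trace is logged.
        log.warn("wait for searcher on core {} failed due to: {}", "core1", e, e);

        // Two placeholders, two arguments: e is consumed by the second {},
        // and the stack trace is lost.
        log.warn("wait for searcher on core {} failed due to: {}", "core1", e);
      }
    }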

View File

@@ -250,7 +250,9 @@ public class ZkDistributedQueue implements DistributedQueue {
 try {
 zookeeper.delete(ops.get(j).getPath(), -1, true);
 } catch (KeeperException.NoNodeException e2) {
-log.debug("Can not remove node which is not exist : " + ops.get(j).getPath());
+if (log.isDebugEnabled()) {
+  log.debug("Cannot remove a node that does not exist: {}", ops.get(j).getPath());
+}
 }
 }
 }
@@ -417,7 +419,7 @@ public class ZkDistributedQueue implements DistributedQueue {
 for (String childName : childNames) {
 // Check format
 if (!childName.regionMatches(0, PREFIX, 0, PREFIX.length())) {
-log.debug("Found child node with improper name: " + childName);
+log.debug("Found child node with improper name: {}", childName);
 continue;
 }
 orderedChildren.add(childName);

View File

@@ -91,7 +91,7 @@ public class ZkSolrResourceLoader extends SolrResourceLoader {
 // Retry in case of session expiry
 try {
 Thread.sleep(1000);
-log.debug("Sleeping for 1s before retrying fetching resource=" + resource);
+log.debug("Sleeping for 1s before retrying fetching resource={}", resource);
 } catch (InterruptedException ie) {
 Thread.currentThread().interrupt();
 throw new IOException("Could not load resource=" + resource, ie);
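Note what the commit leaves alone: the IOException above keeps its concatenated message. An exception message is constructed unconditionally at the moment of the throw, so there is no disabled level to exploit; deferral only pays off in log calls, which go through a level check. A sketch contrasting the two cases, with names mirroring the hunk above:

    import java.io.IOException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class RetrySketch {
      private static final Logger log = LoggerFactory.getLogger(RetrySketch.class);

      void pauseBeforeRetry(String resource) throws IOException {
        try {
          Thread.sleep(1000);
          // Log call: parameterize, since formatting is skipped when DEBUG is off.
          log.debug("Sleeping for 1s before retrying fetching resource={}", resource);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          // Exception message: built exactly when needed, so concatenation is fine.
          throw new IOException("Could not load resource=" + resource, ie);
        }
      }
    }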