mirror of https://github.com/apache/lucene.git
LUCENE-7788: fail precommit on unparameterised log messages and examine for wasted work/objects
This commit is contained in: parent b25eabe6a9, commit ff4363675e
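For context, the pattern this precommit check enforces looks roughly like the sketch below (an illustrative example, not part of the diff; the class, method, and variable names are made up). String concatenation builds the log message even when the level is disabled, parameterized calls defer that work until the level is enabled, calls whose arguments are themselves expensive to compute get wrapped in an is*Enabled() guard, and a //logok comment marks a line the task should skip.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogCallExample {
  private static final Logger log = LoggerFactory.getLogger(LogCallExample.class);

  void report(String coreName, long docCount) {
    // Flagged by the task: the message String is concatenated even when debug logging is off.
    log.debug("core " + coreName + " has " + docCount + " docs");

    // Preferred: parameterized, the message is only formatted if debug is enabled.
    log.debug("core {} has {} docs", coreName, docCount);

    // When an argument is itself expensive to compute, guard the whole call.
    if (log.isDebugEnabled()) {
      log.debug("core {} stats: {}", coreName, expensiveStats()); // logOk
    }
  }

  private String expensiveStats() {
    return "...";
  }
}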
@@ -58,7 +58,7 @@ class ValidateLogCallsTask extends DefaultTask {
Set<String> dirsToCheck = ["solr/core/src/java/org/apache/solr/analysis"
, "solr/core/src/java/org/apache/solr/api"
, "solr/core/src/java/org/apache/solr/client"
// , "solr/core/src/java/org/apache/solr/cloud" // 120
, "solr/core/src/java/org/apache/solr/cloud" // 120
, "solr/core/src/java/org/apache/solr/cloud/api"
, "solr/core/src/java/org/apache/solr/cloud/autoscaling"
, "solr/core/src/java/org/apache/solr/cloud/cdcr"
@@ -115,11 +115,19 @@ class ValidateLogCallsTask extends DefaultTask {
, "Suggester.java" : [147, 181]
, "UpdateLog.java" : [1976]
, "V2HttpCall.java" : [158]
// checking against 8x in master, take these out usually.
// , "CoreContainer.java" : [1096]
// , "ConcurrentLFUCache.java" : [700, 911]
// , "ConcurrentLRUCache.java" : [911]
// , "DirectUpdateHandler2.java" : [844, 865]
// , "PeerSync.java" : [697]
// , "SolrIndexWriter.java" : [356]
// , "UpdateLog.java" : [1973]
]
def logLevels = ["log.trace", "log.debug", "log.info", "log.warn", "log.error", "log.fatal"]

def errsFound = 0;
def violations = new TreeSet();
def violations = new ArrayList();

def reportViolation(String msg) {
violations.add(System.lineSeparator + msg);
@@ -143,7 +151,7 @@ class ValidateLogCallsTask extends DefaultTask {

// If the line has been explicitly checked, skip it.
if (line.replaceAll("\\s", "").toLowerCase().contains("//logok")) {
return
return violation
}
// Strip all of the comments, things in quotes and the like.
def level = ""
@@ -199,14 +207,18 @@ class ValidateLogCallsTask extends DefaultTask {
violation = true
}
if (violation) {
reportViolation(String.format("Suspicious logging call File: '%s' line: '%d' log message: '%s' parent path: '%s'. Parameterize or surround with 'if (log.is*Enabled) {... stripped: '%s'"
, file.name
, lineNumber
, line
, file.getParentFile().getAbsolutePath()
, stripped
))
reportViolation(String.format("Suspicious logging call, Parameterize or surround with 'if (log.is*Enabled) {..%s %s:%d"
, System.lineSeparator, file.getAbsolutePath(), lineNumber))
// reportViolation(String.format("Suspicious logging call File: '%s' line: '%d' log message: '%s' parent path: '%s'. Parameterize or surround with 'if (log.is*Enabled) {... stripped: '%s'"
// , file.name
// , lineNumber
// , line
// , file.getParentFile().getAbsolutePath()
// , stripped
// ))
// reportViolation(String.format("%s:%d", file.getAbsolutePath(), lineNumber))
}
return violation
}

@@ -235,6 +247,7 @@ class ValidateLogCallsTask extends DefaultTask {
int state = 0 // 0 == not collecting a log line, 1 == collecting a log line, 2 == just collected the last.
int lineNumber = 0
StringBuilder sb = new StringBuilder();
boolean foundViolation = false

// We have to assemble line-by-line to get the full log statement. We require that all loggers (except requestLog
// and slowLog) be exactly "log". That will be checked as well.
@@ -275,26 +288,44 @@ class ValidateLogCallsTask extends DefaultTask {
case 1:
break;
case 2:
checkLogLine(file, sb.toString(), lineNumber, prevLine)
foundViolation = (checkLogLine(file, sb.toString(), lineNumber, prevLine)) ? true : foundViolation
state = 0
break;
default:
break;
}
}
return foundViolation
}

@TaskAction
def checkLogLines() {
dirsToCheck.add(srcDir)

//nocommit. This is here to check 8x on another branch since I can't run Gradle
// over 8x. Used periodically as a sanity check.
// new File("/Users/Erick/apache/solrJiras/master").traverse(type: groovy.io.FileType.FILES, nameFilter: ~/.*\.java/) { File it ->
// if (dirsToCheck.any { dir ->
// it.getCanonicalPath().contains(dir)
// }) {
// if (checkFile(it)) {
// println(it.getAbsolutePath())
// // nocommit. This just makes it much easier to get to the files during this mass migration!
// }
// }

project.sourceSets.each { srcSet ->
srcSet.java.each { f ->
if (dirsToCheck.any {
f.getCanonicalPath().contains(it)
}) {
checkFile(f)
if (checkFile(f)) {
// nocommit. This just makes it much easier to get to the files during this mass migration!
println(f.getAbsolutePath())
}
}
//nocommit move this to project based whenever
// nocommit move
// this to project based whenever
// if (srcDir != null && f.getCanonicalPath().contains(srcDir)) {
// checkFile(f)
// }

@@ -115,11 +115,16 @@ public class ActiveReplicaWatcher implements CollectionStateWatcher {
// synchronized due to SOLR-11535
@Override
public synchronized boolean onStateChanged(Set<String> liveNodes, DocCollection collectionState) {
log.debug("-- onStateChanged@" + Long.toHexString(hashCode()) + ": replicaIds=" + replicaIds + ", solrCoreNames=" + solrCoreNames +
(latch != null ? "\nlatch count=" + latch.getCount() : "") +
"\ncollectionState=" + collectionState);
if (log.isDebugEnabled()) {
log.debug("-- onStateChanged@{}: replicaIds={}, solrCoreNames={} {}\ncollectionState {}"
, Long.toHexString(hashCode()), replicaIds, solrCoreNames
, (latch != null ? "\nlatch count=" + latch.getCount() : "")
, collectionState); // logOk
}
if (collectionState == null) { // collection has been deleted - don't wait
log.debug("-- collection deleted, decrementing latch by " + replicaIds.size() + solrCoreNames.size());
if (log.isDebugEnabled()) {
log.debug("-- collection deleted, decrementing latch by {} ", replicaIds.size() + solrCoreNames.size()); // logOk
}
if (latch != null) {
for (int i = 0; i < replicaIds.size() + solrCoreNames.size(); i++) {
latch.countDown();
@@ -134,7 +139,7 @@ public class ActiveReplicaWatcher implements CollectionStateWatcher {
return true;
}
if (collectionState.getZNodeVersion() == lastZkVersion) {
log.debug("-- spurious call with already seen zkVersion=" + lastZkVersion + ", ignoring...");
log.debug("-- spurious call with already seen zkVersion= {}, ignoring...", lastZkVersion);
return false;
}
lastZkVersion = collectionState.getZNodeVersion();
@@ -160,7 +165,9 @@ public class ActiveReplicaWatcher implements CollectionStateWatcher {
}
}
}
log.debug("-- " + Long.toHexString(hashCode()) + " now latch count=" + latch.getCount());
if (log.isDebugEnabled()) {
log.debug("-- {} now latchcount={}", Long.toHexString(hashCode()), latch.getCount());
}
if (replicaIds.isEmpty() && solrCoreNames.isEmpty()) {
return true;
} else {

@@ -94,7 +94,7 @@ public class CloudConfigSetService extends ConfigSetService {
try {
stat = zkController.getZkClient().exists(zkPath, null, true);
} catch (KeeperException e) {
log.warn("Unexpected exception when getting modification time of " + zkPath, e);
log.warn("Unexpected exception when getting modification time of {}", zkPath, e);
return null; // debatable; we'll see an error soon if there's a real problem
} catch (InterruptedException e) {
Thread.currentThread().interrupt();

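A note on the rewritten warn/error calls above and in the hunks that follow, where the exception is passed after the placeholders (for example log.warn("... {}", zkPath, e)): SLF4J treats a trailing Throwable that has no matching {} as the attached exception and still prints its stack trace. A minimal sketch, with an illustrative logger and a simulated exception that are not taken from the diff:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ThrowableArgExample {
  private static final Logger log = LoggerFactory.getLogger(ThrowableArgExample.class);

  void read(String zkPath) {
    try {
      throw new IllegalStateException("simulated failure");
    } catch (IllegalStateException e) {
      // One {} placeholder, two arguments: zkPath fills the placeholder and the
      // trailing exception is logged with its stack trace, not formatted into the message.
      log.warn("Unexpected exception when getting modification time of {}", zkPath, e);
    }
  }
}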
@@ -91,9 +91,9 @@ public class CloudUtil {
SolrException.log(log, "Failed to delete instance dir for core:"
+ desc.getName() + " dir:" + desc.getInstanceDir());
}
log.error("", new SolrException(ErrorCode.SERVER_ERROR,
"Will not load SolrCore " + desc.getName()
+ " because it has been replaced due to failover."));
log.error("{}",
new SolrException(ErrorCode.SERVER_ERROR, "Will not load SolrCore " + desc.getName()
+ " because it has been replaced due to failover.")); // logOk
throw new SolrException(ErrorCode.SERVER_ERROR,
"Will not load SolrCore " + desc.getName()
+ " because it has been replaced due to failover.");
@@ -249,7 +249,9 @@ public class CloudUtil {
}
Collection<Slice> slices = withInactive ? collectionState.getSlices() : collectionState.getActiveSlices();
if (slices.size() != expectedShards) {
log.debug("-- wrong number of slices for collection {}, expected={}, found={}: {}", collectionState.getName(), expectedShards, collectionState.getSlices().size(), collectionState.getSlices());
if (log.isDebugEnabled()) {
log.debug("-- wrong number of slices for collection {}, expected={}, found={}: {}", collectionState.getName(), expectedShards, collectionState.getSlices().size(), collectionState.getSlices());
}
return false;
}
Set<String> leaderless = new HashSet<>();
@@ -268,7 +270,9 @@ public class CloudUtil {
activeReplicas++;
}
if (activeReplicas != expectedReplicas) {
log.debug("-- wrong number of active replicas for collection {} in slice {}, expected={}, found={}", collectionState.getName(), slice.getName(), expectedReplicas, activeReplicas);
if (log.isDebugEnabled()) {
log.debug("-- wrong number of active replicas for collection {} in slice {}, expected={}, found={}", collectionState.getName(), slice.getName(), expectedReplicas, activeReplicas);
}
return false;
}
}

@@ -436,8 +436,10 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
RefCounted<SolrIndexSearcher> searchHolder = core.getNewestSearcher(false);
SolrIndexSearcher searcher = searchHolder.get();
try {
log.debug(core.getCoreContainer().getZkController().getNodeName() + " synched "
+ searcher.count(new MatchAllDocsQuery()));
if (log.isDebugEnabled()) {
log.debug("{} synched {}", core.getCoreContainer().getZkController().getNodeName()
, searcher.count(new MatchAllDocsQuery()));
}
} finally {
searchHolder.decref();
}
@@ -472,8 +474,8 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
}
// in case of leaderVoteWait timeout, a replica with lower term can win the election
if (setTermToMax) {
log.error("WARNING: Potential data loss -- Replica {} became leader after timeout (leaderVoteWait) " +
"without being up-to-date with the previous leader", coreNodeName);
log.error("WARNING: Potential data loss -- Replica {} became leader after timeout (leaderVoteWait) {}"
, "without being up-to-date with the previous leader", coreNodeName);
zkController.getShardTerms(collection, shardId).setTermEqualsToLeader(coreNodeName);
}
super.runLeaderProcess(weAreReplacement, 0);
@@ -485,7 +487,9 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
return;
}
}
log.info("I am the new leader: " + ZkCoreNodeProps.getCoreUrl(leaderProps) + " " + shardId);
if (log.isInfoEnabled()) {
log.info("I am the new leader: {} {}", ZkCoreNodeProps.getCoreUrl(leaderProps), shardId);
}

// we made it as leader - send any recovery requests we need to
syncStrategy.requestRecoveries();
@@ -500,7 +504,9 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
try (SolrCore core = cc.getCore(coreName)) {

if (core == null) {
log.debug("SolrCore not found:" + coreName + " in " + cc.getLoadedCoreNames());
if (log.isDebugEnabled()) {
log.debug("SolrCore not found: {} in {}", coreName, cc.getLoadedCoreNames());
}
return;
}

@@ -532,8 +538,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
long timeoutAt = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeout, TimeUnit.MILLISECONDS);
while (!isClosed && !cc.isShutDown()) {
if (System.nanoTime() > timeoutAt) {
log.warn("After waiting for {}ms, no other potential leader was found, {} try to become leader anyway (" +
"core_term:{}, highest_term:{})",
log.warn("After waiting for {}ms, no other potential leader was found, {} try to become leader anyway (core_term:{}, highest_term:{})",
timeout, coreNodeName, zkShardTerms.getTerm(coreNodeName), zkShardTerms.getHighestTerm());
return true;
}
@@ -625,11 +630,11 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
return true;
} else {
if (cnt % 40 == 0) {
log.info("Waiting until we see more replicas up for shard {}: total={}"
+ " found={}"
+ " timeoutin={}ms",
shardId, slices.getReplicas(EnumSet.of(Replica.Type.TLOG, Replica.Type.NRT)).size(), found,
TimeUnit.MILLISECONDS.convert(timeoutAt - System.nanoTime(), TimeUnit.NANOSECONDS));
if (log.isInfoEnabled()) {
log.info("Waiting until we see more replicas up for shard {}: total={} found={} timeoute in={}ms"
, shardId, slices.getReplicas(EnumSet.of(Replica.Type.TLOG, Replica.Type.NRT)).size(), found,
TimeUnit.MILLISECONDS.convert(timeoutAt - System.nanoTime(), TimeUnit.NANOSECONDS));
}
}
}

@@ -638,7 +643,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
return false;
}
} else {
log.warn("Shard not found: " + shardId + " for collection " + collection);
log.warn("Shard not found: {} for collection {}", shardId, collection);

return false;

@@ -676,13 +681,10 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
log.debug("All replicas are ready to participate in election.");
return true;
}

} else {
log.warn("Shard not found: " + shardId + " for collection " + collection);

log.warn("Shard not found: {} for collection {}", shardId, collection);
return false;
}

return false;
}

@@ -279,12 +279,16 @@ class ExclusiveSliceProperty {
}

private void removeProp(Slice origSlice, String replicaName) {
log.debug("Removing property {} from slice {}, replica {}", property, origSlice.getName(), replicaName);
if (log.isDebugEnabled()) {
log.debug("Removing property {} from slice {}, replica {}", property, origSlice.getName(), replicaName);
}
getReplicaFromChanged(origSlice, replicaName).getProperties().remove(property);
}

private void addProp(Slice origSlice, String replicaName) {
log.debug("Adding property {} to slice {}, replica {}", property, origSlice.getName(), replicaName);
if (log.isDebugEnabled()) {
log.debug("Adding property {} to slice {}, replica {}", property, origSlice.getName(), replicaName);
}
getReplicaFromChanged(origSlice, replicaName).getProperties().put(property, "true");
}

@@ -341,7 +341,7 @@ public class LeaderElector {
} catch (KeeperException.NoNodeException nne) {
// expected . don't do anything
} catch (Exception e) {
log.warn("My watched node still exists and can't remove " + myNode, e);
log.warn("My watched node still exists and can't remove {}", myNode, e);
}
return;
}

@@ -141,7 +141,7 @@ public class LockTree {
void unlock(LockImpl lockObject) {
if (myLock == lockObject) myLock = null;
else {
log.info("Unlocked multiple times : {}", lockObject.toString());
log.info("Unlocked multiple times : {}", lockObject);
}
}

@@ -171,7 +171,7 @@ public class LockTree {

void clear() {
if (myLock != null) {
log.warn("lock_is_leaked at" + constructPath(new LinkedList<>()));
log.warn("lock_is_leaked at {}", constructPath(new LinkedList<>()));
myLock = null;
}
for (Node node : children.values()) node.clear();

@@ -149,7 +149,9 @@ public class Overseer implements SolrCloseable {
isLeader = amILeader(); // not a no, not a yes, try ask again
}

log.info("Starting to work on the main queue : {}", LeaderElector.getNodeName(myId));
if (log.isInfoEnabled()) {
log.info("Starting to work on the main queue : {}", LeaderElector.getNodeName(myId));
}
try {
ZkStateWriter zkStateWriter = null;
ClusterState clusterState = null;
@@ -182,7 +184,9 @@ public class Overseer implements SolrCloseable {
byte[] data = fallbackQueue.peek();
while (fallbackQueueSize > 0 && data != null) {
final ZkNodeProps message = ZkNodeProps.load(data);
log.debug("processMessage: fallbackQueueSize: {}, message = {}", fallbackQueue.getZkStats().getQueueLength(), message);
if (log.isDebugEnabled()) {
log.debug("processMessage: fallbackQueueSize: {}, message = {}", fallbackQueue.getZkStats().getQueueLength(), message);
}
// force flush to ZK after each message because there is no fallback if workQueue items
// are removed from workQueue but fail to be written to ZK
try {
@@ -239,7 +243,9 @@ public class Overseer implements SolrCloseable {
for (Pair<String, byte[]> head : queue) {
byte[] data = head.second();
final ZkNodeProps message = ZkNodeProps.load(data);
log.debug("processMessage: queueSize: {}, message = {} current state version: {}", stateUpdateQueue.getZkStats().getQueueLength(), message, clusterState.getZkClusterStateVersion());
if (log.isDebugEnabled()) {
log.debug("processMessage: queueSize: {}, message = {} current state version: {}", stateUpdateQueue.getZkStats().getQueueLength(), message, clusterState.getZkClusterStateVersion());
}

processedNodes.add(head.first());
fallbackQueueSize = processedNodes.size();
@@ -274,7 +280,9 @@ public class Overseer implements SolrCloseable {
}
}
} finally {
log.info("Overseer Loop exiting : {}", LeaderElector.getNodeName(myId));
if (log.isInfoEnabled()) {
log.info("Overseer Loop exiting : {}", LeaderElector.getNodeName(myId));
}
//do this in a separate thread because any wait is interrupted in this main thread
new Thread(this::checkIfIamStillLeader, "OverseerExitThread").start();
}
@@ -306,7 +314,7 @@ public class Overseer implements SolrCloseable {
// ZooKeeper in which case another Overseer should take over
// TODO: if ordering for the message is not important, we could
// track retries and put it back on the end of the queue
log.error("Overseer could not process the current clusterstate state update message, skipping the message: " + message, e);
log.error("Overseer could not process the current clusterstate state update message, skipping the message: {}", message, e);
stats.error(operation);
} finally {
timerContext.stop();
@@ -346,7 +354,7 @@ public class Overseer implements SolrCloseable {
} catch (KeeperException.BadVersionException e) {
//no problem ignore it some other Overseer has already taken over
} catch (Exception e) {
log.error("Could not delete my leader node "+path, e);
log.error("Could not delete my leader node {}", path, e);
}

} else{
@@ -419,7 +427,9 @@ public class Overseer implements SolrCloseable {
return Collections.singletonList(new SliceMutator(getSolrCloudManager()).updateShardState(clusterState, message));
case QUIT:
if (myId.equals(message.get(ID))) {
log.info("Quit command received {} {}", message, LeaderElector.getNodeName(myId));
if (log.isInfoEnabled()) {
log.info("Quit command received {} {}", message, LeaderElector.getNodeName(myId));
}
overseerCollectionConfigSetProcessor.close();
close();
} else {
@@ -562,7 +572,7 @@ public class Overseer implements SolrCloseable {
closed = false;
doClose();
stats = new Stats();
log.info("Overseer (id=" + id + ") starting");
log.info("Overseer (id={}) starting", id);
createOverseerNode(reader.getZkClient());
//launch cluster state updater thread
ThreadGroup tg = new ThreadGroup("Overseer state updater.");
@@ -745,7 +755,7 @@ public class Overseer implements SolrCloseable {

public synchronized void close() {
if (this.id != null) {
log.info("Overseer (id=" + id + ") closing");
log.info("Overseer (id={}) closing", id);
}
this.closed = true;
doClose();

@@ -98,7 +98,7 @@ public class OverseerConfigSetMessageHandler implements OverseerMessageHandler {
+ " expected: " + CONFIGSETS_ACTION_PREFIX);
}
operation = operation.substring(CONFIGSETS_ACTION_PREFIX.length());
log.info("OverseerConfigSetMessageHandler.processMessage : " + operation + " , " + message.toString());
log.info("OverseerConfigSetMessageHandler.processMessage : {}, {}", operation, message);

ConfigSetParams.ConfigSetAction action = ConfigSetParams.ConfigSetAction.get(operation);
if (action == null) {

@@ -92,7 +92,9 @@ public class OverseerNodePrioritizer {
if(!designateNodeId.equals( electionNodes.get(1))) { //checking if it is already at no:1
log.info("asking node {} to come join election at head", designateNodeId);
invokeOverseerOp(designateNodeId, "rejoinAtHead"); //ask designate to come first
log.info("asking the old first in line {} to rejoin election ",electionNodes.get(1) );
if (log.isInfoEnabled()) {
log.info("asking the old first in line {} to rejoin election ", electionNodes.get(1));
}
invokeOverseerOp(electionNodes.get(1), "rejoin");//ask second inline to go behind
}
//now ask the current leader to QUIT , so that the designate can takeover

@@ -208,7 +208,9 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
continue; // not a no, not a yes, try asking again
}

log.debug("Cleaning up work-queue. #Running tasks: {} #Completed tasks: {}", runningTasksSize(), completedTasks.size());
if (log.isDebugEnabled()) {
log.debug("Cleaning up work-queue. #Running tasks: {} #Completed tasks: {}", runningTasksSize(), completedTasks.size());
}
cleanUpWorkQueue();

printTrackingMaps();
@@ -236,7 +238,9 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
//instead of reading MAX_PARALLEL_TASKS items always, we should only fetch as much as we can execute
int toFetch = Math.min(MAX_BLOCKED_TASKS - heads.size(), MAX_PARALLEL_TASKS - runningTasksSize());
List<QueueEvent> newTasks = workQueue.peekTopN(toFetch, excludedTasks, 2000L);
log.debug("Got {} tasks from work-queue : [{}]", newTasks.size(), newTasks);
if (log.isDebugEnabled()) {
log.debug("Got {} tasks from work-queue : [{}]", newTasks.size(), newTasks);
}
heads.addAll(newTasks);
} else {
// Prevent free-spinning this loop.
@@ -281,14 +285,16 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
}
String operation = message.getStr(Overseer.QUEUE_OPERATION);
if (operation == null) {
log.error("Msg does not have required " + Overseer.QUEUE_OPERATION + ": {}", message);
log.error("Msg does not have required {} : {}", Overseer.QUEUE_OPERATION, message);
workQueue.remove(head);
continue;
}
OverseerMessageHandler messageHandler = selector.selectOverseerMessageHandler(message);
OverseerMessageHandler.Lock lock = messageHandler.lockTask(message, taskBatch);
if (lock == null) {
log.debug("Exclusivity check failed for [{}]", message.toString());
if (log.isDebugEnabled()) {
log.debug("Exclusivity check failed for [{}]", message);
}
//we may end crossing the size of the MAX_BLOCKED_TASKS. They are fine
if (blockedTasks.size() < MAX_BLOCKED_TASKS)
blockedTasks.put(head.getId(), head);
@@ -296,7 +302,9 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
}
try {
markTaskAsRunning(head, asyncId);
log.debug("Marked task [{}] as running", head.getId());
if (log.isDebugEnabled()) {
log.debug("Marked task [{}] as running", head.getId());
}
} catch (KeeperException.NodeExistsException e) {
lock.unlock();
// This should never happen
@@ -308,7 +316,9 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
Thread.currentThread().interrupt();
continue;
}
log.debug(messageHandler.getName() + ": Get the message id:" + head.getId() + " message:" + message.toString());
if (log.isDebugEnabled()) {
log.debug("{}: Get the message id: {} message: {}", messageHandler.getName(), head.getId(), message);
}
Runner runner = new Runner(messageHandler, message,
operation, head, lock);
tpe.execute(runner);
@@ -500,7 +510,9 @@ public class OverseerTaskProcessor implements Runnable, Closeable {

try {
try {
log.debug("Runner processing {}", head.getId());
if (log.isDebugEnabled()) {
log.debug("Runner processing {}", head.getId());
}
response = messageHandler.processMessage(message, operation);
} finally {
timerContext.stop();
@@ -511,22 +523,31 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
if (response != null && (response.getResponse().get("failure") != null
|| response.getResponse().get("exception") != null)) {
failureMap.put(asyncId, OverseerSolrResponseSerializer.serialize(response));
log.debug("Updated failed map for task with zkid:[{}]", head.getId());
if (log.isDebugEnabled()) {
log.debug("Updated failed map for task with zkid:[{}]", head.getId());
}
} else {
completedMap.put(asyncId, OverseerSolrResponseSerializer.serialize(response));
log.debug("Updated completed map for task with zkid:[{}]", head.getId());
if (log.isDebugEnabled()) {
log.debug("Updated completed map for task with zkid:[{}]", head.getId());
}
}
} else {
head.setBytes(OverseerSolrResponseSerializer.serialize(response));
log.debug("Completed task:[{}]", head.getId());
if (log.isDebugEnabled()) {
log.debug("Completed task:[{}]", head.getId());
}
}

markTaskComplete(head.getId(), asyncId);
log.debug("Marked task [{}] as completed.", head.getId());
if (log.isDebugEnabled()) {
log.debug("Marked task [{}] as completed.", head.getId());
}
printTrackingMaps();

log.debug(messageHandler.getName() + ": Message id:" + head.getId() +
" complete, response:" + response.getResponse().toString());
if (log.isDebugEnabled()) {
log.debug("{}: Message id: {} complete, response: {}", messageHandler.getName(), head.getId(), response.getResponse());
}
success = true;
} catch (AlreadyClosedException e) {

@@ -565,7 +586,7 @@ public class OverseerTaskProcessor implements Runnable, Closeable {

if (asyncId != null) {
if (!runningMap.remove(asyncId)) {
log.warn("Could not find and remove async call [" + asyncId + "] from the running map.");
log.warn("Could not find and remove async call [{}] from the running map.", asyncId );
}
}

@@ -577,7 +598,7 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
try {
if (asyncId != null) {
if (!runningMap.remove(asyncId)) {
log.warn("Could not find and remove async call [" + asyncId + "] from the running map.");
log.warn("Could not find and remove async call [{}] from the running map.", asyncId);
}
}

@@ -612,14 +633,18 @@ public class OverseerTaskProcessor implements Runnable, Closeable {
private void printTrackingMaps() {
if (log.isDebugEnabled()) {
synchronized (runningTasks) {
log.debug("RunningTasks: {}", runningTasks.toString());
log.debug("RunningTasks: {}", runningTasks);
}
if (log.isDebugEnabled()) {
log.debug("BlockedTasks: {}", blockedTasks.keySet());
}
log.debug("BlockedTasks: {}", blockedTasks.keySet().toString());
synchronized (completedTasks) {
log.debug("CompletedTasks: {}", completedTasks.keySet().toString());
if (log.isDebugEnabled()) {
log.debug("CompletedTasks: {}", completedTasks.keySet());
}
}
synchronized (runningZKTasks) {
log.info("RunningZKTasks: {}", runningZKTasks.toString());
log.info("RunningZKTasks: {}", runningZKTasks);
}
}
}

@@ -88,7 +88,9 @@ public class OverseerTaskQueue extends ZkDistributedQueue {
if (data != null) {
ZkNodeProps message = ZkNodeProps.load(data);
if (message.containsKey(requestIdKey)) {
log.debug("Looking for {}, found {}", message.get(requestIdKey), requestId);
if (log.isDebugEnabled()) {
log.debug("Looking for {}, found {}", message.get(requestIdKey), requestId);
}
if(message.get(requestIdKey).equals(requestId)) return true;
}
}
@@ -116,8 +118,7 @@ public class OverseerTaskQueue extends ZkDistributedQueue {
zookeeper.setData(responsePath, event.getBytes(), true);
} catch (KeeperException.NoNodeException ignored) {
// we must handle the race case where the node no longer exists
log.info("Response ZK path: " + responsePath + " doesn't exist."
+ " Requestor may have disconnected from ZooKeeper");
log.info("Response ZK path: {} doesn't exist. Requestor may have disconnected from ZooKeeper", responsePath);
}
try {
zookeeper.delete(path, -1, true);
@@ -156,7 +157,9 @@ public class OverseerTaskQueue extends ZkDistributedQueue {
return;
}
// If latchEventType is not null, only fire if the type matches
log.debug("{} fired on path {} state {} latchEventType {}", event.getType(), event.getPath(), event.getState(), latchEventType);
if (log.isDebugEnabled()) {
log.debug("{} fired on path {} state {} latchEventType {}", event.getType(), event.getPath(), event.getState(), latchEventType);
}
if (latchEventType == null || event.getType() == latchEventType) {
lock.lock();
try {
@@ -283,7 +286,7 @@ public class OverseerTaskQueue extends ZkDistributedQueue {
sb.append(queueEvent.getId()).append(", ");
}
sb.append("]");
log.debug("Returning topN elements: {}", sb.toString());
log.debug("Returning topN elements: {}", sb);
}
}

@@ -62,7 +62,9 @@ public class RecoveringCoreTermWatcher implements ZkShardTerms.CoreTermWatcher {
solrCore.getUpdateHandler().getSolrCoreState().doRecovery(solrCore.getCoreContainer(), solrCore.getCoreDescriptor());
}
} catch (Exception e) {
log.info("Failed to watch term of core {}", coreDescriptor.getName(), e);
if (log.isInfoEnabled()) {
log.info("Failed to watch term of core {}", coreDescriptor.getName(), e);
}
return false;
}

@@ -262,18 +262,17 @@ public class RecoveryStrategy implements Runnable, Closeable {
Directory dir = core.getDirectoryFactory().get(core.getIndexDir(), DirContext.META_DATA, null);
try {
final IndexCommit commit = core.getDeletionPolicy().getLatestCommit();
log.debug(core.getCoreContainer()
.getZkController().getNodeName()
+ " replicated "
+ searcher.count(new MatchAllDocsQuery())
+ " from "
+ leaderUrl
+ " gen:"
+ (null == commit ? "null" : commit.getGeneration())
+ " data:" + core.getDataDir()
+ " index:" + core.getIndexDir()
+ " newIndex:" + core.getNewIndexDir()
+ " files:" + Arrays.asList(dir.listAll()));
if (log.isDebugEnabled()) {
log.debug("{} replicated {} from {} gen: {} data: {} index: {} newIndex: {} files: {}"
, core.getCoreContainer().getZkController().getNodeName()
, searcher.count(new MatchAllDocsQuery())
, leaderUrl
, (null == commit ? "null" : commit.getGeneration())
, core.getDataDir()
, core.getIndexDir()
, core.getNewIndexDir()
, Arrays.asList(dir.listAll()));
}
} finally {
core.getDirectoryFactory().release(dir);
searchHolder.decref();
@@ -310,7 +309,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
return;
}

log.info("Starting recovery process. recoveringAfterStartup=" + recoveringAfterStartup);
log.info("Starting recovery process. recoveringAfterStartup={}", recoveringAfterStartup);

try {
doRecovery(core);
@@ -373,12 +372,16 @@ public class RecoveryStrategy implements Runnable, Closeable {
return;
}

log.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(), leaderUrl,
ourUrl);
if (log.isInfoEnabled()) {
log.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(), leaderUrl,
ourUrl);
}
zkController.publish(this.coreDescriptor, Replica.State.RECOVERING);

if (isClosed()) {
log.info("Recovery for core {} has been closed", core.getName());
if (log.isInfoEnabled()) {
log.info("Recovery for core {} has been closed", core.getName());
}
break;
}
log.info("Starting Replication Recovery.");
@@ -389,7 +392,9 @@ public class RecoveryStrategy implements Runnable, Closeable {
replicate(zkController.getNodeName(), core, leaderprops);

if (isClosed()) {
log.info("Recovery for core {} has been closed", core.getName());
if (log.isInfoEnabled()) {
log.info("Recovery for core {} has been closed", core.getName());
}
break;
}

@@ -427,11 +432,13 @@ public class RecoveryStrategy implements Runnable, Closeable {
try {

if (isClosed()) {
log.info("Recovery for core {} has been closed", core.getName());
if (log.isInfoEnabled()) {
log.info("Recovery for core {} has been closed", core.getName());
}
break;
}

log.error("Recovery failed - trying again... (" + retries + ")");
log.error("Recovery failed - trying again... ({})", retries);

retries++;
if (retries >= maxRetries) {
@@ -453,11 +460,15 @@ public class RecoveryStrategy implements Runnable, Closeable {
// will always be the minimum of the two (12). Since we sleep at 5 seconds sub-intervals in
// order to check if we were closed, 12 is chosen as the maximum loopCount (5s * 12 = 1m).
int loopCount = retries < 4 ? (int) Math.min(Math.pow(2, retries), 12) : 12;
log.info("Wait [{}] seconds before trying to recover again (attempt={})",
TimeUnit.MILLISECONDS.toSeconds(loopCount * startingRecoveryDelayMilliSeconds), retries);
if (log.isInfoEnabled()) {
log.info("Wait [{}] seconds before trying to recover again (attempt={})",
TimeUnit.MILLISECONDS.toSeconds(loopCount * startingRecoveryDelayMilliSeconds), retries);
}
for (int i = 0; i < loopCount; i++) {
if (isClosed()) {
log.info("Recovery for core {} has been closed", core.getName());
if (log.isInfoEnabled()) {
log.info("Recovery for core {} has been closed", core.getName());
}
break; // check if someone closed us
}
Thread.sleep(startingRecoveryDelayMilliSeconds);
@@ -471,7 +482,7 @@ public class RecoveryStrategy implements Runnable, Closeable {

}
// We skip core.seedVersionBuckets(); We don't have a transaction log
log.info("Finished recovery process, successful=[{}]", Boolean.toString(successfulRecovery));
log.info("Finished recovery process, successful=[{}]", successfulRecovery);
}

// TODO: perhaps make this grab a new core each time through the loop to handle core reloads?
@@ -511,15 +522,19 @@ public class RecoveryStrategy implements Runnable, Closeable {

if (oldIdx > 0) {
log.info("Found new versions added after startup: num=[{}]", oldIdx);
log.info("currentVersions size={} range=[{} to {}]", recentVersions.size(), recentVersions.get(0),
recentVersions.get(recentVersions.size() - 1));
if (log.isInfoEnabled()) {
log.info("currentVersions size={} range=[{} to {}]", recentVersions.size(), recentVersions.get(0),
recentVersions.get(recentVersions.size() - 1));
}
}

if (startingVersions.isEmpty()) {
log.info("startupVersions is empty");
} else {
log.info("startupVersions size={} range=[{} to {}]", startingVersions.size(), startingVersions.get(0),
startingVersions.get(startingVersions.size() - 1));
if (log.isInfoEnabled()) {
log.info("startupVersions size={} range=[{} to {}]", startingVersions.size(), startingVersions.get(0),
startingVersions.get(startingVersions.size() - 1));
}
}
} catch (Exception e) {
SolrException.log(log, "Error getting recent versions.", e);
@@ -578,9 +593,11 @@ public class RecoveryStrategy implements Runnable, Closeable {
// recalling buffer updates will drop the old buffer tlog
ulog.bufferUpdates();

log.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(),
leader.getCoreUrl(),
ourUrl);
if (log.isInfoEnabled()) {
log.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(),
leader.getCoreUrl(),
ourUrl);
}
zkController.publish(this.coreDescriptor, Replica.State.RECOVERING);

final Slice slice = zkStateReader.getClusterState().getCollection(cloudDesc.getCollectionName())
@@ -618,8 +635,10 @@ public class RecoveryStrategy implements Runnable, Closeable {
// first thing we just try to sync
if (firstTime) {
firstTime = false; // only try sync the first time through the loop
log.info("Attempting to PeerSync from [{}] - recoveringAfterStartup=[{}]", leader.getCoreUrl(),
recoveringAfterStartup);
if (log.isInfoEnabled()) {
log.info("Attempting to PeerSync from [{}] - recoveringAfterStartup=[{}]", leader.getCoreUrl(),
recoveringAfterStartup);
}
// System.out.println("Attempting to PeerSync from " + leaderUrl
// + " i am:" + zkController.getNodeName());
PeerSyncWithLeader peerSyncWithLeader = new PeerSyncWithLeader(core,
@@ -713,7 +732,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
break;
}

log.error("Recovery failed - trying again... (" + retries + ")");
log.error("Recovery failed - trying again... ({})", retries);

retries++;
if (retries >= maxRetries) {
@@ -759,7 +778,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
core.seedVersionBuckets();
}

log.info("Finished recovery process, successful=[{}]", Boolean.toString(successfulRecovery));
log.info("Finished recovery process, successful=[{}]", successfulRecovery);
}

private final Replica pingLeader(String ourUrl, CoreDescriptor coreDesc, boolean mayPutReplicaAsDown)
@@ -894,7 +913,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
HttpUriRequestResponse mrr = client.httpUriRequest(prepCmd);
prevSendPreRecoveryHttpUriRequest = mrr.httpUriRequest;

log.info("Sending prep recovery command to [{}]; [{}]", leaderBaseUrl, prepCmd.toString());
log.info("Sending prep recovery command to [{}]; [{}]", leaderBaseUrl, prepCmd);

mrr.future.get();
}

@@ -129,9 +129,13 @@ public class SolrZkServer {
};

if (zkProps.getServers().size() > 1) {
log.info("STARTING EMBEDDED ENSEMBLE ZOOKEEPER SERVER at port " + zkProps.getClientPortAddress().getPort());
if (log.isInfoEnabled()) {
log.info("STARTING EMBEDDED ENSEMBLE ZOOKEEPER SERVER at port {}", zkProps.getClientPortAddress().getPort());
}
} else {
log.info("STARTING EMBEDDED STANDALONE ZOOKEEPER SERVER at port " + zkProps.getClientPortAddress().getPort());
if (log.isInfoEnabled()) {
log.info("STARTING EMBEDDED STANDALONE ZOOKEEPER SERVER at port {}", zkProps.getClientPortAddress().getPort());
}
}

log.warn("Embedded Zookeeper is not recommended in production environments. See Reference Guide for details.");
@@ -171,7 +175,7 @@ class SolrZkServerProps extends QuorumPeerConfig {
public static Properties getProperties(String path) throws ConfigException {
File configFile = new File(path);

log.info("Reading configuration from: " + configFile);
log.info("Reading configuration from: {}", configFile);

try {
if (!configFile.exists()) {

@@ -96,7 +96,9 @@ public class SyncStrategy {

recoveryRequests.clear();

log.info("Sync replicas to " + ZkCoreNodeProps.getCoreUrl(leaderProps));
if (log.isInfoEnabled()) {
log.info("Sync replicas to {}", ZkCoreNodeProps.getCoreUrl(leaderProps));
}

if (core.getUpdateHandler().getUpdateLog() == null) {
log.error("No UpdateLog found - cannot sync");
@@ -197,14 +199,18 @@ public class SyncStrategy {
.getReplicaProps(collection, shardId,
cd.getCloudDescriptor().getCoreNodeName());
if (nodes == null) {
log.info(ZkCoreNodeProps.getCoreUrl(leaderProps) + " has no replicas");
if (log.isInfoEnabled()) {
log.info("{} has no replicas", ZkCoreNodeProps.getCoreUrl(leaderProps));
}
return;
}

ZkCoreNodeProps zkLeader = new ZkCoreNodeProps(leaderProps);
for (ZkCoreNodeProps node : nodes) {
try {
log.info(ZkCoreNodeProps.getCoreUrl(leaderProps) + ": try and ask " + node.getCoreUrl() + " to sync");
if (log.isInfoEnabled()) {
log.info("{}: try and ask {} to sync", ZkCoreNodeProps.getCoreUrl(leaderProps), node.getCoreUrl());
}

requestSync(node.getBaseUrl(), node.getCoreUrl(), zkLeader.getCoreUrl(), node.getCoreName(), nUpdates);

@@ -223,8 +229,10 @@ public class SyncStrategy {
}

if (!success) {
log.info(ZkCoreNodeProps.getCoreUrl(leaderProps) + ": Sync failed - we will ask replica (" + srsp.getShardAddress()
+ ") to recover.");
if (log.isInfoEnabled()) {
log.info("{}: Sync failed - we will ask replica ({}) to recover."
, ZkCoreNodeProps.getCoreUrl(leaderProps), srsp.getShardAddress());
}
if (isClosed) {
log.info("We have been closed, don't request that a replica recover");
} else {
@@ -235,7 +243,9 @@ public class SyncStrategy {
recoveryRequests.add(rr);
}
} else {
log.info(ZkCoreNodeProps.getCoreUrl(leaderProps) + ": " + " sync completed with " + srsp.getShardAddress());
if (log.isInfoEnabled()) {
log.info("{}: sync completed with {}", ZkCoreNodeProps.getCoreUrl(leaderProps), srsp.getShardAddress());
}
}

}

@ -266,7 +266,9 @@ public class ZkController implements Closeable {
|
|||
}
|
||||
|
||||
public Object call() throws Exception {
|
||||
log.info("Registering core {} afterExpiration? {}", descriptor.getName(), afterExpiration);
|
||||
if (log.isInfoEnabled()) {
|
||||
log.info("Registering core {} afterExpiration? {}", descriptor.getName(), afterExpiration);
|
||||
}
|
||||
register(descriptor.getName(), descriptor, recoverReloadedCores, afterExpiration, false);
|
||||
return descriptor;
|
||||
}
|
||||
|
@ -432,7 +434,7 @@ public class ZkController implements Closeable {
|
|||
}
|
||||
} catch (Exception exc) {
|
||||
// not much we can do here other than warn in the log
|
||||
log.warn("Error when notifying OnReconnect listener " + listener + " after session re-connected.", exc);
|
||||
log.warn("Error when notifying OnReconnect listener {} after session re-connected.", listener, exc);
|
||||
}
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
|
@ -767,7 +769,7 @@ public class ZkController implements Closeable {
|
|||
String zkPath = ZkConfigManager.CONFIGS_ZKNODE + "/" + zkConfigName + "/" + fileName;
|
||||
byte[] bytes = zkClient.getData(zkPath, null, null, true);
|
||||
if (bytes == null) {
|
||||
log.error("Config file contains no data:" + zkPath);
|
||||
log.error("Config file contains no data:{}", zkPath);
|
||||
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
|
||||
"Config file contains no data:" + zkPath);
|
||||
}
|
||||
|
@ -868,8 +870,9 @@ public class ZkController implements Closeable {
|
|||
if (zkClient.exists("/configs/_default", true) == false) {
|
||||
String configDirPath = getDefaultConfigDirPath();
|
||||
if (configDirPath == null) {
|
||||
log.warn("The _default configset could not be uploaded. Please provide 'solr.default.confdir' parameter that points to a configset" +
|
||||
" intended to be the default. Current 'solr.default.confdir' value: {}", System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE));
|
||||
log.warn("The _default configset could not be uploaded. Please provide 'solr.default.confdir' parameter that points to a configset {} {}"
|
||||
, "intended to be the default. Current 'solr.default.confdir' value:"
|
||||
, System.getProperty(SolrDispatchFilter.SOLR_DEFAULT_CONFDIR_ATTRIBUTE));
|
||||
} else {
|
||||
ZkMaintenanceUtils.upConfig(zkClient, Paths.get(configDirPath), ConfigSetsHandlerApi.DEFAULT_CONFIGSET_NAME);
|
||||
}
|
||||
|
@ -1019,7 +1022,7 @@ public class ZkController implements Closeable {
|
|||
} catch (KeeperException.NodeExistsException e) {
|
||||
// someone else already created this node - ignore
|
||||
} catch (KeeperException | InterruptedException e1) {
|
||||
log.warn("Unable to register nodeLost path for " + n, e1);
|
||||
log.warn("Unable to register nodeLost path for {}", n, e1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1109,7 +1112,7 @@ public class ZkController implements Closeable {
|
|||
String nodeName = getNodeName();
|
||||
String nodePath = ZkStateReader.LIVE_NODES_ZKNODE + "/" + nodeName;
|
||||
String nodeAddedPath = ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH + "/" + nodeName;
|
||||
log.info("Register node as live in ZooKeeper:" + nodePath);
|
||||
log.info("Register node as live in ZooKeeper:{}", nodePath);
|
||||
List<Op> ops = new ArrayList<>(2);
|
||||
ops.add(Op.create(nodePath, null, zkClient.getZkACLProvider().getACLsToAdd(nodePath), CreateMode.EPHEMERAL));
|
||||
// if there are nodeAdded triggers don't create nodeAdded markers
|
||||
|
@ -1130,7 +1133,7 @@ public class ZkController implements Closeable {
|
|||
String nodeName = getNodeName();
|
||||
String nodePath = ZkStateReader.LIVE_NODES_ZKNODE + "/" + nodeName;
|
||||
String nodeAddedPath = ZkStateReader.SOLR_AUTOSCALING_NODE_ADDED_PATH + "/" + nodeName;
|
||||
log.info("Remove node as live in ZooKeeper:" + nodePath);
|
||||
log.info("Remove node as live in ZooKeeper:{}", nodePath);
|
||||
List<Op> ops = new ArrayList<>(2);
|
||||
ops.add(Op.delete(nodePath, -1));
|
||||
ops.add(Op.delete(nodeAddedPath, -1));
|
||||
|
@ -1230,7 +1233,7 @@ public class ZkController implements Closeable {
|
|||
String leaderUrl = getLeader(cloudDesc, leaderVoteWait + 600000);
|
||||
|
||||
String ourUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, coreName);
|
||||
log.debug("We are " + ourUrl + " and leader is " + leaderUrl);
|
||||
log.debug("We are {} and leader is {}", ourUrl, leaderUrl);
|
||||
boolean isLeader = leaderUrl.equals(ourUrl);
|
||||
assert !(isLeader && replica.getType() == Type.PULL) : "Pull replica became leader!";
|
||||
|
||||
|
@ -1261,13 +1264,15 @@ public class ZkController implements Closeable {
|
|||
if (slice.getState() != Slice.State.CONSTRUCTION || !isLeader) {
|
||||
Future<UpdateLog.RecoveryInfo> recoveryFuture = core.getUpdateHandler().getUpdateLog().recoverFromLog();
|
||||
if (recoveryFuture != null) {
|
||||
log.info("Replaying tlog for " + ourUrl + " during startup... NOTE: This can take a while.");
|
||||
log.info("Replaying tlog for {} during startup... NOTE: This can take a while.", ourUrl);
|
||||
recoveryFuture.get(); // NOTE: this could potentially block for
|
||||
// minutes or more!
|
||||
// TODO: public as recovering in the mean time?
|
||||
// TODO: in the future we could do peersync in parallel with recoverFromLog
|
||||
} else {
|
||||
log.debug("No LogReplay needed for core={} baseURL={}", core.getName(), baseUrl);
|
||||
if (log.isDebugEnabled()) {
|
||||
log.debug("No LogReplay needed for core={} baseURL={}", core.getName(), baseUrl);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1490,14 +1495,18 @@ public class ZkController implements Closeable {
|
|||
}
|
||||
|
||||
if (doRecovery) {
|
||||
log.info("Core needs to recover:" + core.getName());
|
||||
if (log.isInfoEnabled()) {
|
||||
log.info("Core needs to recover:{}", core.getName());
|
||||
}
|
||||
core.getUpdateHandler().getSolrCoreState().doRecovery(cc, core.getCoreDescriptor());
|
||||
return true;
|
||||
}
|
||||
|
||||
ZkShardTerms zkShardTerms = getShardTerms(collection, shardId);
|
||||
if (zkShardTerms.registered(coreZkNodeName) && !zkShardTerms.canBecomeLeader(coreZkNodeName)) {
|
||||
log.info("Leader's term larger than core " + core.getName() + "; starting recovery process");
|
||||
if (log.isInfoEnabled()) {
|
||||
log.info("Leader's term larger than core {}; starting recovery process", core.getName());
|
||||
}
|
||||
core.getUpdateHandler().getSolrCoreState().doRecovery(cc, core.getCoreDescriptor());
|
||||
return true;
|
||||
}
|
||||
|
@ -1532,7 +1541,7 @@ public class ZkController implements Closeable {
|
|||
try {
|
||||
String collection = cd.getCloudDescriptor().getCollectionName();
|
||||
|
||||
log.debug("publishing state={}", state.toString());
|
||||
log.debug("publishing state={}", state);
|
||||
// System.out.println(Thread.currentThread().getStackTrace()[3]);
|
||||
Integer numShards = cd.getCloudDescriptor().getNumShards();
|
||||
if (numShards == null) { // XXX sys prop hack
|
||||
|
@ -1581,7 +1590,9 @@ public class ZkController implements Closeable {
|
|||
}
|
||||
} catch (SolrCoreInitializationException ex) {
|
||||
// The core had failed to initialize (in a previous request, not this one), hence nothing to do here.
|
||||
log.info("The core '{}' had failed to initialize before.", cd.getName());
|
||||
if (log.isInfoEnabled()) {
|
||||
log.info("The core '{}' had failed to initialize before.", cd.getName());
|
||||
}
|
||||
}
|
||||
|
||||
// pull replicas are excluded because their terms are not considered
|
||||
|
@ -1719,7 +1730,9 @@ public class ZkController implements Closeable {
|
|||
}
|
||||
|
||||
private void waitForShardId(CoreDescriptor cd) {
|
||||
log.debug("waiting to find shard id in clusterstate for " + cd.getName());
|
||||
if (log.isDebugEnabled()) {
|
||||
log.debug("waiting to find shard id in clusterstate for {}", cd.getName());
|
||||
}
|
||||
int retryCount = 320;
|
||||
while (retryCount-- > 0) {
|
||||
final String shardId = zkStateReader.getClusterState().getShardId(cd.getCollectionName(), getNodeName(), cd.getName());
|
||||
|
@ -1908,11 +1921,13 @@ public class ZkController implements Closeable {
|
|||
boolean isLeader = leaderProps.getCoreUrl().equals(ourUrl);
|
||||
if (!isLeader && !SKIP_AUTO_RECOVERY) {
|
||||
if (!getShardTerms(collection, shard).canBecomeLeader(myCoreNodeName)) {
|
||||
log.debug("Term of replica " + myCoreNodeName +
|
||||
" is already less than leader, so not waiting for leader to see down state.");
|
||||
log.debug("Term of replica {} is already less than leader, so not waiting for leader to see down state."
|
||||
, myCoreNodeName);
|
||||
} else {
|
||||
|
||||
log.info("replica={} is making a best effort attempt to wait for leader={} to see it's DOWN state.", myCoreNodeName, leaderProps.getCoreUrl());
|
||||
if (log.isInfoEnabled()) {
|
||||
log.info("replica={} is making a best effort attempt to wait for leader={} to see it's DOWN state.", myCoreNodeName, leaderProps.getCoreUrl());
|
||||
}
|
||||
|
||||
try (HttpSolrClient client = new Builder(leaderBaseUrl)
|
||||
.withConnectionTimeout(8000) // short timeouts, we may be in a storm and this is best effort and maybe we should be the leader now
|
||||
|
@ -1975,7 +1990,7 @@ public class ZkController implements Closeable {
|
|||
|
||||
public static void linkConfSet(SolrZkClient zkClient, String collection, String confSetName) throws KeeperException, InterruptedException {
|
||||
String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection;
|
||||
log.debug("Load collection config from:" + path);
|
||||
log.debug("Load collection config from:{}", path);
|
||||
byte[] data;
|
||||
try {
|
||||
data = zkClient.getData(path, null, null, true);
|
||||
|
@ -2024,7 +2039,9 @@ public class ZkController implements Closeable {
|
|||
//List<String> allCoreNames = cfg.getAllCoreNames();
|
||||
List<CoreDescriptor> cds = cc.getCoresLocator().discover(cc);
|
||||
|
||||
log.info("bootstrapping config for " + cds.size() + " cores into ZooKeeper using solr.xml from " + cc.getSolrHome());
|
||||
if (log.isInfoEnabled()) {
|
||||
log.info("bootstrapping config for {} cores into ZooKeeper using solr.xml from {}", cds.size(), cc.getSolrHome());
|
||||
}
|
||||
|
||||
for (CoreDescriptor cd : cds) {
|
||||
String coreName = cd.getName();
|
||||
|
@ -2032,7 +2049,7 @@ public class ZkController implements Closeable {
|
|||
if (StringUtils.isEmpty(confName))
|
||||
confName = coreName;
|
||||
Path udir = cd.getInstanceDir().resolve("conf");
|
||||
log.info("Uploading directory " + udir + " with name " + confName + " for SolrCore " + coreName);
|
||||
log.info("Uploading directory {} with name {} for solrCore {}", udir, confName, coreName);
|
||||
configManager.uploadConfigDir(udir, confName);
|
||||
}
|
||||
}
|
||||
|
@ -2078,7 +2095,7 @@ public class ZkController implements Closeable {
|
|||
try {
|
||||
return asyncIdsMap.putIfAbsent(asyncId, new byte[0]);
|
||||
} catch (InterruptedException e) {
|
||||
log.error("Could not claim asyncId=" + asyncId, e);
|
||||
log.error("Could not claim asyncId={}", asyncId, e);
|
||||
Thread.currentThread().interrupt();
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
|
@ -2094,7 +2111,7 @@ public class ZkController implements Closeable {
|
|||
try {
|
||||
return asyncIdsMap.remove(asyncId);
|
||||
} catch (InterruptedException e) {
|
||||
log.error("Could not release asyncId=" + asyncId, e);
|
||||
log.error("Could not release asyncId={}", asyncId, e);
|
||||
Thread.currentThread().interrupt();
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
|
@@ -2265,7 +2282,7 @@ public class ZkController implements Closeable {
if (listener != null) {
synchronized (reconnectListeners) {
reconnectListeners.add(listener);
log.debug("Added new OnReconnect listener "+listener);
log.debug("Added new OnReconnect listener {}", listener);
}
}
}
@@ -2280,10 +2297,11 @@ public class ZkController implements Closeable {
wasRemoved = reconnectListeners.remove(listener);
}
if (wasRemoved) {
log.debug("Removed OnReconnect listener "+listener);
log.debug("Removed OnReconnect listener {}", listener);
} else {
log.warn("Was asked to remove OnReconnect listener "+listener+
", but remove operation did not find it in the list of registered listeners.");
log.warn("Was asked to remove OnReconnect listener {}{}"
, listener
, ", but remove operation did not find it in the list of registered listeners.");
}
}
}
@@ -2324,12 +2342,16 @@ public class ZkController implements Closeable {
} catch (KeeperException.NodeExistsException nee) {
try {
Stat stat = zkClient.exists(resourceLocation, null, true);
log.debug("failed to set data version in zk is {} and expected version is {} ", stat.getVersion(), znodeVersion);
if (log.isDebugEnabled()) {
log.debug("failed to set data version in zk is {} and expected version is {} ", stat.getVersion(), znodeVersion);
}
} catch (Exception e1) {
log.warn("could not get stat");
}

log.info(StrUtils.formatString(errMsg, resourceLocation, znodeVersion));
if (log.isInfoEnabled()) {
log.info(StrUtils.formatString(errMsg, resourceLocation, znodeVersion));
}
throw new ResourceModifiedInZkException(ErrorCode.CONFLICT, StrUtils.formatString(errMsg, resourceLocation, znodeVersion) + ", retry.");
}
}
@@ -2344,7 +2366,9 @@ public class ZkController implements Closeable {
log.error(e.getMessage());

}
log.info(StrUtils.formatString(errMsg + " zkVersion= " + v, resourceLocation, znodeVersion));
if (log.isInfoEnabled()) {
log.info(StrUtils.formatString("%s zkVersion= %d %s %d", errMsg, resourceLocation, znodeVersion));
}
throw new ResourceModifiedInZkException(ErrorCode.CONFLICT, StrUtils.formatString(errMsg, resourceLocation, znodeVersion) + ", retry.");
} catch (ResourceModifiedInZkException e) {
throw e;
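The two hunks above deal with messages that are pre-formatted by StrUtils.formatString before the logger ever sees them, so {} placeholders cannot defer any work; instead the whole call is wrapped in a level check. A sketch of the same idea, using java.text.MessageFormat as a stand-in for the Solr helper and hypothetical names:

    import java.text.MessageFormat;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class PreformattedMessageSketch {
      private static final Logger log = LoggerFactory.getLogger(PreformattedMessageSketch.class);

      void logConflict(String resourceLocation, int znodeVersion) {
        // The formatting happens before log.info() is invoked, so it is only worth
        // doing when INFO is actually enabled.
        if (log.isInfoEnabled()) {
          log.info(MessageFormat.format("Stale version of {0}, expected {1}", resourceLocation, znodeVersion));
        }
      }
    }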
@@ -2384,7 +2408,7 @@ public class ZkController implements Closeable {
synchronized (confDirectoryListeners) {
final Set<Runnable> listeners = confDirectoryListeners.get(confDir);
if (listeners == null) {
log.warn(confDir + " has no more registered listeners, but a live one attempted to unregister!");
log.warn("{} has no more registered listeners, but a live one attempted to unregister!", confDir);
return;
}
if (listeners.remove(listener)) {
@@ -2627,7 +2651,7 @@ public class ZkController implements Closeable {
Thread.currentThread().interrupt();
log.debug("Publish node as down was interrupted.");
} catch (KeeperException e) {
log.warn("Could not publish node as down: " + e.getMessage());
log.warn("Could not publish node as down: {}", e.getMessage());
}
}
@@ -2638,31 +2662,41 @@ public class ZkController implements Closeable {
if (!core.getSolrConfig().useColdSearcher) {
RefCounted<SolrIndexSearcher> registeredSearcher = core.getRegisteredSearcher();
if (registeredSearcher != null) {
log.debug("Found a registered searcher: {} for core: {}", registeredSearcher.get(), core);
if (log.isDebugEnabled()) {
log.debug("Found a registered searcher: {} for core: {}", registeredSearcher.get(), core);
}
registeredSearcher.decref();
} else {
Future[] waitSearcher = new Future[1];
log.info("No registered searcher found for core: {}, waiting until a searcher is registered before publishing as active", core.getName());
if (log.isInfoEnabled()) {
log.info("No registered searcher found for core: {}, waiting until a searcher is registered before publishing as active", core.getName());
}
final RTimer timer = new RTimer();
RefCounted<SolrIndexSearcher> searcher = null;
try {
searcher = core.getSearcher(false, true, waitSearcher, true);
boolean success = true;
if (waitSearcher[0] != null) {
log.debug("Waiting for first searcher of core {}, id: {} to be registered", core.getName(), core);
if (log.isDebugEnabled()) {
log.debug("Waiting for first searcher of core {}, id: {} to be registered", core.getName(), core);
}
try {
waitSearcher[0].get();
} catch (ExecutionException e) {
log.warn("Wait for a searcher to be registered for core " + core.getName() + ",id: " + core + " failed due to: " + e, e);
log.warn("Wait for a searcher to be registered for core {}, id: {} failed due to: {}", core.getName(), core, e, e);
success = false;
}
}
if (success) {
if (searcher == null) {
// should never happen
log.debug("Did not find a searcher even after the future callback for core: {}, id: {}!!!", core.getName(), core);
if (log.isDebugEnabled()) {
log.debug("Did not find a searcher even after the future callback for core: {}, id: {}!!!", core.getName(), core);
}
} else {
log.info("Found a registered searcher: {}, took: {} ms for core: {}, id: {}", searcher.get(), timer.getTime(), core.getName(), core);
if (log.isInfoEnabled()) {
log.info("Found a registered searcher: {}, took: {} ms for core: {}, id: {}", searcher.get(), timer.getTime(), core.getName(), core);
}
}
}
} finally {
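One detail in the ExecutionException branch above: the new warn call passes e twice, once to fill the third {} and once more at the end. With SLF4J, an argument left over after all placeholders are filled is treated as the Throwable when it is one, so the message keeps the exception text and the stack trace stays attached. A sketch of that behaviour, assuming standard SLF4J semantics and hypothetical names:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class TrailingThrowableSketch {
      private static final Logger log = LoggerFactory.getLogger(TrailingThrowableSketch.class);

      void warnOnFailure(String coreName, Exception e) {
        // Three placeholders, four arguments: coreName, coreName and e fill the
        // placeholders; the final, unmatched e is logged as the stack trace.
        log.warn("Wait for a searcher to be registered for core {}, id: {} failed due to: {}", coreName, coreName, e, e);
      }
    }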
@@ -2673,7 +2707,9 @@ public class ZkController implements Closeable {
}
RefCounted<SolrIndexSearcher> newestSearcher = core.getNewestSearcher(false);
if (newestSearcher != null) {
log.debug("Found newest searcher: {} for core: {}, id: {}", newestSearcher.get(), core.getName(), core);
if (log.isDebugEnabled()) {
log.debug("Found newest searcher: {} for core: {}, id: {}", newestSearcher.get(), core.getName(), core);
}
newestSearcher.decref();
}
}
@@ -250,7 +250,9 @@ public class ZkDistributedQueue implements DistributedQueue {
try {
zookeeper.delete(ops.get(j).getPath(), -1, true);
} catch (KeeperException.NoNodeException e2) {
log.debug("Can not remove node which is not exist : " + ops.get(j).getPath());
if (log.isDebugEnabled()) {
log.debug("Can not remove node which is not exist : {}", ops.get(j).getPath());
}
}
}
}
@@ -417,7 +419,7 @@ public class ZkDistributedQueue implements DistributedQueue {
for (String childName : childNames) {
// Check format
if (!childName.regionMatches(0, PREFIX, 0, PREFIX.length())) {
log.debug("Found child node with improper name: " + childName);
log.debug("Found child node with improper name: {}", childName);
continue;
}
orderedChildren.add(childName);
@@ -91,7 +91,7 @@ public class ZkSolrResourceLoader extends SolrResourceLoader {
// Retry in case of session expiry
try {
Thread.sleep(1000);
log.debug("Sleeping for 1s before retrying fetching resource=" + resource);
log.debug("Sleeping for 1s before retrying fetching resource={}", resource);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
throw new IOException("Could not load resource=" + resource, ie);