LUCENE-7788: fail precommit on unparameterised log messages and examine for wasted work/objects

Erick Erickson 2020-04-26 09:15:21 -04:00
parent ecc98e8698
commit 8867f465dc
26 changed files with 262 additions and 138 deletions

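For context, the pattern this commit enforces throughout: SLF4J-style parameterized calls defer building the message until the level is known to be enabled, while string concatenation pays that cost on every call. A minimal before/after sketch, adapted from one of the ComputePlanAction changes below:

    // Before: the message String is concatenated even when INFO is disabled.
    log.info("Computed plan empty, remained " + (opCount - opLimit) + " requested ops to try.");

    // After: the format string and arguments are combined only if INFO is enabled.
    log.info("Computed plan empty, remained {} requested ops to try.", opCount - opLimit);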
View File

@ -34,6 +34,7 @@ configure(rootProject) {
if (project.hasProperty('srcDir')) {
srcDir = project.getProperty('srcDir')
} else { // Remove this later, make it optional
//nocommit
throw new BuildException(String.format(Locale.ENGLISH,
'''Until we get all the calls cleaned up, you MUST specify -PsrcDir=relative_path, e.g.
"-PsrcDir=solr/core/src/java/org/apache/solr/core". This task will recursively check all
@ -58,8 +59,8 @@ class ValidateLogCallsTask extends DefaultTask {
, "solr/core/src/java/org/apache/solr/api"
, "solr/core/src/java/org/apache/solr/client"
// , "solr/core/src/java/org/apache/solr/cloud" // 120
, "solr/core/src/java/org/apache/solr/cloud/api" // 107
// , "solr/core/src/java/org/apache/solr/cloud/autoscaling" // 120
, "solr/core/src/java/org/apache/solr/cloud/api"
, "solr/core/src/java/org/apache/solr/cloud/autoscaling"
, "solr/core/src/java/org/apache/solr/cloud/cdcr"
, "solr/core/src/java/org/apache/solr/cloud/hdfs"
, "solr/core/src/java/org/apache/solr/cloud/overseer"
@ -95,6 +96,26 @@ class ValidateLogCallsTask extends DefaultTask {
, "solr/core/src/java/org/apache/solr/util"
]
//nocommit REMOVE ME! Really! and the check for bare parens. Several times I've put in () when I meant {} and only
// caught it by chance. So for this mass edit, I created a check with a lot of false positives. This is a list of
// them and we won't report them.
Map<String, List<Integer>> parenHack = [
"AddReplicaCmd.java" : [99]
, "Assign.java" : [329]
, "CommitTracker.java" : [135]
, "DeleteReplicaCmd.java" : [75]
, "DirectUpdateHandler2.java" : [838, 859]
, "ManagedIndexSchemaFactory.java": [284]
, "MoveReplicaCmd.java" : [75]
, "PeerSync.java" : [704]
, "RecordingJSONParser.java" : [76]
, "SliceMutator.java" : [61]
, "SolrDispatchFilter.java" : [150, 205, 242]
, "Suggester.java" : [147, 181]
, "UpdateLog.java" : [1976]
, "V2HttpCall.java" : [158]
]
def logLevels = ["log.trace", "log.debug", "log.info", "log.warn", "log.error", "log.fatal"]
def errsFound = 0;
@ -105,8 +126,21 @@ class ValidateLogCallsTask extends DefaultTask {
errsFound++;
}
// We have a log.someting line, check for patterns we're not fond of.
// We have a log.something line, check for patterns we're not fond of.
def checkLogLine(File file, String line, int lineNumber, String previous) {
boolean violation = false
def bareParens = (line =~ /".*?"/).findAll()
bareParens.each({ part ->
if (part.contains("()")) {
List<Integer> hack = parenHack.get(file.name)
if (hack == null || hack.contains(lineNumber) == false) {
violation = true
}
}
})
// If the line has been explicitly checked, skip it.
if (line.replaceAll("\\s", "").toLowerCase().contains("//logok")) {
return
@ -134,7 +168,7 @@ class ValidateLogCallsTask extends DefaultTask {
// Check that previous line isn't an if statement for always-reported log levels. Arbitrary decision: we don't
// really care about checking for awkward constructions for WARN and above, so report a violation if the previous
// line contains an if for those levels.
boolean violation = false
boolean dontReportLevels = level.equals("fatal") || level.equals("error") || level.equals("warn")
// There's a convention to declare a member variable for whether a level is enabled and check that rather than
@ -167,9 +201,11 @@ class ValidateLogCallsTask extends DefaultTask {
if (violation) {
reportViolation(String.format("Suspicious logging call File: '%s' line: '%d' log message: '%s' parent path: '%s'. Parameterize or surround with 'if (log.is*Enabled) {... stripped: '%s'"
, file.name
, lineNumber, line
, lineNumber
, line
, file.getParentFile().getAbsolutePath()
, stripped))
, stripped
))
}
}

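To illustrate what the bare-parens check hunts for: "()" typed where "{}" was intended still compiles, but the argument is silently dropped from the output, while a legitimate "()" in a message (say, a method name) is a false positive, hence the parenHack whitelist above. A sketch using messages from this commit (the buggy first line is hypothetical):

    // The real mistake the check targets: "()" where "{}" was meant; tag never prints.
    log.warn("Missing replica info for response tag ()", tag);

    // A false positive: "()" here is part of a method name, so the line is whitelisted.
    log.debug("getNodesForNewReplicas() shard: {}", shard);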
View File

@ -326,7 +326,7 @@ public class Assign {
public static List<ReplicaPosition> getNodesForNewReplicas(ClusterState clusterState, String collectionName,
String shard, int nrtReplicas, int tlogReplicas, int pullReplicas,
Object createNodeSet, SolrCloudManager cloudManager) throws IOException, InterruptedException, AssignmentException {
log.debug("getNodesForNewReplicas() shard: {} , nrtReplicas : {} , tlogReplicas: {} , pullReplicas: {} , createNodeSet {}", shard, nrtReplicas, tlogReplicas, pullReplicas, createNodeSet );
log.debug("getNodesForNewReplicas() shard: {} , nrtReplicas : {} , tlogReplicas: {} , pullReplicas: {} , createNodeSet {}", shard, nrtReplicas, tlogReplicas, pullReplicas, createNodeSet);
DocCollection coll = clusterState.getCollection(collectionName);
Integer maxShardsPerNode = coll.getMaxShardsPerNode() == -1 ? Integer.MAX_VALUE : coll.getMaxShardsPerNode();
List<String> createNodeList = null;

View File

@ -80,7 +80,9 @@ public class ComputePlanAction extends TriggerActionBase {
@Override
public void process(TriggerEvent event, ActionContext context) throws Exception {
log.debug("-- processing event: {} with context properties: {}", event, context.getProperties());
if (log.isDebugEnabled()) {
log.debug("-- processing event: {} with context properties: {}", event, context.getProperties());
}
SolrCloudManager cloudManager = context.getCloudManager();
try {
AutoScalingConfig autoScalingConf = cloudManager.getDistribStateManager().getAutoScalingConfig();
@ -133,11 +135,13 @@ public class ComputePlanAction extends TriggerActionBase {
// PolicyHelper.logState(cloudManager, initialSuggester);
break;
} else {
log.info("Computed plan empty, remained " + (opCount - opLimit) + " requested ops to try.");
log.info("Computed plan empty, remained {} requested ops to try.", opCount - opLimit);
continue;
}
}
log.debug("Computed Plan: {}", operation.getParams());
if (log.isDebugEnabled()) {
log.debug("Computed Plan: {}", operation.getParams());
}
if (!collections.isEmpty()) {
String coll = operation.getParams().get(CoreAdminParams.COLLECTION);
if (coll != null && !collections.contains(coll)) {
@ -190,7 +194,7 @@ public class ComputePlanAction extends TriggerActionBase {
try {
return Integer.parseInt(String.valueOf(o));
} catch (Exception e) {
log.warn("Invalid '" + AutoScalingParams.MAX_COMPUTE_OPERATIONS + "' event property: " + o + ", using default " + maxOp);
log.warn("Invalid '{}' event property: {}, using default {}", AutoScalingParams.MAX_COMPUTE_OPERATIONS, o, maxOp);
return maxOp;
}
}

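Parameterization alone does not avoid the work of producing an argument: in the first hunk above, context.getProperties() would still run with DEBUG off. Wrapping the call in a level check, as this commit does, makes the whole statement essentially free when the level is disabled. The shape of the change:

    if (log.isDebugEnabled()) {
      // getProperties() is only invoked when DEBUG is actually enabled.
      log.debug("-- processing event: {} with context properties: {}", event, context.getProperties());
    }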
View File

@ -77,7 +77,9 @@ public class ExecutePlanAction extends TriggerActionBase {
@Override
public void process(TriggerEvent event, ActionContext context) throws Exception {
log.debug("-- processing event: {} with context properties: {}", event, context.getProperties());
if (log.isDebugEnabled()) {
log.debug("-- processing event: {} with context properties: {}", event, context.getProperties());
}
SolrCloudManager cloudManager = context.getCloudManager();
List<SolrRequest> operations = (List<SolrRequest>) context.getProperty("operations");
if (operations == null || operations.isEmpty()) {
@ -87,7 +89,9 @@ public class ExecutePlanAction extends TriggerActionBase {
try {
int counter = 0;
for (SolrRequest operation : operations) {
log.debug("Executing operation: {}", operation.getParams());
if (log.isDebugEnabled()) {
log.debug("Executing operation: {}", operation.getParams());
}
try {
SolrResponse response = null;
if (operation instanceof CollectionAdminRequest.AsyncCollectionAdminRequest) {
@ -130,7 +134,7 @@ public class ExecutePlanAction extends TriggerActionBase {
try {
cloudManager.getDistribStateManager().removeData(znode, -1);
} catch (Exception e) {
log.warn("Unexpected exception while trying to delete znode: " + znode, e);
log.warn("Unexpected exception while trying to delete znode: {}", znode, e);
}
}
response = statusResponse;
@ -149,7 +153,7 @@ public class ExecutePlanAction extends TriggerActionBase {
try {
cloudManager.getDistribStateManager().removeData(znode, -1);
} catch (Exception e) {
log.warn("Unexpected exception while trying to delete znode: " + znode, e);
log.warn("Unexpected exception while trying to delete znode: {}", znode, e);
}
throw new IOException("Task " + asyncId + " failed: " + (statusResponse != null ? statusResponse : " timed out. Operation: " + req));
}
@ -212,7 +216,7 @@ public class ExecutePlanAction extends TriggerActionBase {
if (rootCause instanceof SolrServerException) {
throw e;
}
log.error("Unexpected Exception while querying status of requestId=" + requestId, e);
log.error("Unexpected Exception while querying status of requestId={}", requestId, e);
throw e;
}
if (i > 0 && i % 5 == 0) {

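A subtlety behind several of these rewrites: SLF4J treats a trailing Throwable that has no matching placeholder as the exception to attach, so the stack trace survives even though the format string contains only one "{}". From the hunk above:

    try {
      cloudManager.getDistribStateManager().removeData(znode, -1);
    } catch (Exception e) {
      // znode fills the single placeholder; e, left over, is logged with its stack trace.
      log.warn("Unexpected exception while trying to delete znode: {}", znode, e);
    }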
View File

@ -158,7 +158,7 @@ public class HttpTriggerListener extends TriggerListenerBase {
try {
cloudManager.httpRequest(url, SolrRequest.METHOD.POST, headers, payload, timeout, followRedirects);
} catch (IOException e) {
log.warn("Exception sending request for event " + event, e);
log.warn("Exception sending request for event {}", event, e);
}
}
}

View File

@ -75,7 +75,9 @@ public class InactiveMarkersPlanAction extends TriggerActionBase {
@Override
public void process(TriggerEvent event, ActionContext context) throws Exception {
log.trace("-- {} cleaning markers", getName());
if (log.isTraceEnabled()) {
log.trace("-- {} cleaning markers", getName());
}
// use epoch time to track this across JVMs and nodes
long currentTimeNs = cloudManager.getTimeSource().getEpochTimeNs();
Map<String, Object> results = new LinkedHashMap<>();
@ -123,7 +125,7 @@ public class InactiveMarkersPlanAction extends TriggerActionBase {
} catch (BadVersionException be) {
throw new RuntimeException("should never happen", be);
} catch (NotEmptyException ne) {
log.error("Marker znode should be empty but it's not! Ignoring {} ({})", markerPath, ne.toString());
log.error("Marker znode should be empty but it's not! Ignoring {} ({})", markerPath, ne);
}
} else {
log.trace(" -- keep {}, delta={}, ttl={}, active={}", markerPath, delta, cleanupTTL, activeMarker);

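The change from ne.toString() to ne above leans on the same formatter rule in reverse: when the exception does match a placeholder, it is rendered via toString() lazily, so the conversion only happens if the line is actually emitted:

    // Two placeholders, two arguments: ne is consumed by the second "{}" and
    // stringified only when ERROR output is produced; no stack trace is attached.
    log.error("Marker znode should be empty but it's not! Ignoring {} ({})", markerPath, ne);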
View File

@ -90,9 +90,13 @@ public class InactiveShardPlanAction extends TriggerActionBase {
// this timestamp uses epoch time
long currentTime = cloudManager.getTimeSource().getEpochTimeNs();
long delta = TimeUnit.NANOSECONDS.toSeconds(currentTime - timestamp);
log.debug("{}/{}: tstamp={}, time={}, delta={}", coll.getName(), s.getName(), timestamp, currentTime, delta);
if (log.isDebugEnabled()) {
log.debug("{}/{}: tstamp={}, time={}, delta={}", coll.getName(), s.getName(), timestamp, currentTime, delta);
}
if (delta > cleanupTTL) {
log.debug("-- delete inactive {} / {}", coll.getName(), s.getName());
if (log.isDebugEnabled()) {
log.debug("-- delete inactive {} / {}", coll.getName(), s.getName());
}
List<SolrRequest> operations = (List<SolrRequest>)context.getProperties().computeIfAbsent("operations", k -> new ArrayList<>());
operations.add(CollectionAdminRequest.deleteShard(coll.getName(), s.getName()));
cleanup.computeIfAbsent(coll.getName(), c -> new ArrayList<>()).add(s.getName());
@ -117,23 +121,29 @@ public class InactiveShardPlanAction extends TriggerActionBase {
// this timestamp uses epoch time
long currentTime = cloudManager.getTimeSource().getEpochTimeNs();
long delta = TimeUnit.NANOSECONDS.toSeconds(currentTime - timestamp);
log.debug("{}/{}: locktstamp={}, time={}, delta={}", coll.getName(), lock, timestamp, currentTime, delta);
if (log.isDebugEnabled()) {
log.debug("{}/{}: locktstamp={}, time={}, delta={}", coll.getName(), lock, timestamp, currentTime, delta);
}
if (delta > cleanupTTL) {
log.debug("-- delete inactive split lock for {}/{}, delta={}", coll.getName(), lock, delta);
if (log.isDebugEnabled()) {
log.debug("-- delete inactive split lock for {}/{}, delta={}", coll.getName(), lock, delta);
}
cloudManager.getDistribStateManager().removeData(lockPath, -1);
lockData.put("currentTimeNs", currentTime);
lockData.put("deltaSec", delta);
lockData.put("ttlSec", cleanupTTL);
staleLocks.put(coll.getName() + "/" + lock, lockData);
} else {
log.debug("-- lock " + coll.getName() + "/" + lock + " still active (delta=" + delta + ")");
if (log.isDebugEnabled()) {
log.debug("-- lock {}/{} still active (delta={})", coll.getName(), lock, delta);
}
}
} catch (NoSuchElementException nse) {
// already removed by someone else - ignore
}
}
} catch (Exception e) {
log.warn("Exception checking for inactive shard split locks in " + parentPath, e);
log.warn("Exception checking for inactive shard split locks in {}", parentPath, e);
}
})
);

View File

@ -251,7 +251,7 @@ public class IndexSizeTrigger extends TriggerBase {
public void run() {
synchronized(this) {
if (isClosed) {
log.warn(getName() + " ran but was already closed");
log.warn("{} ran but was already closed", getName());
return;
}
}
@ -325,11 +325,11 @@ public class IndexSizeTrigger extends TriggerBase {
sizes.forEach((tag, size) -> {
final ReplicaInfo info = metricTags.get(tag);
if (info == null) {
log.warn("Missing replica info for response tag " + tag);
log.warn("Missing replica info for response tag {}", tag);
} else {
// verify that it's a Number
if (!(size instanceof Number)) {
log.warn("invalid size value for tag " + tag + " - not a number: '" + size + "' is " + size.getClass().getName());
log.warn("invalid size value for tag {} - not a number: '{}' is {}", tag, size, size.getClass().getName());
return;
}
@ -347,7 +347,7 @@ public class IndexSizeTrigger extends TriggerBase {
});
}
} catch (IOException e) {
log.warn("Error running trigger " + getName(), e);
log.warn("Error running trigger {}", getName(), e);
return;
}

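Note the dividing line applied in this file and elsewhere: arguments that are plain references already in scope (tag, size, getName()) get parameterization only, while calls whose arguments must first be computed are additionally guarded. Roughly, using lines from this commit:

    // Cheap argument, already in hand: parameterize, no guard needed.
    log.warn("Missing replica info for response tag {}", tag);

    // Costly argument: getParams() materializes the parameter map, so guard the call.
    if (log.isDebugEnabled()) {
      log.debug("Computed Plan: {}", operation.getParams());
    }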
View File

@ -85,7 +85,7 @@ public class NodeAddedTrigger extends TriggerBase {
return;
}
} catch (InterruptedException | IOException | KeeperException e) {
log.debug("-- ignoring marker " + markerPath + " state due to error", e);
log.debug("-- ignoring marker {} state due to error", markerPath, e);
}
// don't add nodes that have since gone away
if (lastLiveNodes.contains(n) && !nodeNameVsTimeAdded.containsKey(n)) {
@ -175,7 +175,9 @@ public class NodeAddedTrigger extends TriggerBase {
log.debug("Running NodeAddedTrigger {}", name);
Set<String> newLiveNodes = new HashSet<>(cloudManager.getClusterStateProvider().getLiveNodes());
log.debug("Found livenodes: {}", newLiveNodes.size());
if (log.isDebugEnabled()) {
log.debug("Found livenodes: {}", newLiveNodes.size());
}
// have any nodes that we were tracking been removed from the cluster?
// if so, remove them from the tracking map
@ -207,8 +209,10 @@ public class NodeAddedTrigger extends TriggerBase {
AutoScaling.TriggerEventProcessor processor = processorRef.get();
if (!nodeNames.isEmpty()) {
if (processor != null) {
log.debug("NodeAddedTrigger {} firing registered processor for nodes: {} added at times {}, now={}", name,
nodeNames, times, cloudManager.getTimeSource().getTimeNs());
if (log.isDebugEnabled()) {
log.debug("NodeAddedTrigger {} firing registered processor for nodes: {} added at times {}, now={}", name,
nodeNames, times, cloudManager.getTimeSource().getTimeNs());
}
if (processor.process(new NodeAddedEvent(getEventType(), getName(), times, nodeNames, preferredOp, replicaType))) {
// remove from tracking set only if the fire was accepted
nodeNames.forEach(n -> {

View File

@ -82,7 +82,7 @@ public class NodeLostTrigger extends TriggerBase {
return;
}
} catch (InterruptedException | IOException | KeeperException e) {
log.debug("-- ignoring marker " + markerPath + " state due to error", e);
log.debug("-- ignoring marker {} state due to error", markerPath, e);
}
// don't add nodes that have since came back
if (!lastLiveNodes.contains(n) && !nodeNameVsTimeRemoved.containsKey(n)) {
@ -164,7 +164,9 @@ public class NodeLostTrigger extends TriggerBase {
}
Set<String> newLiveNodes = new HashSet<>(cloudManager.getClusterStateProvider().getLiveNodes());
log.debug("Running NodeLostTrigger: {} with currently live nodes: {} and last live nodes: {}", name, newLiveNodes.size(), lastLiveNodes.size());
if (log.isDebugEnabled()) {
log.debug("Running NodeLostTrigger: {} with currently live nodes: {} and last live nodes: {}", name, newLiveNodes.size(), lastLiveNodes.size());
}
log.trace("Current Live Nodes for {}: {}", name, newLiveNodes);
log.trace("Last Live Nodes for {}: {}", name, lastLiveNodes);

View File

@ -164,8 +164,8 @@ public class OverseerTriggerThread implements Runnable, SolrCloseable {
catch (IOException | KeeperException e) {
if (e instanceof KeeperException.SessionExpiredException ||
(e.getCause()!=null && e.getCause() instanceof KeeperException.SessionExpiredException)) {
log.warn("Solr cannot talk to ZK, exiting " +
getClass().getSimpleName() + " main queue loop", e);
log.warn("Solr cannot talk to ZK, exiting {} main queue loop"
, getClass().getSimpleName(), e);
return;
} else {
log.error("A ZK error has occurred", e);
@ -247,7 +247,7 @@ public class OverseerTriggerThread implements Runnable, SolrCloseable {
} catch (AlreadyClosedException e) {
} catch (Exception e) {
log.warn("Exception initializing trigger " + entry.getKey() + ", configuration ignored", e);
log.warn("Exception initializing trigger {}, configuration ignored", entry.getKey(), e);
}
}
} catch (AlreadyClosedException e) {
@ -316,7 +316,9 @@ public class OverseerTriggerThread implements Runnable, SolrCloseable {
return;
}
AutoScalingConfig currentConfig = cloudManager.getDistribStateManager().getAutoScalingConfig(watcher);
log.debug("Refreshing {} with znode version {}", ZkStateReader.SOLR_AUTOSCALING_CONF_PATH, currentConfig.getZkVersion());
if (log.isDebugEnabled()) {
log.debug("Refreshing {} with znode version {}", ZkStateReader.SOLR_AUTOSCALING_CONF_PATH, currentConfig.getZkVersion());
}
if (znodeVersion >= currentConfig.getZkVersion()) {
// protect against reordered watcher fires by ensuring that we only move forward
return;
@ -397,7 +399,7 @@ public class OverseerTriggerThread implements Runnable, SolrCloseable {
try {
triggerMap.put(triggerName, triggerFactory.create(eventType, triggerName, cfg.properties));
} catch (TriggerValidationException e) {
log.warn("Error in trigger '" + triggerName + "' configuration, trigger config ignored: " + cfg, e);
log.warn("Error in trigger '{}' configuration, trigger config ignored: {}", triggerName, cfg, e);
}
}
return triggerMap;

View File

@ -171,7 +171,9 @@ public class ScheduledTriggers implements Closeable {
Map<String, Object> newProps = autoScalingConfig.getProperties();
String key = entry.getKey();
if (newProps.containsKey(key) && !entry.getValue().equals(newProps.get(key))) {
log.debug("Changing value of autoscaling property: {} from: {} to: {}", key, entry.getValue(), newProps.get(key));
if (log.isDebugEnabled()) {
log.debug("Changing value of autoscaling property: {} from: {} to: {}", key, entry.getValue(), newProps.get(key));
}
switch (key) {
case TRIGGER_SCHEDULE_DELAY_SECONDS:
triggerDelay.set(((Number) newProps.get(key)).intValue());
@ -223,9 +225,9 @@ public class ScheduledTriggers implements Closeable {
throw new AlreadyClosedException("ScheduledTriggers has been closed and cannot be used anymore");
}
if (cloudManager.isClosed()) {
log.error("Failed to add trigger " + newTrigger.getName() + " - closing or disconnected from data provider", e);
log.error("Failed to add trigger {} - closing or disconnected from data provider", newTrigger.getName(), e);
} else {
log.error("Failed to add trigger " + newTrigger.getName(), e);
log.error("Failed to add trigger {}", newTrigger.getName(), e);
}
return;
}
@ -267,7 +269,7 @@ public class ScheduledTriggers implements Closeable {
return false;
}
if (event.isIgnored()) {
log.debug("-------- Ignoring event: " + event);
log.debug("-------- Ignoring event: {}", event);
event.getProperties().put(TriggerEvent.IGNORED, true);
triggerListeners.fireListeners(event.getSource(), event, TriggerEventProcessorStage.IGNORED, "Event was ignored.");
return true; // always return true for ignored events
@ -275,12 +277,12 @@ public class ScheduledTriggers implements Closeable {
// even though we pause all triggers during action execution there is a possibility that a trigger was already
// running at the time and would have already created an event so we reject such events during cooldown period
if (cooldownStart.get() + cooldownPeriod.get() > cloudManager.getTimeSource().getTimeNs()) {
log.debug("-------- Cooldown period - rejecting event: " + event);
log.debug("-------- Cooldown period - rejecting event: {}", event);
event.getProperties().put(TriggerEvent.COOLDOWN, true);
triggerListeners.fireListeners(event.getSource(), event, TriggerEventProcessorStage.IGNORED, "In cooldown period.");
return false;
} else {
log.debug("++++++++ Cooldown inactive - processing event: " + event);
log.debug("++++++++ Cooldown inactive - processing event: {}", event);
// start cooldown here to immediately reject other events
cooldownStart.set(cloudManager.getTimeSource().getTimeNs());
}
@ -310,7 +312,7 @@ public class ScheduledTriggers implements Closeable {
assert hasPendingActions.get();
long eventProcessingStart = cloudManager.getTimeSource().getTimeNs();
TriggerListeners triggerListeners1 = triggerListeners.copy();
log.debug("-- processing actions for " + event);
log.debug("-- processing actions for {}", event);
try {
// in future, we could wait for pending tasks in a different thread and re-enqueue
// this event so that we continue processing other events and not block this action executor
@ -348,8 +350,10 @@ public class ScheduledTriggers implements Closeable {
// resume triggers after cool down period
resumeTriggers(cloudManager.getTimeSource().convertDelay(TimeUnit.NANOSECONDS, cooldownPeriod.get(), TimeUnit.MILLISECONDS));
}
log.debug("-- processing took {} ms for event id={}",
TimeUnit.NANOSECONDS.toMillis(cloudManager.getTimeSource().getTimeNs() - eventProcessingStart), event.id);
if (log.isDebugEnabled()) {
log.debug("-- processing took {} ms for event id={}",
TimeUnit.NANOSECONDS.toMillis(cloudManager.getTimeSource().getTimeNs() - eventProcessingStart), event.id);
}
});
} else {
if (enqueued) {
@ -447,7 +451,7 @@ public class ScheduledTriggers implements Closeable {
if (rootCause instanceof TimeoutException && rootCause.getMessage().contains("Could not connect to ZooKeeper")) {
throw e;
}
log.error("Unexpected exception while waiting for pending task with requestid: " + requestid + " to finish", e);
log.error("Unexpected exception while waiting for pending task with requestid: {} to finish", requestid, e);
}
}
}
@ -473,7 +477,7 @@ public class ScheduledTriggers implements Closeable {
*/
public synchronized void removeAll() {
getScheduledTriggerNames().forEach(t -> {
log.info("-- removing trigger: " + t);
log.info("-- removing trigger: {}", t);
remove(t);
});
}
@ -496,12 +500,12 @@ public class ScheduledTriggers implements Closeable {
try {
stateManager.removeRecursively(statePath, true, true);
} catch (Exception e) {
log.warn("Failed to remove state for removed trigger " + statePath, e);
log.warn("Failed to remove state for removed trigger {}", statePath, e);
}
try {
stateManager.removeRecursively(eventsPath, true, true);
} catch (Exception e) {
log.warn("Failed to remove events for removed trigger " + eventsPath, e);
log.warn("Failed to remove events for removed trigger {}", eventsPath, e);
}
}
@ -628,7 +632,7 @@ public class ScheduledTriggers implements Closeable {
// override REPLAYING=true
event.getProperties().put(TriggerEvent.REPLAYING, true);
if (!trigger.getProcessor().process(event)) {
log.error("Failed to re-play event, discarding: " + event);
log.error("Failed to re-play event, discarding: {}", event);
}
queue.pollEvent(); // always remove it from queue
}
@ -637,14 +641,14 @@ public class ScheduledTriggers implements Closeable {
trigger.restoreState();
} catch (Exception e) {
// log but don't throw - see below
log.error("Error restoring trigger state " + trigger.getName(), e);
log.error("Error restoring trigger state {}", trigger.getName(), e);
}
replay = false;
}
} catch (AlreadyClosedException e) {
} catch (Exception e) {
log.error("Unexpected exception from trigger: " + trigger.getName(), e);
log.error("Unexpected exception from trigger: {}", trigger.getName(), e);
}
try {
trigger.run();
@ -653,7 +657,7 @@ public class ScheduledTriggers implements Closeable {
} catch (Exception e) {
// log but do not propagate exception because an exception thrown from a scheduled operation
// will suppress future executions
log.error("Unexpected exception from trigger: " + trigger.getName(), e);
log.error("Unexpected exception from trigger: {}", trigger.getName(), e);
} finally {
// checkpoint after each run
trigger.saveState();
@ -751,7 +755,7 @@ public class ScheduledTriggers implements Closeable {
try {
listener.close();
} catch (Exception e) {
log.warn("Exception closing old listener " + listener.getConfig(), e);
log.warn("Exception closing old listener {}", listener.getConfig(), e);
}
it.remove();
}
@ -770,7 +774,7 @@ public class ScheduledTriggers implements Closeable {
try {
oldListener.close();
} catch (Exception e) {
log.warn("Exception closing old listener " + oldListener.getConfig(), e);
log.warn("Exception closing old listener {}", oldListener.getConfig(), e);
}
} else {
listener = oldListener; // reuse
@ -781,7 +785,7 @@ public class ScheduledTriggers implements Closeable {
try {
listener = loader.newInstance(clazz, TriggerListener.class);
} catch (Exception e) {
log.warn("Invalid TriggerListener class name '" + clazz + "', skipping...", e);
log.warn("Invalid TriggerListener class name '{}', skipping...", clazz, e);
}
if (listener != null) {
try {
@ -789,7 +793,7 @@ public class ScheduledTriggers implements Closeable {
listener.init();
listenersPerName.put(config.name, listener);
} catch (Exception e) {
log.warn("Error initializing TriggerListener " + config, e);
log.warn("Error initializing TriggerListener {}", config, e);
IOUtils.closeQuietly(listener);
listener = null;
}
@ -895,7 +899,7 @@ public class ScheduledTriggers implements Closeable {
try {
listener.onEvent(event, stage, actionName, context, error, message);
} catch (Exception e) {
log.warn("Exception running listener " + listener.getConfig(), e);
log.warn("Exception running listener {}", listener.getConfig(), e);
}
}
} finally {

View File

@ -371,13 +371,13 @@ public class SearchRateTrigger extends TriggerBase {
}
Map<String, Object> rates = cloudManager.getNodeStateProvider().getNodeValues(node, metricTags.keySet());
if (log.isDebugEnabled()) {
log.debug("### rates for node " + node);
rates.forEach((tag, rate) -> log.debug("### " + tag + "\t" + rate));
log.debug("### rates for node {}", node);
rates.forEach((tag, rate) -> log.debug("### " + tag + "\t" + rate)); // logOk
}
rates.forEach((tag, rate) -> {
ReplicaInfo info = metricTags.get(tag);
if (info == null) {
log.warn("Missing replica info for response tag " + tag);
log.warn("Missing replica info for response tag {}", tag);
} else {
Map<String, List<ReplicaInfo>> perCollection = collectionRates.computeIfAbsent(info.getCollection(), s -> new HashMap<>());
List<ReplicaInfo> perShard = perCollection.computeIfAbsent(info.getShard(), s -> new ArrayList<>());
@ -395,7 +395,7 @@ public class SearchRateTrigger extends TriggerBase {
log.debug("## Collection: {}", coll);
collRates.forEach((s, replicas) -> {
log.debug("## - {}", s);
replicas.forEach(ri -> log.debug("## {} {}", ri.getCore(), ri.getVariable(AutoScalingParams.RATE)));
replicas.forEach(ri -> log.debug("## {} {}", ri.getCore(), ri.getVariable(AutoScalingParams.RATE))); //logOk
});
});
}
@ -464,7 +464,9 @@ public class SearchRateTrigger extends TriggerBase {
if (log.isDebugEnabled()) {
Long lastTime = lastShardEvent.computeIfAbsent(elapsedKey, s -> now);
long elapsed = TimeUnit.SECONDS.convert(now - lastTime, TimeUnit.NANOSECONDS);
log.debug("-- waitFor didn't elapse for {}, waitFor={}, elapsed={}", elapsedKey, getWaitForSecond(), elapsed);
if (log.isDebugEnabled()) {
log.debug("-- waitFor didn't elapse for {}, waitFor={}, elapsed={}", elapsedKey, getWaitForSecond(), elapsed);
}
}
}
} else {
@ -763,7 +765,9 @@ public class SearchRateTrigger extends TriggerBase {
private boolean waitForElapsed(String name, long now, Map<String, Long> lastEventMap) {
Long lastTime = lastEventMap.computeIfAbsent(name, s -> now);
long elapsed = TimeUnit.SECONDS.convert(now - lastTime, TimeUnit.NANOSECONDS);
log.trace("name={}, lastTime={}, elapsed={}, waitFor={}", name, lastTime, elapsed, getWaitForSecond());
if (log.isTraceEnabled()) {
log.trace("name={}, lastTime={}, elapsed={}, waitFor={}", name, lastTime, elapsed, getWaitForSecond());
}
if (TimeUnit.SECONDS.convert(now - lastTime, TimeUnit.NANOSECONDS) < getWaitForSecond()) {
return false;
}

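The // logOk markers above are the escape hatch the validator honors; checkLogLine strips whitespace and lowercases before matching, so the spelling is flexible. Concatenation is acceptable when an enclosing level check already ensures the strings are built on demand:

    if (log.isDebugEnabled()) {
      // Guarded, so the eager concatenation costs nothing when DEBUG is off;
      // the marker tells the precommit check this line was deliberately kept.
      rates.forEach((tag, rate) -> log.debug("### " + tag + "\t" + rate)); // logOk
    }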
View File

@ -143,7 +143,7 @@ public abstract class TriggerBase implements AutoScaling.Trigger {
} catch (AlreadyExistsException e) {
// ignore
} catch (InterruptedException | KeeperException | IOException e) {
log.warn("Exception checking ZK path " + ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH, e);
log.warn("Exception checking ZK path {}", ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH, e);
throw e;
}
for (TriggerAction action : actions) {
@ -268,7 +268,7 @@ public abstract class TriggerBase implements AutoScaling.Trigger {
} catch (AlreadyExistsException e) {
} catch (InterruptedException | BadVersionException | IOException | KeeperException e) {
log.warn("Exception updating trigger state '" + path + "'", e);
log.warn("Exception updating trigger state '{}'", path, e);
}
}
@ -284,7 +284,7 @@ public abstract class TriggerBase implements AutoScaling.Trigger {
} catch (AlreadyClosedException e) {
} catch (Exception e) {
log.warn("Exception getting trigger state '" + path + "'", e);
log.warn("Exception getting trigger state '{}'", path, e);
}
if (data != null) {
Map<String, Object> restoredState = (Map<String, Object>)Utils.fromJSON(data);

View File

@ -58,7 +58,7 @@ public class TriggerEventQueue {
delegate.offer(data);
return true;
} catch (Exception e) {
log.warn("Exception adding event " + event + " to queue " + triggerName, e);
log.warn("Exception adding event {} to queue {}", event, triggerName, e);
return false;
}
}
@ -75,7 +75,7 @@ public class TriggerEventQueue {
Map<String, Object> map = (Map<String, Object>) Utils.fromJSON(data);
return fromMap(map);
} catch (Exception e) {
log.warn("Invalid event data, ignoring: " + new String(data, StandardCharsets.UTF_8));
log.warn("Invalid event data, ignoring: {}", new String(data, StandardCharsets.UTF_8));
continue;
}
}
@ -84,7 +84,7 @@ public class TriggerEventQueue {
}
catch (Exception e) {
log.warn("Exception peeking queue of trigger " + triggerName, e);
log.warn("Exception peeking queue of trigger {}", triggerName, e);
}
return null;
}
@ -101,12 +101,12 @@ public class TriggerEventQueue {
Map<String, Object> map = (Map<String, Object>) Utils.fromJSON(data);
return fromMap(map);
} catch (Exception e) {
log.warn("Invalid event data, ignoring: " + new String(data, StandardCharsets.UTF_8));
log.warn("Invalid event data, ignoring: {}", new String(data, StandardCharsets.UTF_8));
continue;
}
}
} catch (Exception e) {
log.warn("Exception polling queue of trigger " + triggerName, e);
log.warn("Exception polling queue of trigger {}", triggerName, e);
}
return null;
}

View File

@ -252,7 +252,9 @@ public class GenericDistributedQueue implements DistributedQueue {
try {
stateManager.removeData(ops.get(j).getPath(), -1);
} catch (NoSuchElementException e2) {
log.debug("Can not remove node which is not exist : " + ops.get(j).getPath());
if (log.isDebugEnabled()) {
log.debug("Can not remove node which is not exist : {}", ops.get(j).getPath());
}
}
}
}
@ -419,7 +421,7 @@ public class GenericDistributedQueue implements DistributedQueue {
for (String childName : childNames) {
// Check format
if (!childName.regionMatches(0, PREFIX, 0, PREFIX.length())) {
log.debug("Found child node with improper name: " + childName);
log.debug("Found child node with improper name: {}", childName);
continue;
}
orderedChildren.add(childName);

View File

@ -476,7 +476,7 @@ public class SimCloudManager implements SolrCloudManager {
String nodeId = (String)values.get(ImplicitSnitch.NODE);
nodeStateProvider.simSetNodeValues(nodeId, values);
clusterStateProvider.simAddNode(nodeId);
log.trace("-- added node " + nodeId);
log.trace("-- added node {}", nodeId);
// initialize history handler if this is the first node
if (metricsHistoryHandler == null && liveNodesSet.size() == 1) {
metricsHandler = new MetricsHandler(metricManager);
@ -510,7 +510,7 @@ public class SimCloudManager implements SolrCloudManager {
metricsHandler = null;
}
}
log.trace("-- removed node " + nodeId);
log.trace("-- removed node {}", nodeId);
}
/**
@ -599,7 +599,7 @@ public class SimCloudManager implements SolrCloudManager {
triggerThread.interrupt();
IOUtils.closeQuietly(triggerThread);
if (killNodeId != null) {
log.info(" = killing node " + killNodeId);
log.info(" = killing node {}", killNodeId);
simRemoveNode(killNodeId, false);
}
objectCache.clear();
@ -747,8 +747,10 @@ public class SimCloudManager implements SolrCloudManager {
// pay the penalty for remote request, at least 5 ms
timeSource.sleep(5);
log.trace("--- got SolrRequest: " + req.getMethod() + " " + req.getPath() +
(req.getParams() != null ? " " + req.getParams() : ""));
if (log.isTraceEnabled()) {
log.trace("--- got SolrRequest: {} {} {}", req.getMethod(), req.getPath(),
(req.getParams() != null ? " " + req.getParams() : "")); // logOk
}
if (req.getPath() != null) {
if (req.getPath().startsWith("/admin/autoscaling") ||
req.getPath().startsWith("/cluster/autoscaling") ||
@ -799,7 +801,9 @@ public class SimCloudManager implements SolrCloudManager {
}
}
if (queryResponse.getException() != null) {
log.debug("-- exception handling request", queryResponse.getException());
if (log.isDebugEnabled()) {
log.debug("-- exception handling request", queryResponse.getException());
}
throw new IOException(queryResponse.getException());
}
SolrResponse rsp = new SolrResponseBase();
@ -859,7 +863,9 @@ public class SimCloudManager implements SolrCloudManager {
if (action == null) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown action: " + a);
}
log.trace("Invoking Collection Action :{} with params {}", action.toLower(), params.toQueryString());
if (log.isTraceEnabled()) {
log.trace("Invoking Collection Action :{} with params {}", action.toLower(), params.toQueryString());
}
NamedList results = new NamedList();
rsp.setResponse(results);
incrementCount(action.name());

View File

@ -243,7 +243,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
colShardReplicaMap.computeIfAbsent(ri.getCollection(), name -> new ConcurrentHashMap<>())
.computeIfAbsent(ri.getShard(), shard -> new ArrayList<>()).add(ri);
} else {
log.warn("- dropping replica because its node " + r.getNodeName() + " is not live: " + r);
log.warn("- dropping replica because its node {} is not live: {}", r.getNodeName(), r);
}
});
});
@ -386,7 +386,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
}
// pick first
overseerLeader = liveNodes.iterator().next();
log.debug("--- new Overseer leader: " + overseerLeader);
log.debug("--- new Overseer leader: {}", overseerLeader);
// record it in ZK
Map<String, Object> id = new HashMap<>();
id.put("id", cloudManager.getTimeSource().getTimeNs() +
@ -680,7 +680,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
int version = oldData != null ? oldData.getVersion() : 0;
assert clusterStateVersion == version : "local clusterStateVersion out of sync";
stateManager.setData(ZkStateReader.CLUSTER_STATE, data, version);
log.debug("** saved cluster state version " + (version));
log.debug("** saved cluster state version {}", version);
clusterStateVersion++;
} catch (Exception e) {
throw new IOException(e);
@ -736,7 +736,9 @@ public class SimClusterStateProvider implements ClusterStateProvider {
return;
}
dc.getSlices().forEach(s -> {
if (log.isTraceEnabled()) {
log.trace("-- submit leader election for {} / {}", dc.getName(), s.getName());
}
cloudManager.submit(() -> {
simRunLeaderElection(dc.getName(), s.getName(), saveClusterState);
return true;
@ -768,8 +770,10 @@ public class SimClusterStateProvider implements ClusterStateProvider {
return;
}
if (s.getState() == Slice.State.INACTIVE) {
log.trace("-- slice state is {}, skipping leader election ({} / {})",
s.getState(), collection, slice);
if (log.isTraceEnabled()) {
log.trace("-- slice state is {}, skipping leader election ({} / {})",
s.getState(), collection, slice);
}
return;
}
if (s.getReplicas().isEmpty()) {
@ -785,8 +789,10 @@ public class SimClusterStateProvider implements ClusterStateProvider {
}
if (s.getState() != Slice.State.ACTIVE) {
log.trace("-- slice state is {}, but I will run leader election anyway ({} / {})",
s.getState(), collection, slice);
if (log.isTraceEnabled()) {
log.trace("-- slice state is {}, but I will run leader election anyway ({} / {})",
s.getState(), collection, slice);
}
}
log.debug("Running leader election ({} / {})", collection, slice);
@ -804,14 +810,18 @@ public class SimClusterStateProvider implements ClusterStateProvider {
synchronized (ri) {
if (r.isActive(liveNodes.get())) {
if (ri.getVariables().get(ZkStateReader.LEADER_PROP) != null) {
log.trace("-- found existing leader {} / {}: {}, {}", collection, s.getName(), ri, r);
if (log.isTraceEnabled()) {
log.trace("-- found existing leader {} / {}: {}, {}", collection, s.getName(), ri, r);
}
alreadyHasLeader.set(true);
return;
} else {
active.add(ri);
}
} else { // if it's on a node that is not live mark it down
log.trace("-- replica not active on live nodes: {}, {}", liveNodes.get(), r);
if (log.isTraceEnabled()) {
log.trace("-- replica not active on live nodes: {}, {}", liveNodes.get(), r);
}
if (!liveNodes.contains(r.getNodeName())) {
ri.getVariables().put(ZkStateReader.STATE_PROP, Replica.State.DOWN.toString());
ri.getVariables().remove(ZkStateReader.LEADER_PROP);
@ -821,12 +831,18 @@ public class SimClusterStateProvider implements ClusterStateProvider {
}
});
if (alreadyHasLeader.get()) {
log.trace("-- already has leader {} / {}: {}", collection, s.getName(), s);
if (log.isTraceEnabled()) {
log.trace("-- already has leader {} / {}: {}", collection, s.getName(), s);
}
return;
}
if (active.isEmpty()) {
log.warn("Can't find any active replicas for {} / {}: {}", collection, s.getName(), s);
log.debug("-- liveNodes: {}", liveNodes.get());
if (log.isWarnEnabled()) {
log.warn("Can't find any active replicas for {} / {}: {}", collection, s.getName(), s);
}
if (log.isDebugEnabled()) {
log.debug("-- liveNodes: {}", liveNodes.get());
}
return;
}
// pick first active one
@ -847,8 +863,10 @@ public class SimClusterStateProvider implements ClusterStateProvider {
synchronized (ri) {
ri.getVariables().put(ZkStateReader.LEADER_PROP, "true");
}
log.debug("-- elected new leader for {} / {} (currentVersion={}): {}", collection,
s.getName(), clusterStateVersion, ri);
if (log.isDebugEnabled()) {
log.debug("-- elected new leader for {} / {} (currentVersion={}): {}", collection,
s.getName(), clusterStateVersion, ri);
}
stateChanged.set(true);
}
} finally {
@ -1188,7 +1206,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
.filter(e -> !NO_COPY_PROPS.contains(e.getKey()))
.collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()));
ReplicaInfo newReplica = new ReplicaInfo(coreNodeName, newSolrCoreName, collection, slice.getName(), replica.getType(), targetNode, props);
log.debug("-- new replica: " + newReplica);
log.debug("-- new replica: {}", newReplica);
// xxx should run leader election here already?
simAddReplica(targetNode, newReplica, false);
// this will trigger leader election
@ -1393,7 +1411,9 @@ public class SimClusterStateProvider implements ClusterStateProvider {
return false;
}
if (s.getReplicas().size() < repFactor) {
log.debug("** expected {} repFactor but there are {} replicas", repFactor, s.getReplicas().size());
if (log.isDebugEnabled()) {
log.debug("** expected {} repFactor but there are {} replicas", repFactor, s.getReplicas().size());
}
return false;
}
}
@ -1409,8 +1429,10 @@ public class SimClusterStateProvider implements ClusterStateProvider {
}
}
// mark the new slices as active and the old slice as inactive
log.trace("-- switching slice states after split shard: collection={}, parent={}, subSlices={}", collectionName,
sliceName.get(), subSlices);
if (log.isTraceEnabled()) {
log.trace("-- switching slice states after split shard: collection={}, parent={}, subSlices={}", collectionName,
sliceName.get(), subSlices);
}
lock.lockInterruptibly();
try {
Map<String, Object> sProps = sliceProperties.computeIfAbsent(collectionName, c -> new ConcurrentHashMap<>())
@ -1422,8 +1444,10 @@ public class SimClusterStateProvider implements ClusterStateProvider {
// apply buffered updates
long perShard = bufferedUpdates.get() / subSlices.size();
long remainder = bufferedUpdates.get() % subSlices.size();
log.debug("-- applying {} buffered docs from {} / {}, perShard={}, remainder={}", bufferedUpdates.get(),
collectionName, parentSlice.getName(), perShard, remainder);
if (log.isDebugEnabled()) {
log.debug("-- applying {} buffered docs from {} / {}, perShard={}, remainder={}", bufferedUpdates.get(),
collectionName, parentSlice.getName(), perShard, remainder);
}
for (int i = 0; i < subSlices.size(); i++) {
String sub = subSlices.get(i);
long numUpdates = perShard;
@ -1565,7 +1589,9 @@ public class SimClusterStateProvider implements ClusterStateProvider {
ReplicaInfo ri = getReplicaInfo(leader);
Number numDocs = (Number)ri.getVariable("SEARCHER.searcher.numDocs");
if (numDocs == null || numDocs.intValue() <= 0) {
log.debug("-- attempting to delete nonexistent doc " + id + " from " + s.getLeader());
if (log.isDebugEnabled()) {
log.debug("-- attempting to delete nonexistent doc {} from {}", id, s.getLeader());
}
continue;
}
@ -1580,7 +1606,9 @@ public class SimClusterStateProvider implements ClusterStateProvider {
if (bufferedUpdates.get() > 0) {
bufferedUpdates.decrementAndGet();
} else {
log.debug("-- attempting to delete nonexistent buffered doc " + id + " from " + s.getLeader());
if (log.isDebugEnabled()) {
log.debug("-- attempting to delete nonexistent buffered doc {} from {}", id, s.getLeader());
}
}
continue;
}
@ -1625,7 +1653,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
if (!"*:*".equals(q)) {
throw new UnsupportedOperationException("Only '*:*' query is supported in deleteByQuery");
}
//log.debug("-- req delByQ " + collection);
//log.debug("-- req delByQ {}", collection);
for (Slice s : coll.getSlices()) {
Replica leader = s.getLeader();
if (leader == null) {
@ -1679,7 +1707,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
}
}
if (docCount > 0) {
//log.debug("-- req update " + collection + " / " + docCount);
//log.debug("-- req update {}/{}", collection, docCount);
// this approach to updating counters and metrics drastically increases performance
// of bulk updates, because simSetShardValue is relatively costly
@ -1816,7 +1844,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
double deltaGB = (Double) Type.FREEDISK.convertVal(delta.get());
freedisk += deltaGB;
if (freedisk < 0) {
log.warn("-- freedisk=" + freedisk + " - ran out of disk space on node " + node);
log.warn("-- freedisk={} - ran out of disk space on node {}", freedisk, node);
freedisk = 0;
}
return freedisk;

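Although the validator deliberately skips the guard check for WARN and above (the dontReportLevels logic in the Gradle task), a guard can still pay off there when rendering an argument is expensive, as with the whole-Slice toString in the leader-election hunk above:

    if (log.isWarnEnabled()) {
      // Formatting s renders the entire Slice; skip that work when WARN is disabled.
      log.warn("Can't find any active replicas for {} / {}: {}", collection, s.getName(), s);
    }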
View File

@ -244,7 +244,9 @@ public class SimDistribStateManager implements DistribStateManager {
*/
public void copyFrom(DistribStateManager other, boolean failOnExists) throws InterruptedException, IOException, KeeperException, AlreadyExistsException, BadVersionException {
List<String> tree = other.listTree("/");
log.info("- copying " + tree.size() + " resources...");
if (log.isInfoEnabled()) {
log.info("- copying {} resources...", tree.size());
}
// check if any node exists
for (String path : tree) {
if (hasData(path) && failOnExists) {

View File

@ -190,7 +190,9 @@ public class SimDistributedQueueFactory implements DistributedQueueFactory {
try {
queue.offer(new Pair(String.format(Locale.ROOT, "qn-%010d", seq), data));
seq++;
log.trace("=== offer " + System.nanoTime());
if (log.isTraceEnabled()) {
log.trace("=== offer {}", System.nanoTime());
}
changed.signalAll();
} finally {
updateLock.unlock();

View File

@ -181,7 +181,7 @@ public class SimNodeStateProvider implements NodeStateProvider {
* @param node node id
*/
public void simRemoveNodeValues(String node) throws InterruptedException {
log.debug("--removing value for " + node);
log.debug("--removing value for {}", node);
lock.lockInterruptibly();
try {
Map<String, Object> values = nodeValues.remove(node);
@ -204,7 +204,7 @@ public class SimNodeStateProvider implements NodeStateProvider {
try {
AtomicBoolean updateRoles = new AtomicBoolean(false);
myNodes.forEach(n -> {
log.debug("- removing dead node values: " + n);
log.debug("- removing dead node values: {}", n);
Map<String, Object> vals = nodeValues.remove(n);
if (vals.containsKey("nodeRole")) {
updateRoles.set(true);
@ -272,7 +272,7 @@ public class SimNodeStateProvider implements NodeStateProvider {
for (String tag : tags) {
Matcher m = METRIC_KEY_PATTERN.matcher(tag);
if (!m.matches() || m.groupCount() < 2) {
log.warn("Invalid metrics: tag: " + tag);
log.warn("Invalid metrics: tag: {}", tag);
continue;
}
String registryName = m.group(1);
@ -284,7 +284,7 @@ public class SimNodeStateProvider implements NodeStateProvider {
m = REGISTRY_PATTERN.matcher(registryName);
if (!m.matches()) {
log.warn("Invalid registry name: " + registryName);
log.warn("Invalid registry name: {}", registryName);
continue;
}
String collection = m.group(1);
@ -312,7 +312,7 @@ public class SimNodeStateProvider implements NodeStateProvider {
@Override
public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
log.trace("-- requested values for " + node + ": " + tags);
log.trace("-- requested values for {}: {}", node, tags);
if (!liveNodesSet.contains(node)) {
throw new RuntimeException("non-live node " + node);
}

View File

@ -298,17 +298,19 @@ public class SimScenario implements AutoCloseable {
iterations = Integer.parseInt(params.get("iterations", "10"));
for (int i = 0; i < iterations; i++) {
if (scenario.abortLoop) {
log.info(" -- abortLoop requested, aborting after " + i + " iterations.");
log.info(" -- abortLoop requested, aborting after {} iterations.", i);
return;
}
scenario.context.put(LOOP_ITER_PROP, String.valueOf(i));
log.info(" * iter " + (i + 1) + ":");
log.info(" * iter {} :", i + 1); // logOK
for (SimOp op : ops) {
op.prepareCurrentParams(scenario);
log.info(" - " + op.getClass().getSimpleName() + "\t" + op.params.toString());
if (log.isInfoEnabled()) {
log.info(" - {}\t{}", op.getClass().getSimpleName(), op.params);
}
op.execute(scenario);
if (scenario.abortLoop) {
log.info(" -- abortLoop requested, aborting after " + i + " iterations.");
log.info(" -- abortLoop requested, aborting after {} iterations.", i);
return;
}
}
@ -522,7 +524,9 @@ public class SimScenario implements AutoCloseable {
public void execute(SimScenario scenario) throws Exception {
List<Suggester.SuggestionInfo> suggestions = PolicyHelper.getSuggestions(scenario.config, scenario.cluster);
scenario.context.put(SUGGESTIONS_CTX_PROP, suggestions);
log.info(" - " + suggestions.size() + " suggestions");
if (log.isInfoEnabled()) {
log.info(" - {} suggestions", suggestions.size());
}
if (suggestions.isEmpty()) {
scenario.abortLoop = true;
}
@ -542,7 +546,7 @@ public class SimScenario implements AutoCloseable {
if (operation == null) {
unresolvedCount++;
if (suggestion.getViolation() == null) {
log.error(" -- ignoring suggestion without violation and without operation: " + suggestion);
log.error(" -- ignoring suggestion without violation and without operation: {}", suggestion);
}
continue;
}
@ -555,7 +559,7 @@ public class SimScenario implements AutoCloseable {
ReplicaInfo info = scenario.cluster.getSimClusterStateProvider().simGetReplicaInfo(
params.get(CollectionAdminParams.COLLECTION), params.get("replica"));
if (info == null) {
log.error("Could not find ReplicaInfo for params: " + params);
log.error("Could not find ReplicaInfo for params: {}", params);
} else if (scenario.verbose) {
paramsMap.put("replicaInfo", info);
} else if (info.getVariable(Variable.Type.CORE_IDX.tagName) != null) {
@ -564,12 +568,12 @@ public class SimScenario implements AutoCloseable {
try {
scenario.cluster.request(operation);
} catch (Exception e) {
log.error("Aborting - error executing suggestion " + suggestion, e);
log.error("Aborting - error executing suggestion {}", suggestion, e);
break;
}
}
if (suggestions.size() > 0 && unresolvedCount == suggestions.size()) {
log.info(" -- aborting simulation, only " + unresolvedCount + " unresolved violations remain");
log.info(" -- aborting simulation, only {} unresolved violations remain.", unresolvedCount);
scenario.abortLoop = true;
}
}
@ -611,7 +615,7 @@ public class SimScenario implements AutoCloseable {
String key = e.getKey();
CollectionParams.CollectionAction a = CollectionParams.CollectionAction.get(key);
if (a == null) {
log.warn("Invalid collection action " + key + ", skipping...");
log.warn("Invalid collection action {}, skipping...", key);
return;
}
String[] values = e.getValue();
@ -998,12 +1002,12 @@ public class SimScenario implements AutoCloseable {
// split on blank
String[] parts = expr.split("\\s+");
if (parts.length > 2) {
log.warn("Invalid line - wrong number of parts " + parts.length + ", skipping: " + line);
log.warn("Invalid line - wrong number of parts {}, skipping: {}", parts.length, line);
continue;
}
SimAction action = SimAction.get(parts[0]);
if (action == null) {
log.warn("Invalid scenario action " + parts[0] + ", skipping...");
log.warn("Invalid scenario action {}, skipping...", parts[0]);
continue;
}
if (action == SimAction.LOOP_END) {
@ -1076,11 +1080,13 @@ public class SimScenario implements AutoCloseable {
public void run() throws Exception {
for (int i = 0; i < ops.size(); i++) {
if (abortScenario) {
log.info("-- abortScenario requested, aborting after " + i + " ops.");
log.info("-- abortScenario requested, aborting after {} ops.", i);
return;
}
SimOp op = ops.get(i);
log.info((i + 1) + ".\t" + op.getClass().getSimpleName() + "\t" + op.initParams.toString());
if (log.isInfoEnabled()) {
log.info("{}.\t{}\t{}", i + 1, op.getClass().getSimpleName(), op.initParams); // logOk
}
// substitute parameters based on the current context
if (cluster != null && cluster.getLiveNodesSet().size() > 0) {
context.put(LIVE_NODES_CTX_PROP, new ArrayList<>(cluster.getLiveNodesSet().get()));
@ -1095,7 +1101,9 @@ public class SimScenario implements AutoCloseable {
context.remove(OVERSEER_LEADER_CTX_PROP);
}
op.prepareCurrentParams(this);
log.info("\t\t" + op.getClass().getSimpleName() + "\t" + op.params.toString());
if (log.isInfoEnabled()) {
log.info("\t\t{}\t{}", op.getClass().getSimpleName(), op.params);
}
op.execute(this);
}
}

View File

@ -339,7 +339,7 @@ public class SimUtils {
}
v2v1Mapping.put(key, meta.action.toLower());
} else {
log.warn("V2 action " + meta + " has no equivalent V1 action");
log.warn("V2 action {} has no equivalent V1 action", meta);
}
}
}
@ -422,7 +422,7 @@ public class SimUtils {
// node name format
ctx.addEquivalentName(hostPort, u.getHost() + "_" + u.getPort() + "_", RedactionUtils.NODE_REDACTION_PREFIX);
} catch (MalformedURLException e) {
log.warn("Invalid URL for node name " + nodeName + ", replacing including protocol and path", e);
log.warn("Invalid URL for node name {}, replacing including protocol and path", nodeName, e);
ctx.addName(urlString, RedactionUtils.NODE_REDACTION_PREFIX);
ctx.addEquivalentName(urlString, Utils.getBaseUrlForNodeName(nodeName, "https"), RedactionUtils.NODE_REDACTION_PREFIX);
}

View File

@ -194,7 +194,7 @@ public class SnapshotCloudManager implements SolrCloudManager {
ReplicaInfo info = nodeStateProvider.getReplicaInfo(
params.get(CollectionAdminParams.COLLECTION), params.get("replica"));
if (info == null) {
log.warn("Can't find ReplicaInfo for suggested operation: " + s);
log.warn("Can't find ReplicaInfo for suggested operation: {}", s);
} else {
map.put("replica", info);
}

View File

@ -63,7 +63,9 @@ public class SnapshotDistribStateManager implements DistribStateManager {
*/
public SnapshotDistribStateManager(DistribStateManager other, AutoScalingConfig config) throws Exception {
List<String> tree = other.listTree("/");
log.debug("- copying {} resources from {}", tree.size(), other.getClass().getSimpleName());
if (log.isDebugEnabled()) {
log.debug("- copying {} resources from {}", tree.size(), other.getClass().getSimpleName());
}
for (String path : tree) {
dataMap.put(path, other.getData(path));
}
@ -102,7 +104,9 @@ public class SnapshotDistribStateManager implements DistribStateManager {
VersionedData vd = new VersionedData(config.getZkVersion(), Utils.toJSON(config), CreateMode.PERSISTENT, "0");
dataMap.put(ZkStateReader.SOLR_AUTOSCALING_CONF_PATH, vd);
}
log.debug("- loaded snapshot of {} resources", dataMap.size());
if (log.isDebugEnabled()) {
log.debug("- loaded snapshot of {} resources", dataMap.size());
}
}
// content of these nodes is a UTF-8 String and it needs to be redacted

View File

@ -132,7 +132,7 @@ public final class CommitTracker implements Runnable {
if (pending != null && pending.getDelay(TimeUnit.MILLISECONDS) <= commitMaxTime) {
// There is already a pending commit that will happen first, so
// nothing else to do here.
// log.info("###returning since getDelay()==" + pending.getDelay(TimeUnit.MILLISECONDS) + " less than " + commitMaxTime);
// log.info("###returning since getDelay()=={} less than {}", pending.getDelay(TimeUnit.MILLISECONDS), commitMaxTime);
return;
}