LUCENE-7788: fail precommit on unparameterised log messages and examine for wasted work/objects

Erick Erickson 2020-04-19 15:58:50 -04:00
parent f5d91395db
commit f01c040ab3
28 changed files with 234 additions and 159 deletions
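The first hunk removes the metrics and schema packages from ValidateLogCallsTask's skip-list, so the new precommit check now validates them; the remaining hunks convert the log calls in those packages from string concatenation to SLF4J parameterized messages. A minimal sketch of the before/after pattern the commit applies (class and logger names here are illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class LogParamExample {
  private static final Logger log = LoggerFactory.getLogger(LogParamExample.class);

  void demo(Object info, Exception e) {
    // Before: the concatenated message (including info.toString()) is built
    // even when WARN is disabled.
    log.warn("Error initializing custom Reservoir implementation (will use default): " + info, e);
    // After: formatting is deferred until the logger knows the event is enabled,
    // and passing the object itself (not obj.toString()) keeps toString() lazy too.
    log.warn("Error initializing custom Reservoir implementation (will use default): {}", info, e);
  }
}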

View File

@ -66,7 +66,7 @@ class ValidateLogCallsTask extends DefaultTask {
, "solr/core/src/java/org/apache/solr/internal"
, "solr/core/src/java/org/apache/solr/legacy"
, "solr/core/src/java/org/apache/solr/logging"
// , "solr/core/src/java/org/apache/solr/metrics"
, "solr/core/src/java/org/apache/solr/metrics"
, "solr/core/src/java/org/apache/solr/packagemanager"
, "solr/core/src/java/org/apache/solr/parser"
, "solr/core/src/java/org/apache/solr/pkg"
@ -74,7 +74,7 @@ class ValidateLogCallsTask extends DefaultTask {
, "solr/core/src/java/org/apache/solr/request"
, "solr/core/src/java/org/apache/solr/response"
, "solr/core/src/java/org/apache/solr/rest"
// , "solr/core/src/java/org/apache/solr/schema"
, "solr/core/src/java/org/apache/solr/schema"
, "solr/core/src/java/org/apache/solr/search"
, "solr/core/src/java/org/apache/solr/security"
, "solr/core/src/java/org/apache/solr/servlet"

View File

@ -205,7 +205,7 @@ public class MetricSuppliers {
}
return reservoir;
} catch (Exception e) {
log.warn("Error initializing custom Reservoir implementation (will use default): " + info, e);
log.warn("Error initializing custom Reservoir implementation (will use default): {}", info, e);
return new ExponentiallyDecayingReservoir(size, alpha, clk);
}
}
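A note on the `{}, info, e` form used throughout: SLF4J treats a trailing argument that is a Throwable and is not consumed by a placeholder as the event's exception, so the stack trace survives parameterization. A short sketch of the two cases, reusing the variables from the hunk above:

// One placeholder, two arguments: info fills {}, e is attached as the throwable.
log.warn("Error creating custom Counter supplier (will use default): {}", info, e);
// Caution: with one placeholder and the exception as the only argument,
// e is formatted into the message and its stack trace is dropped.
log.warn("Error creating custom Counter supplier (will use default): {}", e);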
@ -285,7 +285,7 @@ public class MetricSuppliers {
try {
supplier = loader.newInstance(info.className, MetricRegistry.MetricSupplier.class);
} catch (Exception e) {
log.warn("Error creating custom Counter supplier (will use default): " + info, e);
log.warn("Error creating custom Counter supplier (will use default): {}", info, e);
supplier = new DefaultCounterSupplier();
}
if (supplier instanceof PluginInfoInitialized) {
@ -310,7 +310,7 @@ public class MetricSuppliers {
try {
supplier = loader.newInstance(info.className, MetricRegistry.MetricSupplier.class);
} catch (Exception e) {
log.warn("Error creating custom Meter supplier (will use default): " + info, e);
log.warn("Error creating custom Meter supplier (will use default): {}",info, e);
supplier = new DefaultMeterSupplier();
}
}
@ -336,7 +336,7 @@ public class MetricSuppliers {
try {
supplier = loader.newInstance(info.className, MetricRegistry.MetricSupplier.class);
} catch (Exception e) {
log.warn("Error creating custom Timer supplier (will use default): " + info, e);
log.warn("Error creating custom Timer supplier (will use default): {}", info, e);
supplier = new DefaultTimerSupplier(loader);
}
}
@ -361,7 +361,7 @@ public class MetricSuppliers {
try {
supplier = loader.newInstance(info.className, MetricRegistry.MetricSupplier.class);
} catch (Exception e) {
log.warn("Error creating custom Histogram supplier (will use default): " + info, e);
log.warn("Error creating custom Histogram supplier (will use default): {}", info, e);
supplier = new DefaultHistogramSupplier(loader);
}
}

View File

@ -127,7 +127,7 @@ public class MetricsMap implements Gauge<Map<String,Object>>, DynamicMBean {
try {
list.add(new Attribute(attribute, getAttribute(attribute)));
} catch (Exception e) {
log.warn("Could not get attribute " + attribute);
log.warn("Could not get attribute {}", attribute);
}
}
return list;

View File

@ -909,7 +909,7 @@ public class SolrMetricManager {
try {
loadReporter(registryName, loader, coreContainer, solrCore, info, tag);
} catch (Exception e) {
log.warn("Error loading metrics reporter, plugin info: " + info, e);
log.warn("Error loading metrics reporter, plugin info: {}", info, e);
}
}
}
@ -1008,7 +1008,7 @@ public class SolrMetricManager {
}
SolrMetricReporter oldReporter = perRegistry.get(name);
if (oldReporter != null) { // close it
log.info("Replacing existing reporter '" + name + "' in registry '" + registry + "': " + oldReporter.toString());
log.info("Replacing existing reporter '{}' in registry'{}': {}", name, registry, oldReporter);
oldReporter.close();
}
perRegistry.put(name, reporter);
@ -1032,11 +1032,11 @@ public class SolrMetricManager {
registry = enforcePrefix(registry);
try {
if (!reportersLock.tryLock(10, TimeUnit.SECONDS)) {
log.warn("Could not obtain lock to modify reporters registry: " + registry);
log.warn("Could not obtain lock to modify reporters registry: {}", registry);
return false;
}
} catch (InterruptedException e) {
log.warn("Interrupted while trying to obtain lock to modify reporters registry: " + registry);
log.warn("Interrupted while trying to obtain lock to modify reporters registry: {}", registry);
return false;
}
try {
@ -1054,7 +1054,7 @@ public class SolrMetricManager {
try {
reporter.close();
} catch (Exception e) {
log.warn("Error closing metric reporter, registry=" + registry + ", name=" + name, e);
log.warn("Error closing metric reporter, registry={}, name={}", registry, name, e);
}
return true;
} finally {
@ -1085,14 +1085,14 @@ public class SolrMetricManager {
registry = enforcePrefix(registry);
try {
if (!reportersLock.tryLock(10, TimeUnit.SECONDS)) {
log.warn("Could not obtain lock to modify reporters registry: " + registry);
log.warn("Could not obtain lock to modify reporters registry: {}", registry);
return Collections.emptySet();
}
} catch (InterruptedException e) {
log.warn("Interrupted while trying to obtain lock to modify reporters registry: " + registry);
log.warn("Interrupted while trying to obtain lock to modify reporters registry: {}", registry);
return Collections.emptySet();
}
log.info("Closing metric reporters for registry=" + registry + ", tag=" + tag);
log.info("Closing metric reporters for registry={} tag={}", registry, tag);
try {
Map<String, SolrMetricReporter> perRegistry = reporters.get(registry);
if (perRegistry != null) {
@ -1106,7 +1106,7 @@ public class SolrMetricManager {
try {
reporter.close();
} catch (IOException ioe) {
log.warn("Exception closing reporter " + reporter, ioe);
log.warn("Exception closing reporter {}", reporter, ioe);
}
removed.add(name);
});
@ -1133,11 +1133,11 @@ public class SolrMetricManager {
registry = enforcePrefix(registry);
try {
if (!reportersLock.tryLock(10, TimeUnit.SECONDS)) {
log.warn("Could not obtain lock to modify reporters registry: " + registry);
log.warn("Could not obtain lock to modify reporters registry: {}", registry);
return Collections.emptyMap();
}
} catch (InterruptedException e) {
log.warn("Interrupted while trying to obtain lock to modify reporters registry: " + registry);
log.warn("Interrupted while trying to obtain lock to modify reporters registry: {}", registry);
return Collections.emptyMap();
}
try {
@ -1219,7 +1219,7 @@ public class SolrMetricManager {
try {
loadReporter(registryName, core, info, core.getMetricTag());
} catch (Exception e) {
log.warn("Could not load shard reporter, pluginInfo=" + info, e);
log.warn("Could not load shard reporter, pluginInfo={}", info, e);
}
}
}
@ -1241,7 +1241,7 @@ public class SolrMetricManager {
try {
loadReporter(registryName, cc, info);
} catch (Exception e) {
log.warn("Could not load cluster reporter, pluginInfo=" + info, e);
log.warn("Could not load cluster reporter, pluginInfo={}", info, e);
}
}
}

View File

@ -63,10 +63,10 @@ public abstract class SolrMetricReporter implements Closeable, PluginInfoInitial
}
validate();
if (!enabled) {
log.info("Reporter disabled for registry " + registryName);
log.info("Reporter disabled for registry {}", registryName);
return;
}
log.debug("Initializing for registry " + registryName);
log.debug("Initializing for registry {}", registryName);
doInit();
}

View File

@ -59,7 +59,7 @@ public class ReporterClientCache<T> implements Closeable {
item = clientProvider.get();
cache.put(id, item);
} catch (Exception e) {
log.warn("Error providing a new client for id=" + id, e);
log.warn("Error providing a new client for id={}", id, e);
item = null;
}
}
@ -75,7 +75,7 @@ public class ReporterClientCache<T> implements Closeable {
try {
((Closeable)client).close();
} catch (Exception e) {
log.warn("Error closing client " + client + ", ignoring...", e);
log.warn("Error closing client {}, ignoring...", client, e);
}
}
}

View File

@ -79,7 +79,7 @@ public class SolrJmxReporter extends FilteringSolrMetricReporter {
mBeanServer = JmxUtil.findMBeanServerForAgentId(agentId);
} else {
mBeanServer = JmxUtil.findFirstMBeanServer();
log.debug("No serviceUrl or agentId was configured, using first MBeanServer: " + mBeanServer);
log.debug("No serviceUrl or agentId was configured, using first MBeanServer: {}", mBeanServer);
}
if (mBeanServer == null) {
@ -108,7 +108,7 @@ public class SolrJmxReporter extends FilteringSolrMetricReporter {
.build();
reporter.start();
started = true;
log.info("JMX monitoring for '" + fullDomain + "' (registry '" + registryName + "') enabled at server: " + mBeanServer);
log.info("JMX monitoring for '{}' (registry '{}') enabled at server: {}", fullDomain, registryName, mBeanServer);
}
/**
@ -123,7 +123,7 @@ public class SolrJmxReporter extends FilteringSolrMetricReporter {
*/
@Override
public synchronized void close() {
log.info("Closing reporter " + this + " for registry " + registryName + " / " + registry);
log.info("Closing reporter {} for registry {}/{}", this, registryName, registry);
started = false;
if (reporter != null) {
reporter.close();

View File

@ -522,9 +522,13 @@ public class JmxMetricsReporter implements Reporter, Closeable {
if (mBeanServer.isRegistered(objectName)) {
if (log.isDebugEnabled()) {
Set<ObjectInstance> objects = mBeanServer.queryMBeans(objectName, null);
log.debug("## removing existing " + objects.size() + " bean(s) for " + objectName.getCanonicalName() + ", current tag=" + tag + ":");
if (log.isDebugEnabled()) {
log.debug("## removing existing {} bean(s) for {}, current tag={}:", objects.size(), objectName.getCanonicalName(), tag);
}
for (ObjectInstance inst : objects) {
log.debug("## - tag=" + mBeanServer.getAttribute(inst.getObjectName(), INSTANCE_TAG));
if (log.isDebugEnabled()) {
log.debug("## - tag={}{}", mBeanServer.getAttribute(inst.getObjectName(), INSTANCE_TAG));
}
}
}
mBeanServer.unregisterMBean(objectName);
@ -538,7 +542,9 @@ public class JmxMetricsReporter implements Reporter, Closeable {
} else {
registered.put(objectName, objectName);
}
log.debug("## registered " + objectInstance.getObjectName().getCanonicalName() + ", tag=" + tag);
if (log.isDebugEnabled()) {
log.debug("## registered {}, tag={}", objectInstance.getObjectName().getCanonicalName(), tag);
}
}
private void unregisterMBean(ObjectName originalObjectName) throws InstanceNotFoundException, MBeanRegistrationException {
@ -548,7 +554,9 @@ public class JmxMetricsReporter implements Reporter, Closeable {
}
Set<ObjectInstance> objects = mBeanServer.queryMBeans(objectName, exp);
for (ObjectInstance o : objects) {
log.debug("## Unregistered " + o.getObjectName().getCanonicalName() + ", tag=" + tag);
if (log.isDebugEnabled()) {
log.debug("## Unregistered {}, tag={}", o.getObjectName().getCanonicalName(), tag);
}
mBeanServer.unregisterMBean(o.getObjectName());
}
}
@ -740,7 +748,9 @@ public class JmxMetricsReporter implements Reporter, Closeable {
} else if (v instanceof Gauge) {
listener.onGaugeAdded(k, (Gauge)v);
} else {
log.warn("Unknown metric type " + v.getClass().getName() + " for metric '" + k + "', ignoring");
if (log.isWarnEnabled()) {
log.warn("Unknown metric type {} for metric '{}', ignoring", v.getClass().getName(), k);
}
}
});
}
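The `log.isDebugEnabled()` guards introduced above address the "wasted work" half of the commit: even with `{}` placeholders, argument expressions such as `mBeanServer.queryMBeans(...)`, `objects.size()`, and `getCanonicalName()` run before the logger is consulted. A minimal sketch of the guard idiom, using names from the surrounding hunks:

if (log.isDebugEnabled()) {
  // The JMX query and the size()/getCanonicalName() calls execute only
  // when DEBUG is actually enabled.
  Set<ObjectInstance> objects = mBeanServer.queryMBeans(objectName, null);
  log.debug("## removing existing {} bean(s) for {}, current tag={}:",
      objects.size(), objectName.getCanonicalName(), tag);
}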

View File

@ -196,7 +196,7 @@ public class SolrClusterReporter extends SolrCoreContainerReporter {
reporter.close();
}
if (!enabled) {
log.info("Reporter disabled for registry " + registryName);
log.info("Reporter disabled for registry {}", registryName);
return;
}
// start reporter only in cloud mode
@ -205,7 +205,7 @@ public class SolrClusterReporter extends SolrCoreContainerReporter {
return;
}
if (period < 1) { // don't start it
log.info("Turning off node reporter, period=" + period);
log.info("Turning off node reporter, period={}", period);
return;
}
HttpClient httpClient = cc.getUpdateShardHandler().getDefaultHttpClient();
@ -275,7 +275,7 @@ public class SolrClusterReporter extends SolrCoreContainerReporter {
try {
nodeName = LeaderElector.getNodeName(oid);
} catch (Exception e) {
log.warn("Unknown format of leader id, skipping: " + oid, e);
log.warn("Unknown format of leader id, skipping: {}", oid, e);
return lastKnownUrl;
}
// convert nodeName back to URL

View File

@ -106,11 +106,11 @@ public class SolrReporter extends ScheduledReporter {
} else if (oFilters instanceof Collection) {
metricFilters = (Collection<String>)oFilters;
} else {
log.warn("Invalid report filters, ignoring: " + oFilters);
log.warn("Invalid report filters, ignoring: {}", oFilters);
}
}
if (groupPattern == null || registryPattern == null) {
log.warn("Invalid report configuration, group and registry required!: " + map);
log.warn("Invalid report configuration, group and registry required!: {}", map);
return null;
}
return new Report(groupPattern, labelPattern, registryPattern, metricFilters);
@ -328,7 +328,7 @@ public class SolrReporter extends ScheduledReporter {
CompiledReport cs = new CompiledReport(report);
compiledReports.add(cs);
} catch (PatternSyntaxException e) {
log.warn("Skipping report with invalid registryPattern: " + report.registryPattern, e);
log.warn("Skipping report with invalid registryPattern: {}", report.registryPattern, e);
}
});
this.skipHistograms = skipHistograms;
@ -404,7 +404,9 @@ public class SolrReporter extends ScheduledReporter {
//log.info("%%% sending to " + url + ": " + req.getParams());
solr.request(req);
} catch (Exception e) {
log.debug("Error sending metric report: {}", e.toString());
if (log.isDebugEnabled()) {
log.debug("Error sending metric report: {}", e.toString());
}
}
}

View File

@ -124,16 +124,18 @@ public class SolrShardReporter extends SolrCoreReporter {
reporter.close();
}
if (!enabled) {
log.info("Reporter disabled for registry " + registryName);
log.info("Reporter disabled for registry {}", registryName);
return;
}
if (core.getCoreDescriptor().getCloudDescriptor() == null) {
// not a cloud core
log.warn("Not initializing shard reporter for non-cloud core " + core.getName());
if (log.isWarnEnabled()) {
log.warn("Not initializing shard reporter for non-cloud core {}", core.getName());
}
return;
}
if (period < 1) { // don't start it
log.warn("period=" + period + ", not starting shard reporter ");
log.warn("period={}, not starting shard reporter ", period);
return;
}
// our id is coreNodeName
@ -141,7 +143,7 @@ public class SolrShardReporter extends SolrCoreReporter {
// target registry is the leaderRegistryName
String groupId = core.getCoreMetricManager().getLeaderRegistryName();
if (groupId == null) {
log.warn("No leaderRegistryName for core " + core + ", not starting the reporter...");
log.warn("No leaderRegistryName for core {}, not starting the reporter...", core);
return;
}
SolrReporter.Report spec = new SolrReporter.Report(groupId, null, registryName, filters);
@ -176,12 +178,14 @@ public class SolrShardReporter extends SolrCoreReporter {
DocCollection collection = state.getCollection(core.getCoreDescriptor().getCollectionName());
Replica replica = collection.getLeader(core.getCoreDescriptor().getCloudDescriptor().getShardId());
if (replica == null) {
log.warn("No leader for " + collection.getName() + "/" + core.getCoreDescriptor().getCloudDescriptor().getShardId());
if (log.isWarnEnabled()) {
log.warn("No leader for {}/{}", collection.getName(), core.getCoreDescriptor().getCloudDescriptor().getShardId());
}
return null;
}
String baseUrl = replica.getStr("base_url");
if (baseUrl == null) {
log.warn("No base_url for replica " + replica);
log.warn("No base_url for replica {}", replica);
}
return baseUrl;
}

View File

@ -60,7 +60,7 @@ public class SolrRrdBackend extends RrdByteArrayBackend implements Closeable {
this.lastModifiedTime = syncData.timestamp;
}
} catch (IOException e) {
log.warn("Exception retrieving data from " + path + ", store will be readOnly", e);
log.warn("Exception retrieving data from {}, store will be readOnly", path, e);
readOnly = true;
}
this.readOnly = readOnly;

View File

@ -101,7 +101,9 @@ public class SolrRrdBackendFactory extends RrdBackendFactory implements SolrClos
this.timeSource = timeSource;
this.collection = collection;
this.syncPeriod = syncPeriod;
log.debug("Created " + hashCode());
if (log.isDebugEnabled()) {
log.debug("Created {}", hashCode());
}
this.idPrefixLength = ID_PREFIX.length() + ID_SEP.length();
syncService = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(2,
new SolrNamedThreadFactory("SolrRrdBackendFactory"));
@ -333,7 +335,7 @@ public class SolrRrdBackendFactory extends RrdBackendFactory implements SolrClos
try {
solrClient.deleteByQuery(collection, "{!term f=id}" + ID_PREFIX + ID_SEP + path);
} catch (SolrServerException | SolrException e) {
log.warn("Error deleting RRD for path " + path, e);
log.warn("Error deleting RRD for path {}", path, e);
}
}
@ -347,7 +349,9 @@ public class SolrRrdBackendFactory extends RrdBackendFactory implements SolrClos
if (Thread.interrupted()) {
return;
}
log.debug("-- maybe sync backends: " + backends.keySet());
if (log.isDebugEnabled()) {
log.debug("-- maybe sync backends: {}", backends.keySet());
}
Map<String, SolrRrdBackend.SyncData> syncDatas = new HashMap<>();
backends.forEach((path, backend) -> {
SolrRrdBackend.SyncData syncData = backend.getSyncDataAndMarkClean();
@ -358,7 +362,9 @@ public class SolrRrdBackendFactory extends RrdBackendFactory implements SolrClos
if (syncDatas.isEmpty()) {
return;
}
log.debug("-- syncing " + syncDatas.keySet());
if (log.isDebugEnabled()) {
log.debug("-- syncing {}", syncDatas.keySet());
}
// write updates
try {
syncDatas.forEach((path, syncData) -> {
@ -370,7 +376,7 @@ public class SolrRrdBackendFactory extends RrdBackendFactory implements SolrClos
try {
solrClient.add(collection, doc);
} catch (SolrServerException | IOException e) {
log.warn("Error updating RRD data for " + path, e);
log.warn("Error updating RRD data for {}", path, e);
}
});
if (Thread.interrupted()) {
@ -449,7 +455,9 @@ public class SolrRrdBackendFactory extends RrdBackendFactory implements SolrClos
if (closed) {
return;
}
log.debug("Closing " + hashCode());
if (log.isDebugEnabled()) {
log.debug("Closing {}", hashCode());
}
closed = true;
backends.forEach((p, b) -> IOUtils.closeQuietly(b));
backends.clear();

View File

@ -138,7 +138,7 @@ public abstract class AbstractSpatialFieldType<T extends SpatialStrategy> extend
}
// Warn about using old Spatial4j class names
if (argEntry.getValue().contains(OLD_SPATIAL4J_PREFIX)) {
log.warn("Replace '" + OLD_SPATIAL4J_PREFIX + "' with '" + NEW_SPATIAL4J_PREFIX + "' in your schema.");
log.warn("Replace '{}' with '{}' in your schema", OLD_SPATIAL4J_PREFIX, NEW_SPATIAL4J_PREFIX);
argEntry.setValue(argEntry.getValue().replace(OLD_SPATIAL4J_PREFIX, NEW_SPATIAL4J_PREFIX));
}
}

View File

@ -109,7 +109,9 @@ public abstract class AbstractSpatialPrefixTreeFieldType<T extends PrefixTreeStr
if (defaultFieldValuesArrayLen != null)
strat.setDefaultFieldValuesArrayLen(defaultFieldValuesArrayLen);
log.info(this.toString()+" strat: "+strat+" maxLevels: "+ grid.getMaxLevels());//TODO output maxDetailKm
if (log.isInfoEnabled()) {
log.info("{} strat: {} maxLevels: {}", this, strat, grid.getMaxLevels());//TODO output maxDetailKm
}
return strat;
}

View File

@ -82,7 +82,7 @@ public class BinaryField extends FieldType {
public IndexableField createField(SchemaField field, Object val) {
if (val == null) return null;
if (!field.stored()) {
log.trace("Ignoring unstored binary field: " + field);
log.trace("Ignoring unstored binary field: {}", field);
return null;
}
byte[] buf = null;

View File

@ -85,7 +85,9 @@ public class ExternalFileFieldReloader extends AbstractSolrEventListener {
if (type instanceof ExternalFileField) {
ExternalFileField eff = (ExternalFileField)type;
fieldSources.add(eff.getFileFloatSource(field, datadir));
log.info("Adding ExternalFileFieldReloader listener for field {}", field.getName());
if (log.isInfoEnabled()) {
log.info("Adding ExternalFileFieldReloader listener for field {}", field.getName());
}
}
}
}

View File

@ -165,7 +165,7 @@ public final class FieldTypePluginLoader
protected FieldType register(String name,
FieldType plugin) throws Exception {
log.trace("fieldtype defined: " + plugin );
log.trace("fieldtype defined: {}", plugin);
return fieldTypes.put( name, plugin );
}
@ -241,7 +241,7 @@ public final class FieldTypePluginLoader
analyzer.setVersion(luceneMatchVersion);
return analyzer;
} catch (Exception e) {
log.error("Cannot load analyzer: "+analyzerName, e);
log.error("Cannot load analyzer: {}", analyzerName, e);
throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,
"Cannot load analyzer: "+analyzerName, e );
}
@ -264,7 +264,7 @@ public final class FieldTypePluginLoader
if (Objects.nonNull(name)) {
factory = CharFilterFactory.forName(name, params);
if (Objects.nonNull(className)) {
log.error("Both of name: " + name + " and className: " + className + " are specified for charFilter.");
log.error("Both of name: {} and className: {} are specified for charFilter.", name, className);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Cannot create charFilter: Both of name and className are specified.");
}
@ -314,7 +314,7 @@ public final class FieldTypePluginLoader
if (Objects.nonNull(name)) {
factory = TokenizerFactory.forName(name, params);
if (Objects.nonNull(className)) {
log.error("Both of name: " + name + " and className: " + className + " are specified for tokenizer.");
log.error("Both of name: {} and className: {} are specified for tokenizer.", name, className);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Cannot create tokenizer: Both of name and className are specified.");
}
@ -368,7 +368,7 @@ public final class FieldTypePluginLoader
if (Objects.nonNull(name)) {
factory = TokenFilterFactory.forName(name, params);
if (Objects.nonNull(className)) {
log.error("Both of name: " + name + " and className: " + className + " are specified for tokenFilter.");
log.error("Both of name: {} and className: {} are specified for tokenFilter.", name, className);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Cannot create tokenFilter: Both of name and className are specified.");
}
@ -406,9 +406,12 @@ public final class FieldTypePluginLoader
SolrConfig.parseLuceneVersionString(configuredVersion) : schema.getDefaultLuceneMatchVersion();
if (!version.onOrAfter(Version.LUCENE_8_0_0)) {
log.warn(pluginClassName + " is using deprecated " + version +
" emulation. You should at some point declare and reindex to at least 8.0, because " +
"7.x emulation is deprecated and will be removed in 9.0");
if (log.isWarnEnabled()) {
log.warn("{} is using deprecated {}" +
" emulation. You should at some point declare and reindex to at least 8.0, because " +
"7.x emulation is deprecated and will be removed in 9.0"
, pluginClassName, version);
}
}
return version;
}

View File

@ -480,14 +480,14 @@ public class IndexSchema {
// Another case where the initialization from the test harness is different than the "real world"
if (nd==null) {
sb.append("schema has no name!");
log.warn(sb.toString());
log.warn("{}", sb);
} else {
name = nd.getNodeValue();
sb.append("Schema ");
sb.append(NAME);
sb.append("=");
sb.append(name);
log.info(sb.toString());
log.info("{}", sb);
}
// /schema/@version
@ -546,7 +546,7 @@ public class IndexSchema {
expression = stepsToPath(SCHEMA, UNIQUE_KEY, TEXT_FUNCTION);
node = (Node) xpath.evaluate(expression, document, XPathConstants.NODE);
if (node==null) {
log.warn("no " + UNIQUE_KEY + " specified in schema.");
log.warn("no {} specified in schema.", UNIQUE_KEY);
} else {
uniqueKeyField=getIndexedField(node.getNodeValue().trim());
uniqueKeyFieldName=uniqueKeyField.getName();
@ -572,7 +572,7 @@ public class IndexSchema {
}
if (!uniqueKeyField.stored()) {
log.warn(UNIQUE_KEY + " is not stored - distributed search and MoreLikeThis will not work");
log.warn("{} is not stored - distributed search and MoreLikeThis will not work", UNIQUE_KEY);
}
if (uniqueKeyField.multiValued()) {
String msg = UNIQUE_KEY + " field ("+uniqueKeyFieldName+
@ -651,7 +651,7 @@ public class IndexSchema {
NamedNodeMap attrs = node.getAttributes();
String name = DOMUtil.getAttr(attrs, NAME, "field definition");
log.trace("reading field def "+name);
log.trace("reading field def {}", name);
String type = DOMUtil.getAttr(attrs, TYPE, "field " + name);
FieldType ft = fieldTypes.get(type);
@ -674,13 +674,15 @@ public class IndexSchema {
+ f.getName() + "' [[["+old.toString()+"]]] and [[["+f.toString()+"]]]";
throw new SolrException(ErrorCode.SERVER_ERROR, msg );
}
log.debug("field defined: " + f);
log.debug("field defined: {}", f);
if( f.getDefaultValue() != null ) {
log.debug(name+" contains default value: " + f.getDefaultValue());
if (log.isDebugEnabled()) {
log.debug("{} contains default value {}", name, f.getDefaultValue());
}
fieldsWithDefaultValue.add( f );
}
if (f.isRequired()) {
log.debug(name+" is required in this schema");
log.debug("{} is required in this schema", name);
requiredFields.add(f);
}
} else if (node.getNodeName().equals(DYNAMIC_FIELD)) {
@ -713,7 +715,9 @@ public class IndexSchema {
DynamicField[] dFields = dynamicFieldList.toArray(new DynamicField[dynamicFieldList.size()]);
Arrays.sort(dFields);
log.trace("Dynamic Field Ordering:" + Arrays.toString(dFields));
if (log.isTraceEnabled()) {
log.trace("Dynamic Field Ordering: {}", Arrays.toString(dFields));
}
return dFields;
}
@ -738,8 +742,8 @@ public class IndexSchema {
try {
maxCharsInt = Integer.parseInt(maxChars);
} catch (NumberFormatException e) {
log.warn("Couldn't parse " + MAX_CHARS + " attribute for " + COPY_FIELD + " from "
+ source + " to " + dest + " as integer. The whole field will be copied.");
log.warn("Couldn't parse {} attribute for '{}' from '{}' to '{}' as integer. The whole field will be copied."
, MAX_CHARS, COPY_FIELD, source, dest);
}
}
@ -755,9 +759,10 @@ public class IndexSchema {
for (Map.Entry<SchemaField, Integer> entry : copyFieldTargetCounts.entrySet()) {
if (entry.getValue() > 1 && !entry.getKey().multiValued()) {
log.warn("Field " + entry.getKey().name + " is not multivalued "+
"and destination for multiple " + COPY_FIELDS + " ("+
entry.getValue()+")");
if (log.isWarnEnabled()) {
log.warn("Field {} is not multivalued and destination for multiople {} ({})"
, entry.getKey().name, COPY_FIELDS, entry.getValue());
}
}
}
}
@ -814,9 +819,13 @@ public class IndexSchema {
List<DynamicField> dynFields = new ArrayList<>(asList(dynamicFields));
for (SchemaField field : fields) {
if (isDuplicateDynField(dynFields, field)) {
log.debug("dynamic field already exists: dynamic field: [" + field.getName() + "]");
if (log.isDebugEnabled()) {
log.debug("dynamic field already exists: dynamic field: [{}]", field.getName());
}
} else {
log.debug("dynamic field creation for schema field: " + field.getName());
if (log.isDebugEnabled()) {
log.debug("dynamic field creation for schema field: {}", field.getName());
}
addDynamicFieldNoDupCheck(dynFields, field);
}
}
@ -825,7 +834,7 @@ public class IndexSchema {
private void addDynamicFieldNoDupCheck(List<DynamicField> dFields, SchemaField f) {
dFields.add(new DynamicField(f));
log.debug("dynamic field defined: " + f);
log.debug("dynamic field defined: {}", f);
}
protected boolean isDuplicateDynField(List<DynamicField> dFields, SchemaField f) {
@ -849,8 +858,8 @@ public class IndexSchema {
* @see SolrCoreAware
*/
public void registerCopyField(String source, String dest, int maxChars) {
log.debug(COPY_FIELD + " " + SOURCE + "='" + source + "' " + DESTINATION + "='" + dest
+ "' " + MAX_CHARS + "=" + maxChars);
log.debug("{} {}='{}' {}='{}' {}='{}'", COPY_FIELD, SOURCE, source, DESTINATION, dest
, MAX_CHARS, maxChars);
DynamicField destDynamicField = null;
SchemaField destSchemaField = fields.get(dest);

View File

@ -132,7 +132,7 @@ public class JsonPreAnalyzedParser implements PreAnalyzedParser {
try {
tokenStart = Integer.parseInt(String.valueOf(obj));
} catch (NumberFormatException nfe) {
log.warn("Invalid " + OFFSET_START_KEY + " attribute, skipped: '" + obj + "'");
log.warn("Invalid {} attribute, skipped: '{}'", OFFSET_START_KEY, obj);
hasOffsetStart = false;
}
}
@ -145,7 +145,7 @@ public class JsonPreAnalyzedParser implements PreAnalyzedParser {
try {
tokenEnd = Integer.parseInt(String.valueOf(obj));
} catch (NumberFormatException nfe) {
log.warn("Invalid " + OFFSET_END_KEY + " attribute, skipped: '" + obj + "'");
log.warn("Invalid {} attribute, skipped: '{}'", OFFSET_END_KEY, obj);
hasOffsetEnd = false;
}
}
@ -158,7 +158,7 @@ public class JsonPreAnalyzedParser implements PreAnalyzedParser {
try {
posIncr = Integer.parseInt(String.valueOf(obj));
} catch (NumberFormatException nfe) {
log.warn("Invalid " + POSINCR_KEY + " attribute, skipped: '" + obj + "'");
log.warn("Invalid {} attribute, skipped: '{}'", POSINCR_KEY, obj);
}
}
PositionIncrementAttribute patt = parent.addAttribute(PositionIncrementAttribute.class);
@ -178,13 +178,17 @@ public class JsonPreAnalyzedParser implements PreAnalyzedParser {
FlagsAttribute flags = parent.addAttribute(FlagsAttribute.class);
flags.setFlags(f);
} catch (NumberFormatException nfe) {
log.warn("Invalid " + FLAGS_KEY + " attribute, skipped: '" + e.getValue() + "'");
if (log.isWarnEnabled()) {
log.warn("Invalid {} attribute, skipped: '{}'", FLAGS_KEY, e.getValue());
}
}
} else if (key.equals(TYPE_KEY)) {
TypeAttribute tattr = parent.addAttribute(TypeAttribute.class);
tattr.setType(String.valueOf(e.getValue()));
} else {
log.warn("Unknown attribute, skipped: " + e.getKey() + "=" + e.getValue());
if (log.isWarnEnabled()) {
log.warn("Unknown attribute, skipped: {} = {}", e.getKey(), e.getValue());
}
}
}
// handle offset attr

View File

@ -133,7 +133,9 @@ public final class ManagedIndexSchema extends IndexSchema {
final FileOutputStream out = new FileOutputStream(managedSchemaFile);
writer = new OutputStreamWriter(out, StandardCharsets.UTF_8);
persist(writer);
log.info("Upgraded to managed schema at " + managedSchemaFile.getPath());
if (log.isInfoEnabled()) {
log.info("Upgraded to managed schema at {}", managedSchemaFile.getPath());
}
} catch (IOException e) {
final String msg = "Error persisting managed schema " + managedSchemaFile;
log.error(msg, e);
@ -177,20 +179,20 @@ public final class ManagedIndexSchema extends IndexSchema {
try {
zkClient.create(managedSchemaPath, data, CreateMode.PERSISTENT, true);
schemaZkVersion = 0;
log.info("Created and persisted managed schema znode at " + managedSchemaPath);
log.info("Created and persisted managed schema znode at {}", managedSchemaPath);
} catch (KeeperException.NodeExistsException e) {
// This is okay - do nothing and fall through
log.info("Managed schema znode at " + managedSchemaPath + " already exists - no need to create it");
log.info("Managed schema znode at {} already exists - no need to create it", managedSchemaPath);
}
} else {
try {
// Assumption: the path exists
Stat stat = zkClient.setData(managedSchemaPath, data, schemaZkVersion, true);
schemaZkVersion = stat.getVersion();
log.info("Persisted managed schema version "+schemaZkVersion+" at " + managedSchemaPath);
log.info("Persisted managed schema version {} at {}", schemaZkVersion, managedSchemaPath);
} catch (KeeperException.BadVersionException e) {
log.error("Bad version when trying to persist schema using "+schemaZkVersion+" due to: "+e);
log.error("Bad version when trying to persist schema using {} due to: ", schemaZkVersion, e);
success = false;
schemaChangedInZk = true;
@ -230,8 +232,10 @@ public final class ManagedIndexSchema extends IndexSchema {
return; // nothing to wait for ...
log.info("Waiting up to "+maxWaitSecs+" secs for "+concurrentTasks.size()+
" replicas to apply schema update version "+schemaZkVersion+" for collection "+collection);
if (log.isInfoEnabled()) {
log.info("Waiting up to {} secs for {} replicas to apply schema update version {} for collection {}"
, maxWaitSecs, concurrentTasks.size(), schemaZkVersion, collection);
}
// use an executor service to invoke schema zk version requests in parallel with a max wait time
int poolSize = Math.min(concurrentTasks.size(), 10);
@ -257,7 +261,7 @@ public final class ManagedIndexSchema extends IndexSchema {
if (vers == -1) {
String coreUrl = concurrentTasks.get(f).coreUrl;
log.warn("Core "+coreUrl+" version mismatch! Expected "+schemaZkVersion+" but got "+vers);
log.warn("Core {} version mismatch! Expected {} but got {}", coreUrl, schemaZkVersion, vers);
if (failedList == null) failedList = new ArrayList<>();
failedList.add(coreUrl);
}
@ -270,17 +274,20 @@ public final class ManagedIndexSchema extends IndexSchema {
maxWaitSecs+" seconds! Failed cores: "+failedList);
} catch (InterruptedException ie) {
log.warn("Core "+localCoreNodeName+" was interrupted waiting for schema version "+schemaZkVersion+
" to propagate to "+concurrentTasks.size()+" replicas for collection "+collection);
if (log.isWarnEnabled()) {
log.warn("Core {} was interrupted waiting for schema version {} to propagate to {} replicas for collection {}"
, localCoreNodeName, schemaZkVersion, concurrentTasks.size(), collection);
}
Thread.currentThread().interrupt();
} finally {
if (!parallelExecutor.isShutdown())
parallelExecutor.shutdown();
}
log.info("Took {}ms for {} replicas to apply schema update version {} for collection {}",
timer.getTime(), concurrentTasks.size(), schemaZkVersion, collection);
if (log.isInfoEnabled()) {
log.info("Took {}ms for {} replicas to apply schema update version {} for collection {}",
timer.getTime(), concurrentTasks.size(), schemaZkVersion, collection);
}
}
protected static List<String> getActiveReplicaCoreUrls(ZkController zkController, String collection, String localCoreNodeName) {
@ -344,15 +351,15 @@ public final class ManagedIndexSchema extends IndexSchema {
// rather than waiting and re-polling, let's be proactive and tell the replica
// to refresh its schema from ZooKeeper, if that fails, then the
//Thread.sleep(1000); // slight delay before requesting version again
log.error("Replica "+coreUrl+" returned schema version "+
remoteVersion+" and has not applied schema version "+expectedZkVersion);
log.error("Replica {} returned schema version {} and has not applied schema version {}"
, coreUrl, remoteVersion, expectedZkVersion);
}
} catch (Exception e) {
if (e instanceof InterruptedException) {
break; // stop looping
} else {
log.warn("Failed to get /schema/zkversion from " + coreUrl + " due to: " + e);
log.warn("Failed to get /schema/zkversion from {} due to: ", coreUrl, e);
}
}
}
@ -402,11 +409,15 @@ public final class ManagedIndexSchema extends IndexSchema {
newSchema.fields.put(newField.getName(), newField);
if (null != newField.getDefaultValue()) {
log.debug(newField.getName() + " contains default value: " + newField.getDefaultValue());
if (log.isDebugEnabled()) {
log.debug("{} contains default value: {}", newField.getName(), newField.getDefaultValue());
}
newSchema.fieldsWithDefaultValue.add(newField);
}
if (newField.isRequired()) {
log.debug("{} is required in this schema", newField.getName());
if (log.isDebugEnabled()) {
log.debug("{} is required in this schema", newField.getName());
}
newSchema.requiredFields.add(newField);
}
Collection<String> copyFields = copyFieldNames.get(newField.getName());
@ -502,11 +513,15 @@ public final class ManagedIndexSchema extends IndexSchema {
SchemaField replacementField = SchemaField.create(fieldName, replacementFieldType, replacementArgs);
newSchema.fields.put(fieldName, replacementField);
if (null != replacementField.getDefaultValue()) {
log.debug(replacementField.getName() + " contains default value: " + replacementField.getDefaultValue());
if (log.isDebugEnabled()) {
log.debug("{} contains default value: {}", replacementField.getName(), replacementField.getDefaultValue());
}
newSchema.fieldsWithDefaultValue.add(replacementField);
}
if (replacementField.isRequired()) {
log.debug("{} is required in this schema", replacementField.getName());
if (log.isDebugEnabled()) {
log.debug("{} is required in this schema", replacementField.getName());
}
newSchema.requiredFields.add(replacementField);
}
@ -762,7 +777,9 @@ public final class ManagedIndexSchema extends IndexSchema {
if(persist) {
success = newSchema.persistManagedSchema(false); // don't just create - update it if it already exists
if (success) {
log.debug("Added copy fields for {} sources", copyFields.size());
if (log.isDebugEnabled()) {
log.debug("Added copy fields for {} sources", copyFields.size());
}
} else {
log.error("Failed to add copy fields for {} sources", copyFields.size());
}
@ -969,7 +986,7 @@ public final class ManagedIndexSchema extends IndexSchema {
if (i > 0) fieldTypeNames.append(", ");
fieldTypeNames.append(fieldTypeList.get(i).typeName);
}
log.debug("Added field types: {}", fieldTypeNames.toString());
log.debug("Added field types: {}", fieldTypeNames);
}
} else {
// this is unlikely to happen as most errors are handled as exceptions in the persist code
@ -1070,11 +1087,15 @@ public final class ManagedIndexSchema extends IndexSchema {
SchemaField replacementField = SchemaField.create(fieldName, replacementFieldType, oldField.getArgs());
replacementFields.add(replacementField); // Save the new field to be added after iteration is finished
if (null != replacementField.getDefaultValue()) {
log.debug(replacementField.getName() + " contains default value: " + replacementField.getDefaultValue());
if (log.isDebugEnabled()) {
log.debug("{} contains default value: {}", replacementField.getName(), replacementField.getDefaultValue());
}
newSchema.fieldsWithDefaultValue.add(replacementField);
}
if (replacementField.isRequired()) {
log.debug("{} is required in this schema", replacementField.getName());
if (log.isDebugEnabled()) {
log.debug("{} is required in this schema", replacementField.getName());
}
newSchema.requiredFields.add(replacementField);
}
newSchema.removeCopyFieldSource(fieldName, copyFieldsToRebuild);

View File

@ -139,8 +139,8 @@ public class ManagedIndexSchemaFactory extends IndexSchemaFactory implements Sol
Thread.currentThread().interrupt();
log.warn("", e);
} catch (KeeperException.NoNodeException e) {
log.info("The schema is configured as managed, but managed schema resource " + managedSchemaResourceName
+ " not found - loading non-managed schema " + resourceName + " instead");
log.info("The schema is configured as managed, but managed schema resource {} not found - loading non-managed schema {} instead"
, managedSchemaResourceName, resourceName);
} catch (KeeperException e) {
String msg = "Error attempting to access " + managedSchemaPath;
log.error(msg, e);
@ -194,8 +194,8 @@ public class ManagedIndexSchemaFactory extends IndexSchemaFactory implements Sol
loadedResource = managedSchemaResourceName;
warnIfNonManagedSchemaExists();
} catch (IOException e) {
log.info("The schema is configured as managed, but managed schema resource " + managedSchemaResourceName
+ " not found - loading non-managed schema " + resourceName + " instead");
log.info("The schema is configured as managed, but managed schema resource {} not found - loading non-managed schema {} instead"
, managedSchemaResourceName, resourceName);
}
if (null == schemaInputStream) {
// The managed schema file could not be found - load the non-managed schema
@ -230,7 +230,7 @@ public class ManagedIndexSchemaFactory extends IndexSchemaFactory implements Sol
log.warn("", e); // Log as warning and suppress the exception
} catch (KeeperException e) {
// log as warning and suppress the exception
log.warn("Error checking for the existence of the non-managed schema " + resourceName, e);
log.warn("Error checking for the existence of the non-managed schema {}", resourceName, e);
}
} else { // Config is not in ZooKeeper
InputStream nonManagedSchemaInputStream = null;
@ -246,8 +246,8 @@ public class ManagedIndexSchemaFactory extends IndexSchemaFactory implements Sol
}
}
if (exists) {
log.warn("The schema has been upgraded to managed, but the non-managed schema " + resourceName
+ " is still loadable. PLEASE REMOVE THIS FILE.");
log.warn("The schema has been upgraded to managed, but the non-managed schema {} is still loadable. PLEASE REMOVE THIS FILE."
, resourceName);
}
}
}
@ -271,28 +271,30 @@ public class ManagedIndexSchemaFactory extends IndexSchemaFactory implements Sol
// schema file by appending UPGRADED_SCHEMA_EXTENSION to its name.
if (resourceName.equals(managedSchemaResourceName)) {
log.info("On upgrading to managed schema, did not rename non-managed schema '"
+ resourceName + "' because it's the same as the managed schema's name.");
log.info("On upgrading to managed schema, did not rename non-managed schema '{}' because it's the same as the managed schema's name."
, resourceName);
} else {
final File nonManagedSchemaFile = locateConfigFile(resourceName);
if (null == nonManagedSchemaFile) {
// Don't throw an exception for failure to rename the non-managed schema
log.warn("On upgrading to managed schema, did not rename non-managed schema "
+ resourceName + " because it's neither an absolute file "
+ "nor under SolrConfig.getConfigDir() or the current directory."
+ " PLEASE REMOVE THIS FILE.");
if (log.isWarnEnabled()) {
log.warn("On upgrading to managed schema, did not rename non-managed schema {} "
+ "because it's neither an absolute file "
+ "nor under SolrConfig.getConfigDir() or the current directory. "
+ "PLEASE REMOVE THIS FILE.", resourceName);
}
} else {
File upgradedSchemaFile = new File(nonManagedSchemaFile + UPGRADED_SCHEMA_EXTENSION);
if (nonManagedSchemaFile.renameTo(upgradedSchemaFile)) {
// Set the resource name to the managed schema so that the CoreAdminHandler returns a findable filename
schema.setResourceName(managedSchemaResourceName);
log.info("After upgrading to managed schema, renamed the non-managed schema "
+ nonManagedSchemaFile + " to " + upgradedSchemaFile);
log.info("After upgrading to managed schema, renamed the non-managed schema {} to {}"
, nonManagedSchemaFile, upgradedSchemaFile);
} else {
// Don't throw an exception for failure to rename the non-managed schema
log.warn("Can't rename " + nonManagedSchemaFile.toString() + " to "
+ upgradedSchemaFile.toString() + " - PLEASE REMOVE THIS FILE.");
log.warn("Can't rename {} to {} - PLEASE REMOVE THIS FILE."
, nonManagedSchemaFile, upgradedSchemaFile);
}
}
}
@ -322,8 +324,8 @@ public class ManagedIndexSchemaFactory extends IndexSchemaFactory implements Sol
*/
private void zkUgradeToManagedSchema() {
if (resourceName.equals(managedSchemaResourceName)) {
log.info("On upgrading to managed schema, did not rename non-managed schema "
+ resourceName + " because it's the same as the managed schema's name.");
log.info("On upgrading to managed schema, did not rename non-managed schema {} because it's the same as the managed schema's name."
, resourceName);
return;
}
final ZkSolrResourceLoader zkLoader = (ZkSolrResourceLoader)loader;
@ -366,11 +368,11 @@ public class ManagedIndexSchemaFactory extends IndexSchemaFactory implements Sol
// Set the resource name to the managed schema so that the CoreAdminHandler returns a findable filename
schema.setResourceName(managedSchemaResourceName);
log.info("After upgrading to managed schema in ZooKeeper, renamed the non-managed schema "
+ nonManagedSchemaPath + " to " + upgradedSchemaPath);
log.info("After upgrading to managed schema in ZooKeeper, renamed the non-managed schema {} to {}"
, nonManagedSchemaPath, upgradedSchemaPath);
} else {
log.info("After upgrading to managed schema in ZooKeeper, the non-managed schema "
+ nonManagedSchemaPath + " no longer exists.");
log.info("After upgrading to managed schema in ZooKeeper, the non-managed schema {} no longer exists."
, nonManagedSchemaPath);
}
} catch (Exception e) {
if (e instanceof InterruptedException) {
@ -387,7 +389,7 @@ public class ManagedIndexSchemaFactory extends IndexSchemaFactory implements Sol
} catch (KeeperException.NoNodeException nne) {
// ignore - someone else deleted it
} catch (Exception e) {
log.warn("Unable to delete schema upgrade lock file " + lockPath, e);
log.warn("Unable to delete schema upgrade lock file {}", lockPath, e);
}
}
}

View File

@ -140,7 +140,7 @@ public class OpenExchangeRatesOrgProvider implements ExchangeRateProvider {
public boolean reload() throws SolrException {
InputStream ratesJsonStream = null;
try {
log.debug("Reloading exchange rates from "+ratesFileLocation);
log.debug("Reloading exchange rates from {}", ratesFileLocation);
try {
ratesJsonStream = (new URL(ratesFileLocation)).openStream();
} catch (Exception e) {
@ -175,7 +175,7 @@ public class OpenExchangeRatesOrgProvider implements ExchangeRateProvider {
refreshInterval = 60;
log.warn("Specified refreshInterval was too small. Setting to 60 minutes which is the update rate of openexchangerates.org");
}
log.debug("Initialized with rates="+ratesFileLocation+", refreshInterval="+refreshInterval+".");
log.debug("Initialized with rates={}, refreshInterval={}.", ratesFileLocation, refreshInterval);
refreshIntervalSeconds = refreshInterval * 60;
} catch (SolrException e1) {
throw e1;
@ -245,11 +245,13 @@ public class OpenExchangeRatesOrgProvider implements ExchangeRateProvider {
ev = parser.nextEvent();
}
} else {
log.warn("Unknown key "+key);
log.warn("Unknown key {}", key);
}
break;
} else {
log.warn("Expected key, got "+JSONParser.getEventString(ev));
if (log.isWarnEnabled()) {
log.warn("Expected key, got {}", JSONParser.getEventString(ev));
}
break;
}
@ -259,7 +261,9 @@ public class OpenExchangeRatesOrgProvider implements ExchangeRateProvider {
break;
default:
log.info("Noggit UNKNOWN_EVENT_ID:"+JSONParser.getEventString(ev));
if (log.isInfoEnabled()) {
log.info("Noggit UNKNOWN_EVENT_ID: {}", JSONParser.getEventString(ev));
}
break;
}
} while( ev != JSONParser.EOF);

View File

@ -83,8 +83,8 @@ public class PreAnalyzedField extends TextField implements HasImplicitIndexAnaly
Constructor<?> c = implClazz.getConstructor(new Class<?>[0]);
parser = (PreAnalyzedParser) c.newInstance(new Object[0]);
} catch (Exception e) {
log.warn("Can't use the configured PreAnalyzedParser class '" + implName +
"', using default " + DEFAULT_IMPL, e);
log.warn("Can't use the configured PreAnalyzedParser class '{}', using defualt {}"
, implName, DEFAULT_IMPL, e);
parser = new JsonPreAnalyzedParser();
}
}
@ -124,7 +124,9 @@ public class PreAnalyzedField extends TextField implements HasImplicitIndexAnaly
try {
f = fromString(field, String.valueOf(value));
} catch (Exception e) {
log.warn("Error parsing pre-analyzed field '" + field.getName() + "'", e);
if (log.isWarnEnabled()) {
log.warn("Error parsing pre-analyzed field '{}'", field.getName(), e);
}
return null;
}
return f;

View File

@ -155,7 +155,7 @@ public class SchemaManager {
waitForOtherReplicasToUpdate(timeOut, latestVersion);
}
if (errors.isEmpty() && timeOut.hasTimedOut()) {
log.warn(errorMsg + "Timed out.");
log.warn("{} Timed out", errorMsg);
errors = singletonList(errorMsg + "Timed out.");
}
return errors;
@ -428,7 +428,7 @@ public class SchemaManager {
if (!zkClient.exists(zkLoader.getConfigSetZkPath() + "/" + name, true)) {
String backupName = name + ManagedIndexSchemaFactory.UPGRADED_SCHEMA_EXTENSION;
if (!zkClient.exists(zkLoader.getConfigSetZkPath() + "/" + backupName, true)) {
log.warn("Unable to retrieve fresh managed schema, neither " + name + " nor " + backupName + " exist.");
log.warn("Unable to retrieve fresh managed schema, neither {} nor {} exist.", name, backupName);
// use current schema
return (ManagedIndexSchema) core.getLatestSchema();
} else {
@ -436,7 +436,7 @@ public class SchemaManager {
}
}
} catch (Exception e) {
log.warn("Unable to retrieve fresh managed schema " + name, e);
log.warn("Unable to retrieve fresh managed schema {}", name, e);
// use current schema
return (ManagedIndexSchema) core.getLatestSchema();
}

View File

@ -57,7 +57,9 @@ public class ZkIndexSchemaReader implements OnReconnect {
public void preClose(SolrCore core) {
CoreContainer cc = core.getCoreContainer();
if (cc.isZooKeeperAware()) {
log.debug("Removing ZkIndexSchemaReader OnReconnect listener as core "+core.getName()+" is shutting down.");
if (log.isDebugEnabled()) {
log.debug("Removing ZkIndexSchemaReader OnReconnect listener as core {} is shutting down.", core.getName());
}
cc.getZkController().removeOnReconnectListener(ZkIndexSchemaReader.this);
}
}
@ -86,7 +88,7 @@ public class ZkIndexSchemaReader implements OnReconnect {
* @return the registered {@linkplain SchemaWatcher}.
*/
public SchemaWatcher createSchemaWatcher() {
log.info("Creating ZooKeeper watch for the managed schema at " + managedSchemaPath);
log.info("Creating ZooKeeper watch for the managed schema at {}", managedSchemaPath);
SchemaWatcher watcher = new SchemaWatcher(this);
try {
@ -165,7 +167,9 @@ public class ZkIndexSchemaReader implements OnReconnect {
if (expectedZkVersion == -1 || oldSchema.schemaZkVersion < expectedZkVersion) {
byte[] data = zkClient.getData(managedSchemaPath, watcher, stat, true);
if (stat.getVersion() != oldSchema.schemaZkVersion) {
log.info("Retrieved schema version "+ stat.getVersion() + " from ZooKeeper");
if (log.isInfoEnabled()) {
log.info("Retrieved schema version {} from Zookeeper", stat.getVersion());
}
long start = System.nanoTime();
InputSource inputSource = new InputSource(new ByteArrayInputStream(data));
String resourceName = managedIndexSchemaFactory.getManagedSchemaResourceName();
@ -174,9 +178,9 @@ public class ZkIndexSchemaReader implements OnReconnect {
resourceName, stat.getVersion(), oldSchema.getSchemaUpdateLock());
managedIndexSchemaFactory.setSchema(newSchema);
long stop = System.nanoTime();
log.info("Finished refreshing schema in " + TimeUnit.MILLISECONDS.convert(stop - start, TimeUnit.NANOSECONDS) + " ms");
log.info("Finished refreshing schema in {} ms", TimeUnit.MILLISECONDS.convert(stop - start, TimeUnit.NANOSECONDS));
} else {
log.info("Current schema version "+oldSchema.schemaZkVersion+" is already the latest");
log.info("Current schema version {} is already the latest", oldSchema.schemaZkVersion);
}
}
}
@ -194,7 +198,7 @@ public class ZkIndexSchemaReader implements OnReconnect {
// force update now as the schema may have changed while our zk session was expired
updateSchema(null, -1);
} catch (Exception exc) {
log.error("Failed to update managed-schema watcher after session expiration due to: "+exc, exc);
log.error("Failed to update managed-schema watcher after session expiration due to: {}", exc);
}
}

View File

@ -168,9 +168,7 @@ public class Suggester extends SolrSpellChecker {
log.error("Store Lookup build from index on field: {} failed reader has: {} docs", field, reader.maxDoc());
}
} else {
if (log.isErrorEnabled()) {
log.error("Store Lookup build from sourceloaction: {} failed", sourceLocation);
}
log.error("Store Lookup build from sourceloaction: {} failed", sourceLocation);
}
} else {
if (log.isInfoEnabled()) {

View File

@ -681,8 +681,8 @@ public class SolrCLI implements CLIO {
if (--attempts > 0 && checkCommunicationError(exc)) {
if (!isFirstAttempt) // only show the log warning after the second attempt fails
if (log.isWarnEnabled()) {
log.warn("Request to " + getUrl + " failed due to: {}, sleeping for 5 seconds before re-trying the request ..."
, exc.getMessage());
log.warn("Request to {} failed due to: {}, sleeping for 5 seconds before re-trying the request ..."
, getUrl, exc.getMessage());
}
try {
Thread.sleep(5000);