mirror of https://github.com/apache/lucene.git
LUCENE-7788: fail precommit on unparameterised log messages and examine for wasted work/objects
This commit is contained in:
parent 3a743ea953
commit e43b17962a
@@ -66,11 +66,11 @@ class ValidateLogCallsTask extends DefaultTask {
// , "solr/core/src/java/org/apache/solr/cloud/rule"
, "solr/core/src/java/org/apache/solr/core"
, "solr/core/src/java/org/apache/solr/filestore"
// , "solr/core/src/java/org/apache/solr/handler/admin"
// , "solr/core/src/java/org/apache/solr/handler/component"
// , "solr/core/src/java/org/apache/solr/handler/export"
// , "solr/core/src/java/org/apache/solr/handler/loader"
// , "solr/core/src/java/org/apache/solr/handler/tagger"
, "solr/core/src/java/org/apache/solr/handler/admin"
, "solr/core/src/java/org/apache/solr/handler/component"
, "solr/core/src/java/org/apache/solr/handler/export"
, "solr/core/src/java/org/apache/solr/handler/loader"
, "solr/core/src/java/org/apache/solr/handler/tagger"
, "solr/core/src/java/org/apache/solr/highlight"
, "solr/core/src/java/org/apache/solr/index"
, "solr/core/src/java/org/apache/solr/internal"
@@ -107,11 +107,15 @@ class ValidateLogCallsTask extends DefaultTask {

// We have a log.something line, check for patterns we're not fond of.
def checkLogLine(File file, String line, int lineNumber, String prevLine) {
// If the line has been explicitly checked, skip it.
if (line.replaceAll("\\s", "").toLowerCase().contains("//logok")) {
return
}
// Strip all of the comments, things in quotes and the like.
def level = ""
def lev = (line =~ "log\\.(.*?)\\(")
if (lev.find()) {
level = lev.group(1)
level = lev.group(1).toLowerCase().trim()
}
def stripped =
line.replaceFirst("//.*", " ") // remove comment to EOL. Again, fragile due to the possibility of embedded double slashes
@@ -126,35 +130,50 @@ class ValidateLogCallsTask extends DefaultTask {
def m = stripped =~ "\\(.*?\\)"
def hasParens = m.find()
def hasPlus = stripped.contains("+")
if (hasParens == false && hasPlus == false) {
return

// Check that previous line isn't an if statement for always-reported log levels. Arbitrary decision: we don't
// really care about checking for awkward constructions for WARN and above, so report a violation if the previous
// line contains an if for those levels.
boolean violation = false
boolean dontCare = level.equals("fatal") || level.equals("error") || level.equals("warn")
if (dontCare && prevLine.contains("is" + level + "enabled")) {
violation = true
}
// Check that previous line isn't an if statement for this log level
if (prevLine.toLowerCase().contains("is" + level + "enabled")) {
return
// Always report toString()
if (line.contains("toString(") == true && line.contains("Arrays.toString(") == false) {
violation = true
}
// There's a convention to declare a member variable for whether a level is enabled and check that rather than
// isDebugEnabled. So check plusses there too.
if (prevLine.toLowerCase().replaceAll("\\s+", "").contains("if(" + level + ")")
&& (checkPlus == false || line.contains("+") == false)) {
return
}
// isDebugEnabled. So check just plusses there too.
if (violation == false) {
if (hasParens == false && hasPlus == false) {
return
}
if (prevLine.replaceAll("\\s+", "").contains("if(" + level + ")")
&& (checkPlus == false || line.contains("+") == false)) {
return
}
if (prevLine.replaceAll("\\s+", "").contains("if(log.is" + level + "enabled")
&& (checkPlus == false || line.contains("+") == false)) {
return
}

if (level.equals("error") || level.equals("fatal")) {
if (checkPlus == false || hasPlus == false) {

if (dontCare && (checkPlus == false || hasPlus == false)) {
return
}
}
reportViolation(String.format("Suspicious logging call File: '%s' line: '%d' log message: '%s' parent path: '%s'. Parameterize or surround with 'if (log.is*Enabled) {... stripped: '%s', level: '%s' checkPlus '%s' hasParens '%s' errorfatal '%s' hasPlus '%s'"

reportViolation(String.format("Suspicious logging call File: '%s' line: '%d' log message: '%s' parent path: '%s'. Parameterize or surround with 'if (log.is*Enabled) {... stripped: '%s', level: '%s' checkPlus '%s' hasParens '%s' errorDontCare '%s' hasPlus '%s'"
, file.name
, lineNumber, line
, file.getParentFile().getAbsolutePath()
, stripped //nocommit, debugging.
, level
, checkPlus
, hasParens
,level.equals("error") || level.equals("fatal"),
hasPlus))
, hasParens
, dontCare
, hasPlus))
}
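To make those checks concrete, here is a hypothetical set of log calls and how the task treats them (a sketch only: 'log' is assumed to be an slf4j Logger, and buildSummary() is an invented stand-in for any costly call):

    log.debug("Found " + count + " cores");      // flagged: the concatenation runs even when debug is disabled
    log.debug("Found {} cores", count);          // passes: parameterized, arguments are rendered only when enabled
    log.info("State: {}", coll.toString());      // flagged: toString() is evaluated eagerly; pass the object itself
    if (log.isDebugEnabled()) {
      log.debug("Summary: {}", buildSummary());  // an is*Enabled guard on the previous line lets an otherwise-suspicious call pass
    }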
@@ -218,7 +237,7 @@ class ValidateLogCallsTask extends DefaultTask {
}
switch (state) { // It's just easier to do this here rather than read another line in the switch above.
case 0:
prevLine = line;
prevLine = line.toLowerCase();
break;
case 1:
break;
@@ -33,15 +33,22 @@ NOTES:
log.info("stuff {}", exception) will not print the full stack,
log.info("stuff ", exception) _will_ print the full stack.
log.info("stuff {} ", some_object, exception) will print the full stack.
If you're puzzled as to why so many logging calls don't have a matching
number of curly-braces and parameters, this is usually why if they involve
exceptions.
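A minimal sketch of the three cases (variable names are illustrative):

    Exception e = new RuntimeException("test");
    log.info("stuff {}", e);        // the {} consumes the exception: message only, no stack trace
    log.info("stuff ", e);          // no placeholder left for it: the full stack trace is printed
    log.info("stuff {} ", obj, e);  // obj fills the {}; the trailing exception still prints the full stack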

- When slf4j supports lambdas in logging calls (log4j2 does now),
we can use lambdas rather than "if log.is*Enabled". slf4j 2.0
will support them when released.
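For illustration, the log4j2 flavor looks roughly like this (not slf4j API; expensiveSummary() is a made-up stand-in for any costly call):

    // The Supplier is only invoked when the level is enabled, so no if-guard is needed:
    logger.debug("state: {}", () -> expensiveSummary());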

- error and fatal level messages are NOT flagged. However, if you want to
- warn, error, and fatal level messages are NOT flagged. However, if you want to
check these levels for including '+', specify '-PcheckPlus=true'. This is more
a style than a functional check.
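For example, assuming the task is registered in the gradle build as validateLogCalls:

    gradlew validateLogCalls -PcheckPlus=true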

- You can get into some pretty convoluted constructs trying to pass some of these
checks. Adding //logok, with or without spaces will cause the line to pass
no matter what. Please use this hack sparingly.
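For instance, a line like the following passes no matter what it contains (expensiveString() is just a placeholder):

    log.info("unavoidably awkward: " + expensiveString()); // logok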

For a fuller discussion, see LUCENE-7788 and the other JIRAs linked
from there.
@@ -894,13 +894,11 @@ public class CoreContainer {

private void warnUsersOfInsecureSettings() {
if (authenticationPlugin == null || authorizationPlugin == null) {
if (log.isWarnEnabled()) {
log.warn("Not all security plugins configured! authentication={} authorization={}. Solr is only as secure as " +
"you make it. Consider configuring authentication/authorization before exposing Solr to users internal or " +
"external. See https://s.apache.org/solrsecurity for more info",
log.warn("Not all security plugins configured! authentication={} authorization={}. Solr is only as secure as {}{}"
, "you make it. Consider configuring authentication/authorization before exposing Solr to users internal or "
, "external. See https://s.apache.org/solrsecurity for more info",
(authenticationPlugin != null) ? "enabled" : "disabled",
(authorizationPlugin != null) ? "enabled" : "disabled");
}
}

if (authenticationPlugin !=null && StringUtils.isNotEmpty(System.getProperty("solr.jetty.https.port"))) {
@@ -1378,10 +1376,8 @@ public class CoreContainer {
case none:
throw original;
default:
if (log.isWarnEnabled()) {
log.warn("Failed to create core, and did not recognize specified 'CoreInitFailedAction': [{}]. Valid options are {}.",
action, Arrays.asList(CoreInitFailedAction.values()));
}
log.warn("Failed to create core, and did not recognize specified 'CoreInitFailedAction': [{}]. Valid options are {}.",
action, Arrays.asList(CoreInitFailedAction.values()));
throw original;
}
}
@@ -1980,11 +1976,9 @@ public class CoreContainer {
DocCollection coll = getZkController().getZkStateReader().getClusterState().getCollection(cd.getCollectionName());
for (Replica rep : coll.getReplicas()) {
if (coreName.equals(rep.getCoreName())) {
if (log.isWarnEnabled()) {
log.warn("Core properties file for node {} found with no coreNodeName, attempting to repair with value {}. See SOLR-11503. " +
"This message should only appear if upgrading from collections created Solr 6.6.1 through 7.1.",
rep.getCoreName(), rep.getName());
}
log.warn("Core properties file for node {} found with no coreNodeName, attempting to repair with value {}. See SOLR-11503. {}"
, "This message should only appear if upgrading from collections created Solr 6.6.1 through 7.1."
, rep.getCoreName(), rep.getName());
cd.getCloudDescriptor().setCoreNodeName(rep.getName());
coresLocator.persist(this, cd);
return true;
@@ -109,9 +109,7 @@ public class CorePropertiesLocator implements CoresLocator {
try {
Files.deleteIfExists(propfile);
} catch (IOException e) {
if (log.isWarnEnabled()) {
log.warn("Couldn't delete core properties file {}: {}", propfile, e.getMessage());
}
log.warn("Couldn't delete core properties file {}: {}", propfile, e.getMessage());
}
}
}

@@ -198,7 +196,7 @@ public class CorePropertiesLocator implements CoresLocator {
return ret;
}
catch (IOException e) {
log.error("Couldn't load core descriptor from {}:{}", propertiesFile, e.toString());
log.error("Couldn't load core descriptor from {}:", propertiesFile, e);
return null;
}
@@ -47,7 +47,7 @@ public class Diagnostics {
sb.append(info);
// sb.append("\n");
}
log.error(sb.toString());
log.error("{}", sb);
}
@@ -383,7 +383,7 @@ public abstract class DirectoryFactory implements NamedListInitializedPlugin,
log.warn("Delete old index directory {} failed.", dirToRmPath);
}
} catch (IOException ioExc) {
log.error("Failed to delete old directory {} due to: {}", dir.getAbsolutePath(), ioExc.toString());
log.error("Failed to delete old directory {} due to: ", dir.getAbsolutePath(), ioExc);
}
}
}
@@ -259,9 +259,7 @@ public class PluginBag<T> implements AutoCloseable {
void setDefault(String def) {
if (!registry.containsKey(def)) return;
if (this.def != null) {
if (log.isWarnEnabled()) {
log.warn("Multiple defaults for : {}", meta.getCleanTag());
}
log.warn("Multiple defaults for : {}", meta.getCleanTag());
}
this.def = def;
}

@@ -300,9 +298,7 @@ public class PluginBag<T> implements AutoCloseable {
if (meta.clazz.equals(SolrRequestHandler.class)) name = RequestHandlers.normalize(info.name);
PluginHolder<T> old = put(name, o);
if (old != null) {
if (log.isWarnEnabled()) {
log.warn("Multiple entries of {} with name {}", meta.getCleanTag(), name);
}
log.warn("Multiple entries of {} with name {}", meta.getCleanTag(), name);
}
}
if (infos.size() > 0) { // Aggregate logging

@@ -457,7 +453,7 @@ public class PluginBag<T> implements AutoCloseable {
private synchronized boolean createInst() {
if (lazyInst != null) return false;
if (log.isInfoEnabled()) {
log.info("Going to create a new {} with {} ", pluginMeta.getCleanTag(), pluginInfo.toString());
log.info("Going to create a new {} with {} ", pluginMeta.getCleanTag(), pluginInfo);
}
if (resourceLoader instanceof MemClassLoader) {
MemClassLoader loader = (MemClassLoader) resourceLoader;
@@ -203,11 +203,9 @@ public class SolrConfig extends XmlConfigFile implements MapSerializable {

booleanQueryMaxClauseCount = getInt("query/maxBooleanClauses", IndexSearcher.getMaxClauseCount());
if (IndexSearcher.getMaxClauseCount() < booleanQueryMaxClauseCount) {
if (log.isWarnEnabled()) {
log.warn("solrconfig.xml: <maxBooleanClauses> of {} is greater than global limit of {} " +
"and will have no effect set 'maxBooleanClauses' in solr.xml to increase global limit"
, booleanQueryMaxClauseCount, IndexSearcher.getMaxClauseCount());
}
log.warn("solrconfig.xml: <maxBooleanClauses> of {} is greater than global limit of {} {}"
, booleanQueryMaxClauseCount, IndexSearcher.getMaxClauseCount()
, "and will have no effect set 'maxBooleanClauses' in solr.xml to increase global limit");
}

// Warn about deprecated / discontinued parameters

@@ -317,13 +315,10 @@ public class SolrConfig extends XmlConfigFile implements MapSerializable {
}

if (version == Version.LATEST && !versionWarningAlreadyLogged.getAndSet(true)) {
if (log.isWarnEnabled()) {
log.warn(
"You should not use LATEST as luceneMatchVersion property: " +
"if you use this setting, and then Solr upgrades to a newer release of Lucene, " +
"sizable changes may happen. If precise back compatibility is important " +
"then you should instead explicitly specify an actual Lucene version.");
}
log.warn("You should not use LATEST as luceneMatchVersion property: {}{}{}"
, "if you use this setting, and then Solr upgrades to a newer release of Lucene, "
, "sizable changes may happen. If precise back compatibility is important "
, "then you should instead explicitly specify an actual Lucene version.");
}

return version;

@@ -760,9 +755,7 @@ public class SolrConfig extends XmlConfigFile implements MapSerializable {
try {
urls.addAll(SolrResourceLoader.getURLs(libPath));
} catch (IOException e) {
if (log.isWarnEnabled()) {
log.warn("Couldn't add files from {} to classpath: {}", libPath, e.getMessage());
}
log.warn("Couldn't add files from {} to classpath: {}", libPath, e.getMessage());
}
}

@@ -788,18 +781,14 @@ public class SolrConfig extends XmlConfigFile implements MapSerializable {
else
urls.addAll(SolrResourceLoader.getFilteredURLs(dir, regex));
} catch (IOException e) {
if (log.isWarnEnabled()) {
log.warn("Couldn't add files from {} filtered by {} to classpath: {}", dir, regex, e.getMessage());
}
log.warn("Couldn't add files from {} filtered by {} to classpath: {}", dir, regex, e.getMessage());
}
} else if (null != path) {
final Path dir = instancePath.resolve(path);
try {
urls.add(dir.toUri().toURL());
} catch (MalformedURLException e) {
if (log.isWarnEnabled()) {
log.warn("Couldn't add file {} to classpath: {}", dir, e.getMessage());
}
log.warn("Couldn't add file {} to classpath: {}", dir, e.getMessage());
}
} else {
throw new RuntimeException("lib: missing mandatory attributes: 'dir' or 'path'");
@@ -541,10 +541,8 @@ class SolrCores {
public TransientSolrCoreCache getTransientCacheHandler() {

if (transientCoreCache == null) {
if (log.isErrorEnabled()) {
log.error("No transient handler has been defined. Check solr.xml to see if an attempt to provide a custom " +
"TransientSolrCoreCacheFactory was done incorrectly since the default should have been used otherwise.");
}
log.error("No transient handler has been defined. Check solr.xml to see if an attempt to provide a custom {}"
, "TransientSolrCoreCacheFactory was done incorrectly since the default should have been used otherwise.");
return null;
}
return transientCoreCache.getTransientSolrCoreCache();
@@ -74,9 +74,7 @@ public final class SolrPaths {
} catch (NamingException e) {
log.debug("No /solr/home in JNDI");
} catch (RuntimeException ex) {
if (log.isWarnEnabled()) {
log.warn("Odd RuntimeException while testing for JNDI: {}", ex.getMessage());
}
log.warn("Odd RuntimeException while testing for JNDI: {}", ex.getMessage());
}

// Now try system property
@@ -223,7 +223,7 @@ public class SolrResourceLoader implements ResourceLoader, Closeable {
allURLs.addAll(urls);
for (URL url : urls) {
if (log.isDebugEnabled()) {
log.debug("Adding '{}' to classloader", url.toString());
log.debug("Adding '{}' to classloader", url);
}
}

@@ -472,9 +472,7 @@ public class SolrResourceLoader implements ResourceLoader, Closeable {
} else if (TokenFilterFactory.class.isAssignableFrom(expectedType)) {
return clazz = TokenFilterFactory.lookupClass(name).asSubclass(expectedType);
} else {
if (log.isWarnEnabled()) {
log.warn("'{}' looks like an analysis factory, but caller requested different class type: {}", cname, expectedType.getName());
}
log.warn("'{}' looks like an analysis factory, but caller requested different class type: {}", cname, expectedType.getName());
}
} catch (IllegalArgumentException ex) {
// ok, we fall back to legacy loading

@@ -656,9 +654,7 @@ public class SolrResourceLoader implements ResourceLoader, Closeable {
try {
infoRegistry.put(bean.getName(), bean);
} catch (Exception e) {
if (log.isWarnEnabled()) {
log.warn("could not register MBean '{}'.", bean.getName(), e);
}
log.warn("could not register MBean '{}'.", bean.getName(), e);
}
}
}
@@ -206,9 +206,7 @@ public class SolrXmlConfig {
return properties;
}
catch (XPathExpressionException e) {
if (log.isWarnEnabled()) {
log.warn("Error parsing solr.xml: {}", e.getMessage());
}
log.warn("Error parsing solr.xml: {}", e.getMessage());
return null;
}
}
@@ -188,7 +188,7 @@ public class ZkContainer {
if (testing_beforeRegisterInZk != null) {
boolean didTrigger = testing_beforeRegisterInZk.test(cd);
if (log.isDebugEnabled()) {
log.debug((didTrigger ? "Ran" : "Skipped") + " pre-zk hook");
log.debug("{} pre-zk hook", (didTrigger ? "Ran" : "Skipped"));
}
}
if (!core.getCoreContainer().isShutDown()) {
@@ -311,9 +311,7 @@ public class SolrSnapshotMetaDataManager {
solrCore.getDirectoryFactory().release(d);
}
} else {
if (log.isWarnEnabled()) {
log.warn("Commit with name {} is not persisted for core {}", commitName, solrCore.getName());
}
log.warn("Commit with name {} is not persisted for core {}", commitName, solrCore.getName());
}

return result;
@@ -90,7 +90,9 @@ public class AdminHandlersProxy {
}
log.debug("Nodes requested: {}", nodes);
}
log.debug(PARAM_NODES + " parameter {} specified on {} request", nodeNames, pathStr);
if (log.isDebugEnabled()) {
log.debug("{} parameter {} specified on {} request", PARAM_NODES, nodeNames, pathStr);
}

Map<String, Pair<Future<NamedList<Object>>, SolrClient>> responses = new HashMap<>();
for (String node : nodes) {

@@ -108,7 +110,9 @@ public class AdminHandlersProxy {
log.warn("Timeout when fetching result from node {}", entry.getKey(), te);
}
}
log.info("Fetched response from {} nodes: {}", responses.keySet().size(), responses.keySet());
if (log.isInfoEnabled()) {
log.info("Fetched response from {} nodes: {}", responses.keySet().size(), responses.keySet());
}
return true;
}
@@ -210,7 +210,7 @@ public class ColStatus {
rsp.remove("fieldInfoLegend");
}
} catch (SolrServerException | IOException e) {
log.warn("Error getting details of replica segments from " + url, e);
log.warn("Error getting details of replica segments from {}", url, e);
}
}
if (nonCompliant.isEmpty()) {
@@ -80,7 +80,7 @@ public class CollectionHandlerApi extends BaseHandlerApiSupport {

for (Meta meta : Meta.values()) {
if (result.get(meta) == null) {
log.error("ERROR_INIT. No corresponding API implementation for : " + meta.commandName);
log.error("ERROR_INIT. No corresponding API implementation for : {}", meta.commandName);
}
}
@@ -249,7 +249,10 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown action: " + a);
}
CollectionOperation operation = CollectionOperation.get(action);
log.info("Invoked Collection Action :{} with params {} and sendToOCPQueue={}", action.toLower(), req.getParamString(), operation.sendToOCPQueue);
if (log.isInfoEnabled()) {
log.info("Invoked Collection Action :{} with params {} and sendToOCPQueue={}"
, action.toLower(), req.getParamString(), operation.sendToOCPQueue);
}
MDCLoggingContext.setCollection(req.getParams().get(COLLECTION));
invokeAction(req, rsp, cores, action, operation);
} else {

@@ -1358,7 +1361,7 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
success = true;
break;
}
log.warn("Force leader attempt {}. Waiting 5 secs for an active leader. State of the slice: {}", (i + 1), slice);
log.warn("Force leader attempt {}. Waiting 5 secs for an active leader. State of the slice: {}", (i + 1), slice); //logok
}

if (success) {
@@ -1379,7 +1382,9 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission

if (createCollResponse.getResponse().get("exception") != null) {
// the main call failed, don't wait
log.info("Not waiting for active collection due to exception: " + createCollResponse.getResponse().get("exception"));
if (log.isInfoEnabled()) {
log.info("Not waiting for active collection due to exception: {}", createCollResponse.getResponse().get("exception"));
}
return;
}
@@ -1393,8 +1398,10 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
CloudConfig ccfg = cc.getConfig().getCloudConfig();
Integer seconds = ccfg.getCreateCollectionWaitTimeTillActive();
Boolean checkLeaderOnly = ccfg.isCreateCollectionCheckLeaderActive();
log.info("Wait for new collection to be active for at most " + seconds + " seconds. Check all shard "
+ (checkLeaderOnly ? "leaders" : "replicas"));
if (log.isInfoEnabled()) {
log.info("Wait for new collection to be active for at most {} seconds. Check all shard {}"
, seconds, (checkLeaderOnly ? "leaders" : "replicas"));
}

try {
cc.getZkController().getZkStateReader().waitForState(collectionName, seconds, TimeUnit.SECONDS, (n, c) -> {

@@ -1416,8 +1423,10 @@ public class CollectionsHandler extends RequestHandlerBase implements Permission
}
for (Replica replica : replicas) {
String state = replica.getStr(ZkStateReader.STATE_PROP);
log.debug("Checking replica status, collection={} replica={} state={}", collectionName,
replica.getCoreUrl(), state);
if (log.isDebugEnabled()) {
log.debug("Checking replica status, collection={} replica={} state={}", collectionName,
replica.getCoreUrl(), state);
}
if (!n.contains(replica.getNodeName())
|| !state.equals(Replica.State.ACTIVE.toString())) {
replicaNotAliveCnt++;
@@ -122,7 +122,9 @@ public class ConfigSetsHandler extends RequestHandlerBase implements PermissionN

void invokeAction(SolrQueryRequest req, SolrQueryResponse rsp, ConfigSetAction action) throws Exception {
ConfigSetOperation operation = ConfigSetOperation.get(action);
log.info("Invoked ConfigSet Action :{} with params {} ", action.toLower(), req.getParamString());
if (log.isInfoEnabled()) {
log.info("Invoked ConfigSet Action :{} with params {} ", action.toLower(), req.getParamString());
}
Map<String, Object> result = operation.call(req, rsp, this);
sendToZk(rsp, operation, result);
}

@@ -188,8 +190,10 @@ public class ConfigSetsHandler extends RequestHandlerBase implements PermissionN

boolean getTrusted(SolrQueryRequest req) {
AuthenticationPlugin authcPlugin = coreContainer.getAuthenticationPlugin();
log.info("Trying to upload a configset. authcPlugin: {}, user principal: {}",
authcPlugin, req.getUserPrincipal());
if (log.isInfoEnabled()) {
log.info("Trying to upload a configset. authcPlugin: {}, user principal: {}",
authcPlugin, req.getUserPrincipal());
}
if (authcPlugin != null && req.getUserPrincipal() != null) {
return true;
}
@@ -101,7 +101,9 @@ public class HealthCheckHandler extends RequestHandlerBase {
rsp.setException(new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Health check is only available when running in SolrCloud mode"));
return;
}
log.debug("Invoked HealthCheckHandler on [{}]", coreContainer.getZkController().getNodeName());
if (log.isDebugEnabled()) {
log.debug("Invoked HealthCheckHandler on [{}]", coreContainer.getZkController().getNodeName());
}
ZkStateReader zkStateReader = cores.getZkController().getZkStateReader();
ClusterState clusterState = zkStateReader.getClusterState();
// Check for isConnected and isClosed
@@ -184,7 +184,9 @@ public class IndexSizeEstimator {
}
if (reader.maxDoc() > samplingThreshold) {
samplingStep = Math.round(100.0f / samplingPercent);
log.info("- number of documents {} larger than {}, sampling percent is {} and sampling step {}", reader.maxDoc(), samplingThreshold, samplingPercent, samplingStep);
if (log.isInfoEnabled()) {
log.info("- number of documents {} larger than {}, sampling percent is {} and sampling step {}", reader.maxDoc(), samplingThreshold, samplingPercent, samplingStep);
}
if (reader.maxDoc() / samplingStep < 10) {
throw new IllegalArgumentException("Out of " + reader.maxDoc() + " less than 10 documents would be sampled, which is too unreliable. Increase the samplingPercent.");
}
@@ -82,9 +82,10 @@ public class LoggingHandler extends RequestHandlerBase implements SolrCoreAware
if(params.get("test")!=null) {
log.trace("trace message");
log.debug( "debug message");
log.info("info (with exception)", new RuntimeException("test") );
log.warn("warn (with exception)", new RuntimeException("test") );
log.error("error (with exception)", new RuntimeException("test") );
RuntimeException exc = new RuntimeException("test");
log.info("info (with exception) INFO", exc );
log.warn("warn (with exception) WARN", exc );
log.error("error (with exception) ERROR", exc );
}

String[] set = params.getParams("set");
@@ -390,7 +390,7 @@ public class LukeRequestHandler extends RequestHandlerBase
fieldMap.add("index", "(unstored field)");
}
} catch (Exception ex) {
log.warn("error reading field: " + fieldName);
log.warn("error reading field: {}", fieldName);
}
}
}

@@ -622,9 +622,9 @@ public class LukeRequestHandler extends RequestHandlerBase
try {
return commit.getDirectory().fileLength(commit.getSegmentsFileName());
} catch (NoSuchFileException okException) {
log.debug("Unable to determine the (optional) fileSize for the current IndexReader's segments file because it is "
+ "no longer in the Directory, this can happen if there are new commits since the Reader was opened",
okException);
log.debug("Unable to determine the (optional) fileSize for the current IndexReader's segments file because it is {}{}"
, "no longer in the Directory, this can happen if there are new commits since the Reader was opened"
, okException);
} catch (IOException strangeException) {
log.warn("Ignoring IOException while attempting to determine the (optional) fileSize stat for the current IndexReader's segments file",
strangeException);
@@ -115,7 +115,7 @@ public class MetricsCollectorHandler extends RequestHandlerBase {
// silently drop request
return;
}
//log.info("#### " + req.toString());
//log.info("#### {}", req);
if (req.getContentStreams() == null) { // no content
return;
}

@@ -153,7 +153,7 @@ public class MetricsCollectorHandler extends RequestHandlerBase {
}
String metricName = (String)doc.getFieldValue(MetricUtils.METRIC_NAME);
if (metricName == null) {
log.warn("Missing " + MetricUtils.METRIC_NAME + " field in document, skipping: " + doc);
log.warn("Missing {} field in document, skipping: {}", MetricUtils.METRIC_NAME, doc);
return;
}
doc.remove(MetricUtils.METRIC_NAME);

@@ -161,13 +161,13 @@ public class MetricsCollectorHandler extends RequestHandlerBase {
doc.remove(SolrReporter.REGISTRY_ID);
String groupId = (String)doc.getFieldValue(SolrReporter.GROUP_ID);
if (groupId == null) {
log.warn("Missing " + SolrReporter.GROUP_ID + " field in document, skipping: " + doc);
log.warn("Missing {} field in document, skipping: {}", SolrReporter.GROUP_ID, doc);
return;
}
doc.remove(SolrReporter.GROUP_ID);
String reporterId = (String)doc.getFieldValue(SolrReporter.REPORTER_ID);
if (reporterId == null) {
log.warn("Missing " + SolrReporter.REPORTER_ID + " field in document, skipping: " + doc);
log.warn("Missing {} field in document, skipping: {}", SolrReporter.REPORTER_ID, doc);
return;
}
doc.remove(SolrReporter.REPORTER_ID);
@@ -252,7 +252,7 @@ public class MetricsHistoryHandler extends RequestHandlerBase implements Permiss
DocCollection systemColl = clusterState.getCollectionOrNull(CollectionAdminParams.SYSTEM_COLL);
if (systemColl == null) {
if (logMissingCollection) {
log.info("No " + CollectionAdminParams.SYSTEM_COLL + " collection, keeping metrics history in memory.");
log.info("No {} collection, keeping metrics history in memory.", CollectionAdminParams.SYSTEM_COLL);
logMissingCollection = false;
}
factory.setPersistent(false);

@@ -266,7 +266,7 @@ public class MetricsHistoryHandler extends RequestHandlerBase implements Permiss
}
}
if (!ready) {
log.debug(CollectionAdminParams.SYSTEM_COLL + "collection not ready yet, keeping metrics history in memory");
log.debug("{} collection not ready yet, keeping metrics history in memory", CollectionAdminParams.SYSTEM_COLL);
factory.setPersistent(false);
return;
}

@@ -288,7 +288,7 @@ public class MetricsHistoryHandler extends RequestHandlerBase implements Permiss
logMissingCollection = true;
} catch (Exception e) {
if (logMissingCollection) {
log.info("No " + CollectionAdminParams.SYSTEM_COLL + " collection, keeping metrics history in memory.");
log.info("No {} collection, keeping metrics history in memory.", CollectionAdminParams.SYSTEM_COLL);
}
logMissingCollection = false;
factory.setPersistent(false);

@@ -341,7 +341,7 @@ public class MetricsHistoryHandler extends RequestHandlerBase implements Permiss
try {
nodeName = LeaderElector.getNodeName(oid);
} catch (Exception e) {
log.warn("Unknown format of leader id, skipping: " + oid, e);
log.warn("Unknown format of leader id, skipping: {}", oid, e);
return null;
}
return nodeName;

@@ -392,7 +392,7 @@ public class MetricsHistoryHandler extends RequestHandlerBase implements Permiss
if (Thread.interrupted()) {
return;
}
log.debug("-- collecting local " + group + "...");
log.debug("-- collecting local {}...", group);
ModifiableSolrParams params = new ModifiableSolrParams();
params.add(MetricsHandler.GROUP_PARAM, group.toString());
params.add(MetricsHandler.COMPACT_PARAM, "true");

@@ -650,7 +650,9 @@ public class MetricsHistoryHandler extends RequestHandlerBase implements Permiss

@Override
public void close() {
log.debug("Closing " + hashCode());
if (log.isDebugEnabled()) {
log.debug("Closing {}", hashCode());
}
if (collectService != null) {
boolean shutdown = false;
while (!shutdown) {

@@ -806,7 +808,7 @@ public class MetricsHistoryHandler extends RequestHandlerBase implements Permiss
u = new URL(u.getProtocol(), u.getHost(), u.getPort(), "/api/cluster/metrics/history");
url = u.toString();
} catch (MalformedURLException e) {
log.warn("Invalid Overseer url '" + baseUrl + "', unable to fetch remote metrics history", e);
log.warn("Invalid Overseer url '{}', unable to fetch remote metrics history", baseUrl, e);
return null;
}
// always use javabin

@@ -820,7 +822,7 @@ public class MetricsHistoryHandler extends RequestHandlerBase implements Permiss
return (NamedList<Object>)codec.unmarshal(new ByteArrayInputStream(data));
}
} catch (IOException e) {
log.warn("Exception forwarding request to Overseer at " + url, e);
log.warn("Exception forwarding request to Overseer at {}", url, e);
return null;
}
}
@@ -112,8 +112,7 @@ class PrepRecoveryOp implements CoreAdminHandler.CoreAdminOp {
state == Replica.State.ACTIVE);

if (leaderDoesNotNeedRecovery) {
log.warn(
"Leader " + cname + " ignoring request to be in the recovering state because it is live and active.");
log.warn("Leader {} ignoring request to be in the recovering state because it is live and active.", cname);
}

ZkShardTerms shardTerms = coreContainer.getZkController().getShardTerms(collectionName, slice.getName());

@@ -128,15 +127,16 @@ class PrepRecoveryOp implements CoreAdminHandler.CoreAdminOp {

boolean onlyIfActiveCheckResult = onlyIfLeaderActive != null && onlyIfLeaderActive
&& localState != Replica.State.ACTIVE;
log.info(
"In WaitForState(" + waitForState + "): collection=" + collectionName + ", shard=" + slice.getName() +
", thisCore=" + cname + ", leaderDoesNotNeedRecovery=" + leaderDoesNotNeedRecovery +
", isLeader? " + cloudDescriptor.isLeader() +
", live=" + live + ", checkLive=" + checkLive + ", currentState=" + state.toString()
+ ", localState=" + localState + ", nodeName=" + nodeName +
", coreNodeName=" + coreNodeName + ", onlyIfActiveCheckResult=" + onlyIfActiveCheckResult
+ ", nodeProps: " + replica);

if (log.isInfoEnabled()) {
log.info(
"In WaitForState(" + waitForState + "): collection=" + collectionName + ", shard=" + slice.getName() +
", thisCore=" + cname + ", leaderDoesNotNeedRecovery=" + leaderDoesNotNeedRecovery +
", isLeader? " + cloudDescriptor.isLeader() +
", live=" + live + ", checkLive=" + checkLive + ", currentState=" + state
+ ", localState=" + localState + ", nodeName=" + nodeName +
", coreNodeName=" + coreNodeName + ", onlyIfActiveCheckResult=" + onlyIfActiveCheckResult
+ ", nodeProps: " + replica); //LOGOK
}
if (!onlyIfActiveCheckResult && replica != null && (state == waitForState || leaderDoesNotNeedRecovery)) {
if (checkLive == null) {
return true;
@@ -137,7 +137,7 @@ class RebalanceLeaders {
for (Slice slice : dc.getSlices()) {
ensurePreferredIsLeader(slice);
if (asyncRequests.size() == max) {
log.info("Queued " + max + " leader reassignments, waiting for some to complete.");
log.info("Queued {} leader reassignments, waiting for some to complete.", max);
keepGoing = waitAsyncRequests(maxWaitSecs, false);
if (keepGoing == false) {
break; // If we've waited longer than specified, don't continue to wait!

@@ -150,7 +150,7 @@ class RebalanceLeaders {
if (keepGoing == true) {
log.info("All leader reassignments completed.");
} else {
log.warn("Exceeded specified timeout of ." + maxWaitSecs + "' all leaders may not have been reassigned");
log.warn("Exceeded specified timeout of '{}' all leaders may not have been reassigned'", maxWaitSecs);
}

checkLeaderStatus();

@@ -276,8 +276,8 @@ class RebalanceLeaders {
// if there's only one we can't change anything.
private boolean electionQueueInBadState(List<String> electionNodes, Slice slice, Replica replica) {
if (electionNodes.size() < 2) { // if there's only one node in the queue, should already be leader and we shouldn't be here anyway.
log.warn("Rebalancing leaders and slice {} has less than two elements in the leader " +
"election queue, but replica {} doesn't think it's the leader.", slice.getName(), replica.getName());
log.warn("Rebalancing leaders and slice {} has less than two elements in the leader election queue, but replica {} doesn't think it's the leader."
, slice.getName(), replica.getName());
return true;
}

@@ -464,7 +464,9 @@ class RebalanceLeaders {
successes = new SimpleOrderedMap();
results.add("successes", successes);
}
log.info("Successfully changed leader of shard {} to replica {}", slice.getName(), replica.getName());
if (log.isInfoEnabled()) {
log.info("Successfully changed leader of shard {} to replica {}", slice.getName(), replica.getName());
}
SimpleOrderedMap res = new SimpleOrderedMap();
res.add("status", "success");
res.add("msg", "Successfully changed leader of slice " + slice.getName() + " to " + replica.getName());

@@ -482,7 +484,9 @@ class RebalanceLeaders {
results.add("failures", fails);

for (Map.Entry<String, String> ent : pendingOps.entrySet()) {
log.info("Failed to change leader of shard {} to replica {}", ent.getKey(), ent.getValue());
if (log.isInfoEnabled()) {
log.info("Failed to change leader of shard {} to replica {}", ent.getKey(), ent.getValue());
}
SimpleOrderedMap res = new SimpleOrderedMap();
res.add("status", "failed");
res.add("msg", String.format(Locale.ROOT, "Could not change leder for slice %s to %s", ent.getKey(), ent.getValue()));
@@ -71,10 +71,10 @@ class RequestSyncShardOp implements CoreAdminHandler.CoreAdminOp {
.getNewestSearcher(false);
SolrIndexSearcher searcher = searchHolder.get();
try {
log.debug(core.getCoreContainer()
.getZkController().getNodeName()
+ " synched "
+ searcher.count(new MatchAllDocsQuery()));
if (log.isDebugEnabled()) {
log.debug("{} synched {}", core.getCoreContainer().getZkController().getNodeName()
, searcher.count(new MatchAllDocsQuery()));
}
} finally {
searchHolder.decref();
}
@@ -304,7 +304,7 @@ public class SegmentsInfoRequestHandler extends RequestHandlerBase {
}
if (withFieldInfos) {
if (seg == null) {
log.debug("Skipping segment info - not available as a SegmentReader: " + segmentCommitInfo);
log.debug("Skipping segment info - not available as a SegmentReader: {}", segmentCommitInfo);
} else {
FieldInfos fis = seg.getFieldInfos();
SimpleOrderedMap<Object> fields = new SimpleOrderedMap<>();

@@ -392,7 +392,7 @@ public class SegmentsInfoRequestHandler extends RequestHandlerBase {
fieldFlags.add("sumTotalTermFreq", terms.getSumTotalTermFreq());
}
} catch (Exception e) {
log.debug("Exception retrieving term stats for field " + fi.name, e);
log.debug("Exception retrieving term stats for field {}", fi.name, e);
}

// probably too much detail?
@@ -195,14 +195,14 @@ public class ShowFileRequestHandler extends RequestHandlerBase

// Make sure the file exists, is readable and is not a hidden file
if( !adminFile.exists() ) {
log.error("Can not find: "+adminFile.getName() + " ["+adminFile.getAbsolutePath()+"]");
log.error("Can not find: {} [{}]", adminFile.getName(), adminFile.getAbsolutePath());
rsp.setException(new SolrException
( ErrorCode.NOT_FOUND, "Can not find: "+adminFile.getName()
+ " ["+adminFile.getAbsolutePath()+"]" ));
return;
}
if( !adminFile.canRead() || adminFile.isHidden() ) {
log.error("Can not show: "+adminFile.getName() + " ["+adminFile.getAbsolutePath()+"]");
log.error("Can not show: {} [{}]", adminFile.getName(), adminFile.getAbsolutePath());
rsp.setException(new SolrException
( ErrorCode.NOT_FOUND, "Can not show: "+adminFile.getName()
+ " ["+adminFile.getAbsolutePath()+"]" ));

@@ -257,7 +257,7 @@ public class ShowFileRequestHandler extends RequestHandlerBase
String fname = fnameIn.toUpperCase(Locale.ROOT);
if (hiddenFiles.contains(fname) || hiddenFiles.contains("*")) {
if (reportError) {
log.error("Cannot access " + fname);
log.error("Cannot access {}", fname);
rsp.setException(new SolrException(SolrException.ErrorCode.FORBIDDEN, "Can not access: " + fnameIn));
}
return true;

@@ -267,7 +267,7 @@ public class ShowFileRequestHandler extends RequestHandlerBase
// to fix it to handle all possibilities though.
if (fname.indexOf("..") >= 0 || fname.startsWith(".")) {
if (reportError) {
log.error("Invalid path: " + fname);
log.error("Invalid path: {}", fname);
rsp.setException(new SolrException(SolrException.ErrorCode.FORBIDDEN, "Invalid path: " + fnameIn));
}
return true;

@@ -306,7 +306,7 @@ public class ShowFileRequestHandler extends RequestHandlerBase

// Make sure the file exists, is readable and is not a hidden file
if (!zkClient.exists(adminFile, true)) {
log.error("Can not find: " + adminFile);
log.error("Can not find: {}", adminFile);
rsp.setException(new SolrException(SolrException.ErrorCode.NOT_FOUND, "Can not find: "
+ adminFile));
return null;

@@ -340,12 +340,12 @@ public class ShowFileRequestHandler extends RequestHandlerBase
else {
fname = fname.replace( '\\', '/' ); // normalize slashes
if( hiddenFiles.contains( fname.toUpperCase(Locale.ROOT) ) ) {
log.error("Can not access: "+ fname);
log.error("Can not access: {}", fname);
rsp.setException(new SolrException( SolrException.ErrorCode.FORBIDDEN, "Can not access: "+fname ));
return null;
}
if( fname.indexOf( ".." ) >= 0 ) {
log.error("Invalid path: "+ fname);
log.error("Invalid path: {}", fname);
rsp.setException(new SolrException( SolrException.ErrorCode.FORBIDDEN, "Invalid path: "+fname ));
return null;
}
@@ -99,7 +99,7 @@ class SplitOp implements CoreAdminHandler.CoreAdminOp {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Either path or targetCore param must be specified");
}

log.info("Invoked split action for core: " + cname);
log.info("Invoked split action for core: {}", cname);
String methodStr = params.get(CommonAdminParams.SPLIT_METHOD, SolrIndexSplitter.SplitMethod.REWRITE.toLower());
SolrIndexSplitter.SplitMethod splitMethod = SolrIndexSplitter.SplitMethod.get(methodStr);
if (splitMethod == null) {

@@ -350,7 +350,10 @@ class SplitOp implements CoreAdminHandler.CoreAdminOp {
}
}

log.info("Split histogram: ms={}, numBuckets={} sumBuckets={} numPrefixes={} numTriLevel={} numCollisions={}", timer.getTime(), counts.size(), sumBuckets, numPrefixes, numTriLevel, numCollisions);
if (log.isInfoEnabled()) {
log.info("Split histogram: ms={}, numBuckets={} sumBuckets={} numPrefixes={} numTriLevel={} numCollisions={}"
, timer.getTime(), counts.size(), sumBuckets, numPrefixes, numTriLevel, numCollisions);
}

return counts.values();
}

@@ -434,7 +437,10 @@ class SplitOp implements CoreAdminHandler.CoreAdminOp {
}
}

log.info("Split histogram from idField {}: ms={}, numBuckets={} sumBuckets={} numPrefixes={} numCollisions={}", idField, timer.getTime(), counts.size(), sumBuckets, numPrefixes, numCollisions);
if (log.isInfoEnabled()) {
log.info("Split histogram from idField {}: ms={}, numBuckets={} sumBuckets={} numPrefixes={} numCollisions={}"
, idField, timer.getTime(), counts.size(), sumBuckets, numPrefixes, numCollisions);
}

return counts.values();
}
@@ -106,9 +106,9 @@ public class SystemInfoHandler extends RequestHandlerBase
InetAddress addr = InetAddress.getLocalHost();
hostname = addr.getCanonicalHostName();
} catch (Exception e) {
log.warn("Unable to resolve canonical hostname for local host, possible DNS misconfiguration. " +
"Set the '"+PREVENT_REVERSE_DNS_OF_LOCALHOST_SYSPROP+"' sysprop to true on startup to " +
"prevent future lookups if DNS can not be fixed.", e);
log.warn("Unable to resolve canonical hostname for local host, possible DNS misconfiguration. SET THE '{}' {}"
, PREVENT_REVERSE_DNS_OF_LOCALHOST_SYSPROP
, " sysprop to true on startup to prevent future lookups if DNS can not be fixed.", e);
hostname = null;
return;
}

@@ -116,10 +116,9 @@ public class SystemInfoHandler extends RequestHandlerBase

if (15000D < timer.getTime()) {
String readableTime = String.format(Locale.ROOT, "%.3f", (timer.getTime() / 1000));
log.warn("Resolving canonical hostname for local host took {} seconds, possible DNS misconfiguration. " +
"Set the '{}' sysprop to true on startup to prevent future lookups if DNS can not be fixed.",
readableTime, PREVENT_REVERSE_DNS_OF_LOCALHOST_SYSPROP);

log.warn("Resolving canonical hostname for local host took {} seconds, possible DNS misconfiguration. Set the '{}' {}"
, readableTime, PREVENT_REVERSE_DNS_OF_LOCALHOST_SYSPROP,
" sysprop to true on startup to prevent future lookups if DNS can not be fixed.");
}
}
@@ -687,10 +687,10 @@ public final class ZookeeperInfoHandler extends RequestHandlerBase {
if (childData != null)
childDataStr = (new BytesRef(childData)).utf8ToString();
} catch (KeeperException.NoNodeException nne) {
log.warn("State for collection " + collection +
" not found in /clusterstate.json or /collections/" + collection + "/state.json!");
log.warn("State for collection {} not found in /clusterstate.json or /collections/{}/state.json!"
, collection, collection);
} catch (Exception childErr) {
log.error("Failed to get " + collStatePath + " due to: " + childErr);
log.error("Failed to get {} due to", collStatePath, childErr);
}

if (childDataStr != null) {
@@ -155,7 +155,7 @@ public class ZookeeperStatusHandler extends RequestHandlerBase {
stat.put("role", zk.role);
}
} catch (SolrException se) {
log.warn("Failed talking to zookeeper " + zkClientHostPort, se);
log.warn("Failed talking to zookeeper {}", zkClientHostPort, se);
errors.add(se.getMessage());
Map<String, Object> stat = new HashMap<>();
stat.put("host", zkClientHostPort);
@@ -717,7 +717,7 @@ public class FacetComponent extends SearchComponent {
if (Boolean.TRUE.equals(responseHeader.getBooleanArg(SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY))) {
continue;
} else {
log.warn("corrupted response on "+srsp.getShardRequest()+": "+srsp.getSolrResponse());
log.warn("corrupted response on {} : {}", srsp.getShardRequest(), srsp.getSolrResponse());
throw new SolrException(ErrorCode.SERVER_ERROR,
"facet_counts is absent in response from " + srsp.getNodeName() +
", but "+SolrQueryResponse.RESPONSE_HEADER_PARTIAL_RESULTS_KEY+" hasn't been responded");

@@ -869,7 +869,9 @@ public class FacetComponent extends SearchComponent {
if (ent.getValue().count >= minCount) {
newQueryFacets.put(ent.getKey(), ent.getValue());
} else {
log.trace("Removing facetQuery/key: " + ent.getKey() + "/" + ent.getValue().toString() + " mincount=" + minCount);
if (log.isTraceEnabled()) {
log.trace("Removing facetQuery/key: {}/{} mincount={}", ent.getKey(), ent.getValue(), minCount);
}
replace = true;
}
}

@@ -1002,10 +1004,8 @@ public class FacetComponent extends SearchComponent {
ShardFacetCount sfc = dff.counts.get(name);
if (sfc == null) {
// we got back a term we didn't ask for?
log.error("Unexpected term returned for facet refining. key=" + key
+ " term='" + name + "'" + "\n\trequest params=" + sreq.params
+ "\n\ttoRefine=" + dff._toRefine + "\n\tresponse="
+ shardCounts);
log.error("Unexpected term returned for facet refining. key='{}' term='{}'\n\trequest params={}\n\ttoRefine={}\n\tresponse={}"
, key, name, sreq.params, dff._toRefine, shardCounts);
continue;
}
sfc.count += count;

@@ -1538,7 +1538,9 @@ public class FacetComponent extends SearchComponent {
if (ent.getValue().count >= minCount) {
newOne.put(ent.getKey(), ent.getValue());
} else {
log.trace("Removing facet/key: " + ent.getKey() + "/" + ent.getValue().toString() + " mincount=" + minCount);
if (log.isTraceEnabled()) {
log.trace("Removing facet/key: {}/{} mincount={}", ent.getKey(), ent.getValue(), minCount);
}
replace = true;
}
}
@@ -549,7 +549,8 @@ public class HttpShardHandlerFactory extends ShardHandlerFactory implements org.
throw new SolrException(ErrorCode.BAD_REQUEST, "Invalid URL syntax in \"shards\" parameter: " + shardsParamValue);
}
if (!localWhitelistHosts.contains(url.getHost() + ":" + url.getPort())) {
log.warn("The '"+ShardParams.SHARDS+"' parameter value '"+shardsParamValue+"' contained value(s) not on the shards whitelist ("+localWhitelistHosts+"), shardUrl:" + shardUrl);
log.warn("The '{}' parameter value '{}' contained value(s) not on the shards whitelist ({}), shardUrl: '{}'"
, ShardParams.SHARDS, shardsParamValue, localWhitelistHosts, shardUrl);
throw new SolrException(ErrorCode.FORBIDDEN,
"The '"+ShardParams.SHARDS+"' parameter value '"+shardsParamValue+"' contained value(s) not on the shards whitelist. shardUrl:" + shardUrl + "." +
HttpShardHandlerFactory.SET_SOLR_DISABLE_SHARDS_WHITELIST_CLUE);
@@ -86,8 +86,9 @@ public class MoreLikeThisComponent extends SearchComponent {

rb.setFieldFlags(flags);

log.debug("Starting MoreLikeThis.Process. isShard: "
+ params.getBool(ShardParams.IS_SHARD));
if (log.isDebugEnabled()) {
log.debug("Starting MoreLikeThis.Process. isShard: {}", params.getBool(ShardParams.IS_SHARD));
}
SolrIndexSearcher searcher = rb.req.getSearcher();

if (params.getBool(ShardParams.IS_SHARD, false)) {

@@ -112,7 +113,7 @@ public class MoreLikeThisComponent extends SearchComponent {
Entry<String,BooleanQuery> idToQuery = idToQueryIt.next();
String s = idToQuery.getValue().toString();

log.debug("MLT Query:" + s);
log.debug("MLT Query:{}", s);
temp.add(idToQuery.getKey(), idToQuery.getValue().toString());
}

@@ -135,7 +136,9 @@ public class MoreLikeThisComponent extends SearchComponent {
public void handleResponses(ResponseBuilder rb, ShardRequest sreq) {
if ((sreq.purpose & ShardRequest.PURPOSE_GET_TOP_IDS) != 0
&& rb.req.getParams().getBool(COMPONENT_NAME, false)) {
log.debug("ShardRequest.response.size: " + sreq.responses.size());
if (log.isDebugEnabled()) {
log.debug("ShardRequest.response.size: {}", sreq.responses.size());
}
for (ShardResponse r : sreq.responses) {
if (r.getException() != null) {
// This should only happen in case of using shards.tolerant=true. Omit this ShardResponse

@@ -143,11 +146,14 @@ public class MoreLikeThisComponent extends SearchComponent {
}
NamedList<?> moreLikeThisReponse = (NamedList<?>) r.getSolrResponse()
.getResponse().get("moreLikeThis");
log.debug("ShardRequest.response.shard: " + r.getShard());
if (log.isDebugEnabled()) {
log.debug("ShardRequest.response.shard: {}", r.getShard());
}
if (moreLikeThisReponse != null) {
for (Entry<String,?> entry : moreLikeThisReponse) {
log.debug("id: \"" + entry.getKey() + "\" Query: \""
+ entry.getValue() + "\"");
if (log.isDebugEnabled()) {
log.debug("id: '{}' Query: '{}'", entry.getKey(), entry.getValue());
}
ShardRequest s = buildShardQuery(rb, (String) entry.getValue(),
entry.getKey());
rb.addRequest(this, s);

@@ -158,8 +164,9 @@ public class MoreLikeThisComponent extends SearchComponent {

if ((sreq.purpose & ShardRequest.PURPOSE_GET_MLT_RESULTS) != 0) {
for (ShardResponse r : sreq.responses) {
log.debug("MLT Query returned: "
+ r.getSolrResponse().getResponse().toString());
if (log.isDebugEnabled()) {
log.debug("MLT Query returned: {}", r.getSolrResponse().getResponse());
}
}
}
}

@@ -180,7 +187,9 @@ public class MoreLikeThisComponent extends SearchComponent {
for (ShardRequest sreq : rb.finished) {
if ((sreq.purpose & ShardRequest.PURPOSE_GET_MLT_RESULTS) != 0) {
for (ShardResponse r : sreq.responses) {
log.debug("ShardRequest.response.shard: " + r.getShard());
if (log.isDebugEnabled()) {
log.debug("ShardRequest.response.shard: {}", r.getShard());
}
String key = r.getShardRequest().params
.get(MoreLikeThisComponent.DIST_DOC_ID);
SolrDocumentList shardDocList = (SolrDocumentList) r.getSolrResponse().getResponse().get("response");

@@ -188,9 +197,8 @@ public class MoreLikeThisComponent extends SearchComponent {
if (shardDocList == null) {
continue;
}

log.info("MLT: results added for key: " + key + " documents: "
+ shardDocList.toString());

log.info("MLT: results added for key: {} documents: {}", key, shardDocList);
SolrDocumentList mergedDocList = tempResults.get(key);

if (mergedDocList == null) {

@@ -203,7 +211,7 @@ public class MoreLikeThisComponent extends SearchComponent {
mergedDocList = mergeSolrDocumentList(mergedDocList,
shardDocList, mltcount, keyName);
}
log.debug("Adding docs for key: " + key);
log.debug("Adding docs for key: {}", key);
tempResults.put(key, mergedDocList);
}
}
@@ -323,7 +323,9 @@ public class QueryComponent extends SearchComponent
@Override
public void process(ResponseBuilder rb) throws IOException
{
log.debug("process: {}", rb.req.getParams());
if (log.isDebugEnabled()) {
log.debug("process: {}", rb.req.getParams());
}

SolrQueryRequest req = rb.req;
SolrParams params = req.getParams();
@ -247,7 +247,9 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore
|
|||
elevationProvider = handleConfigLoadingException(e, true);
|
||||
} else {
|
||||
configFileExists = true;
|
||||
log.info("Loading QueryElevation from: " + fC.getAbsolutePath());
|
||||
if (log.isInfoEnabled()) {
|
||||
log.info("Loading QueryElevation from: {}", fC.getAbsolutePath());
|
||||
}
|
||||
XmlConfigFile cfg = new XmlConfigFile(core.getResourceLoader(), configFileName);
|
||||
elevationProvider = loadElevationProvider(cfg);
|
||||
}
|
||||
|
@ -366,7 +368,7 @@ public class QueryElevationComponent extends SearchComponent implements SolrCore
|
|||
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
|
||||
"QueryElevationComponent must specify argument: " + CONFIG_FILE);
|
||||
}
|
||||
log.info("Loading QueryElevation from data dir: " + configFileName);
|
||||
log.info("Loading QueryElevation from data dir: {}", configFileName);
|
||||
|
||||
XmlConfigFile cfg;
|
||||
ZkController zkController = core.getCoreContainer().getZkController();
|
||||
|
|
|
@ -91,13 +91,14 @@ public class RangeFacetRequest extends FacetComponent.FacetBase {
|
|||
|
||||
if ((schemaField.getType() instanceof DateRangeField) && method.equals(FacetParams.FacetRangeMethod.DV)) {
|
||||
// the user has explicitly selected the FacetRangeMethod.DV method
|
||||
log.warn("Range facet method '" + FacetParams.FacetRangeMethod.DV + "' is not supported together with field type '" +
|
||||
DateRangeField.class + "'. Will use method '" + FacetParams.FacetRangeMethod.FILTER + "' instead");
|
||||
log.warn("Range facet method '{}' is not supported together with field type '{}'. Will use method '{}' instead"
|
||||
, FacetParams.FacetRangeMethod.DV, DateRangeField.class, FacetParams.FacetRangeMethod.FILTER);
|
||||
method = FacetParams.FacetRangeMethod.FILTER;
|
||||
}
|
||||
if (method.equals(FacetParams.FacetRangeMethod.DV) && !schemaField.hasDocValues() && (schemaField.getType().isPointField())) {
|
||||
log.warn("Range facet method '" + FacetParams.FacetRangeMethod.DV + "' is not supported on PointFields without docValues." +
|
||||
"Will use method '" + FacetParams.FacetRangeMethod.FILTER + "' instead");
|
||||
log.warn("Range facet method '{}' is not supported on PointFields without docValues. Will use method '{}' instead"
|
||||
, FacetParams.FacetRangeMethod.DV
|
||||
, FacetParams.FacetRangeMethod.FILTER);
|
||||
method = FacetParams.FacetRangeMethod.FILTER;
|
||||
}
|
||||
|
||||
|
@ -124,8 +125,8 @@ public class RangeFacetRequest extends FacetComponent.FacetBase {
|
|||
this.groupFacet = params.getBool(GroupParams.GROUP_FACET, false);
|
||||
if (groupFacet && method.equals(FacetParams.FacetRangeMethod.DV)) {
|
||||
// the user has explicitly selected the FacetRangeMethod.DV method
|
||||
log.warn("Range facet method '" + FacetParams.FacetRangeMethod.DV + "' is not supported together with '" +
|
||||
GroupParams.GROUP_FACET + "'. Will use method '" + FacetParams.FacetRangeMethod.FILTER + "' instead");
|
||||
log.warn("Range facet method '{}' is not supported together with '{}'. Will use method '{}' instead"
|
||||
, FacetParams.FacetRangeMethod.DV, GroupParams.GROUP_FACET, FacetParams.FacetRangeMethod.FILTER);
|
||||
method = FacetParams.FacetRangeMethod.FILTER;
|
||||
}
|
||||
|
||||
|
|
|
@ -166,10 +166,11 @@ public class RealTimeGetComponent extends SearchComponent
|
|||
.getNewestSearcher(false);
|
||||
SolrIndexSearcher searcher = searchHolder.get();
|
||||
try {
|
||||
log.debug(req.getCore()
|
||||
.getCoreContainer().getZkController().getNodeName()
|
||||
+ " min count to sync to (from most recent searcher view) "
|
||||
+ searcher.count(new MatchAllDocsQuery()));
|
||||
if (log.isDebugEnabled()) {
|
||||
log.debug("{} min count to sync to (from most recent searcher view) {}"
|
||||
, req.getCore().getCoreContainer().getZkController().getNodeName()
|
||||
, searcher.count(new MatchAllDocsQuery()));
|
||||
}
|
||||
} finally {
|
||||
searchHolder.decref();
|
||||
}
|
||||
|
@ -358,7 +359,7 @@ public class RealTimeGetComponent extends SearchComponent
|
|||
if (idStr == null) return;
|
||||
AtomicLong version = new AtomicLong();
|
||||
SolrInputDocument doc = getInputDocument(req.getCore(), new BytesRef(idStr), version, null, Resolution.DOC);
|
||||
log.info("getInputDocument called for id="+idStr+", returning: "+doc);
|
||||
log.info("getInputDocument called for id={}, returning {}", idStr, doc);
|
||||
rb.rsp.add("inputDocument", doc);
|
||||
rb.rsp.add("version", version.get());
|
||||
}
|
||||
|
@ -971,7 +972,7 @@ public class RealTimeGetComponent extends SearchComponent
|
|||
// the mappings.
|
||||
|
||||
for (int i=0; i<rb.slices.length; i++) {
|
||||
log.info("LOOKUP_SLICE:" + rb.slices[i] + "=" + rb.shards[i]);
|
||||
log.info("LOOKUP_SLICE:{}={}", rb.slices[i], rb.shards[i]);
|
||||
if (lookup.equals(rb.slices[i]) || slice.equals(rb.slices[i])) {
|
||||
return new String[]{rb.shards[i]};
|
||||
}
|
||||
|
|
|
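The RealTimeGetComponent hunk above is the clearest wasted-work case in the patch: searcher.count(new MatchAllDocsQuery()) walks the index, and before this change it ran even with DEBUG off, just to build a string that was then thrown away. A hedged sketch of the principle, where the expensive call is a stand-in rather than Solr's API:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical sketch: guarding a log call whose argument is expensive to compute.
    class ExpensiveLogArgumentSketch {
      private static final Logger log = LoggerFactory.getLogger(ExpensiveLogArgumentSketch.class);

      // Stand-in for something like searcher.count(new MatchAllDocsQuery()).
      private long countAllDocuments() {
        long count = 0;
        for (int i = 0; i < 1_000_000; i++) {
          count++; // pretend each step touches the index
        }
        return count;
      }

      void logMinCount(String nodeName) {
        // Parameterization alone would not help here: the argument expression
        // is evaluated before slf4j ever sees it. The guard is what saves the scan.
        if (log.isDebugEnabled()) {
          log.debug("{} min count to sync to {}", nodeName, countAllDocuments());
        }
      }
    }
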
@@ -344,7 +344,7 @@ public class SearchHandler extends RequestHandlerBase implements SolrCoreAware,
}
}
} catch (ExitableDirectoryReader.ExitingReaderException ex) {
log.warn( "Query: " + req.getParamString() + "; " + ex.getMessage());
log.warn("Query: {}; {}", req.getParamString(), ex.getMessage());
if( rb.rsp.getResponse() == null) {
rb.rsp.addResponse(new SolrDocumentList());
}

@@ -258,10 +258,10 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar
}
}
} catch (IOException e){
log.error(e.toString());
log.error("Error", e);
return null;
} catch (SyntaxError e) {
log.error(e.toString());
log.error("Error", e);
return null;
}

@@ -407,7 +407,9 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Unable to read spelling info for shard: " + srsp.getShard(), e);
}
log.info(srsp.getShard() + " " + nl);
if (log.isInfoEnabled()) {
log.info("{} {}", srsp.getShard(), nl);
}
if (nl != null) {
mergeData.totalNumberShardResponses++;
collectShardSuggestions(nl, mergeData);

@@ -778,7 +780,9 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar
boolean buildOnCommit = Boolean.parseBoolean((String) spellchecker.get("buildOnCommit"));
boolean buildOnOptimize = Boolean.parseBoolean((String) spellchecker.get("buildOnOptimize"));
if (buildOnCommit || buildOnOptimize) {
log.info("Registering newSearcher listener for spellchecker: " + checker.getDictionaryName());
if (log.isInfoEnabled()) {
log.info("Registering newSearcher listener for spellchecker: {}", checker.getDictionaryName());
}
core.registerNewSearcherListener(new SpellCheckerListener(core, checker, buildOnCommit, buildOnOptimize));
}
} else {

@@ -810,11 +814,12 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar
if (currentSearcher == null) {
// firstSearcher event
try {
log.info("Loading spell index for spellchecker: "
+ checker.getDictionaryName());
if (log.isInfoEnabled()) {
log.info("Loading spell index for spellchecker: {}", checker.getDictionaryName());
}
checker.reload(core, newSearcher);
} catch (IOException e) {
log.error( "Exception in reloading spell check index for spellchecker: " + checker.getDictionaryName(), e);
log.error( "Exception in reloading spell check index for spellchecker: {}", checker.getDictionaryName(), e);
}
} else {
// newSearcher event

@@ -824,7 +829,10 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar
if (newSearcher.getIndexReader().leaves().size() == 1) {
buildSpellIndex(newSearcher);
} else {
log.info("Index is not optimized therefore skipping building spell check index for: " + checker.getDictionaryName());
if (log.isInfoEnabled()) {
log.info("Index is not optimized therefore skipping building spell check index for: {}"
, checker.getDictionaryName());
}
}
}
}

@@ -833,11 +841,12 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar

private void buildSpellIndex(SolrIndexSearcher newSearcher) {
try {
log.info("Building spell index for spell checker: " + checker.getDictionaryName());
if (log.isInfoEnabled()) {
log.info("Building spell index for spell checker: {}", checker.getDictionaryName());
}
checker.build(core, newSearcher);
} catch (Exception e) {
log.error(
"Exception in building spell check index for spellchecker: " + checker.getDictionaryName(), e);
log.error("Exception in building spell check index for spellchecker: {}", checker.getDictionaryName(), e);
}
}

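The SpellCheckComponent catch blocks above also change what gets logged, not just how: log.error(e.toString()) records a single line and discards the stack trace, while passing the exception as the final argument, after the message and any {} parameters, tells slf4j to print the full trace. A hedged sketch, with an illustrative exception and message:

    import java.io.IOException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical sketch: keeping the stack trace when logging an exception.
    class ExceptionLoggingSketch {
      private static final Logger log = LoggerFactory.getLogger(ExceptionLoggingSketch.class);

      void demo() {
        try {
          throw new IOException("disk full");
        } catch (IOException e) {
          log.error(e.toString());                              // message only, trace lost
          log.error("Error", e);                                // message plus full stack trace
          log.error("Error reading {}", "spellcheck index", e); // parameters and the trace
        }
      }
    }
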
@@ -151,7 +151,9 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware,

if (buildOnCommit || buildOnOptimize || buildOnStartup) {
SuggesterListener listener = new SuggesterListener(core, suggester, buildOnCommit, buildOnOptimize, buildOnStartup, core.isReloaded());
log.info("Registering searcher listener for suggester: " + suggester.getName() + " - " + listener);
if (log.isInfoEnabled()) {
log.info("Registering searcher listener for suggester: {} = {}", suggester.getName(), listener);
}
core.registerFirstSearcherListener(listener);
core.registerNewSearcherListener(listener);
}

@@ -164,7 +166,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware,
@Override
public void prepare(ResponseBuilder rb) throws IOException {
SolrParams params = rb.req.getParams();
log.info("SuggestComponent prepare with : " + params);
log.info("SuggestComponent prepare with : {}", params);
if (!params.getBool(COMPONENT_NAME, false)) {
return;
}

@@ -196,7 +198,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware,
@Override
public int distributedProcess(ResponseBuilder rb) {
SolrParams params = rb.req.getParams();
log.info("SuggestComponent distributedProcess with : " + params);
log.info("SuggestComponent distributedProcess with : {}", params);
if (rb.stage < ResponseBuilder.STAGE_EXECUTE_QUERY)
return ResponseBuilder.STAGE_EXECUTE_QUERY;
if (rb.stage == ResponseBuilder.STAGE_EXECUTE_QUERY) {

@@ -218,7 +220,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware,
@Override
public void process(ResponseBuilder rb) throws IOException {
SolrParams params = rb.req.getParams();
log.info("SuggestComponent process with : " + params);
log.info("SuggestComponent process with : {}", params);
if (!params.getBool(COMPONENT_NAME, false) || suggesters.isEmpty()) {
return;
}

@@ -273,7 +275,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware,
@Override
public void finishStage(ResponseBuilder rb) {
SolrParams params = rb.req.getParams();
log.info("SuggestComponent finishStage with : " + params);
log.info("SuggestComponent finishStage with : {}", params);
if (!params.getBool(COMPONENT_NAME, false) || rb.stage != ResponseBuilder.STAGE_GET_FIELDS)
return;
int count = params.getInt(SUGGEST_COUNT, 1);

@@ -288,7 +290,9 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware,
@SuppressWarnings("unchecked")
Map<String, SimpleOrderedMap<NamedList<Object>>> namedList =
(Map<String, SimpleOrderedMap<NamedList<Object>>>) resp.get(SuggesterResultLabels.SUGGEST);
log.info(srsp.getShard() + " : " + namedList);
if (log.isInfoEnabled()) {
log.info("{} : {}", srsp.getShard(), namedList);
}
suggesterResults.add(toSuggesterResult(namedList));
}
}

@@ -511,20 +515,26 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware,
SolrIndexSearcher currentSearcher) {
long thisCallCount = callCount.incrementAndGet();
if (isCoreReload && thisCallCount == 1) {
log.info("Skipping first newSearcher call for suggester " + suggester + " in core reload");
log.info("Skipping first newSearcher call for suggester {} in core reload", suggester);
return;
} else if (thisCallCount == 1 || (isCoreReload && thisCallCount == 2)) {
if (buildOnStartup) {
log.info("buildOnStartup: " + suggester.getName());
if (log.isInfoEnabled()) {
log.info("buildOnStartup: {}", suggester.getName());
}
buildSuggesterIndex(newSearcher);
}
} else {
if (buildOnCommit) {
log.info("buildOnCommit: " + suggester.getName());
if (log.isInfoEnabled()) {
log.info("buildOnCommit: {}", suggester.getName());
}
buildSuggesterIndex(newSearcher);
} else if (buildOnOptimize) {
if (newSearcher.getIndexReader().leaves().size() == 1) {
log.info("buildOnOptimize: " + suggester.getName());
if (log.isInfoEnabled()) {
log.info("buildOnOptimize: {}", suggester.getName());
}
buildSuggesterIndex(newSearcher);
}
}

@@ -536,7 +546,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware,
try {
suggester.build(core, newSearcher);
} catch (Exception e) {
log.error("Exception in building suggester index for: " + suggester.getName(), e);
log.error("Exception in building suggester index for {}: ", suggester.getName(), e);
}
}

@@ -183,8 +183,10 @@ public class JsonLoader extends ContentStreamLoader {
case JSONParser.BIGNUMBER:
case JSONParser.BOOLEAN:
case JSONParser.NULL:
log.info("Can't have a value here. Unexpected "
+ JSONParser.getEventString(ev) + " at [" + parser.getPosition() + "]");
if (log.isInfoEnabled()) {
log.info("Can't have a value here. Unexpected {} at [{}]"
, JSONParser.getEventString(ev), parser.getPosition());
}

case JSONParser.OBJECT_START:
case JSONParser.OBJECT_END:

@@ -192,7 +194,7 @@ public class JsonLoader extends ContentStreamLoader {
break;

default:
log.info("Noggit UNKNOWN_EVENT_ID: " + ev);
log.info("Noggit UNKNOWN_EVENT_ID: {}", ev);
break;
}
// read the next event

@@ -105,7 +105,7 @@ public class XMLLoader extends ContentStreamLoader {
} catch (IllegalArgumentException ex) {
// Other implementations will likely throw this exception since "reuse-instance"
// is implementation specific.
log.debug("Unable to set the 'reuse-instance' property for the input chain: " + inputFactory);
log.debug("Unable to set the 'reuse-instance' property for the input chain: {}", inputFactory);
}

// Init SAX parser (for XSL):

@@ -116,7 +116,7 @@ public class XMLLoader extends ContentStreamLoader {
xsltCacheLifetimeSeconds = XSLT_CACHE_DEFAULT;
if(args != null) {
xsltCacheLifetimeSeconds = args.getInt(XSLT_CACHE_PARAM,XSLT_CACHE_DEFAULT);
log.debug("xsltCacheLifetimeSeconds=" + xsltCacheLifetimeSeconds);
log.debug("xsltCacheLifetimeSeconds={}", xsltCacheLifetimeSeconds);
}
return this;
}

@@ -178,8 +178,10 @@ public class XMLLoader extends ContentStreamLoader {
final byte[] body = IOUtils.toByteArray(is);
// TODO: The charset may be wrong, as the real charset is later
// determined by the XML parser, the content-type is only used as a hint!
log.trace("body: {}", new String(body, (charset == null) ?
ContentStreamBase.DEFAULT_CHARSET : charset));
if (log.isTraceEnabled()) {
log.trace("body: {}", new String(body, (charset == null) ?
ContentStreamBase.DEFAULT_CHARSET : charset));
}
IOUtils.closeQuietly(is);
is = new ByteArrayInputStream(body);
}

@@ -249,7 +251,7 @@ public class XMLLoader extends ContentStreamLoader {
} else if (UpdateRequestHandler.COMMIT_WITHIN.equals(attrName)) {
addCmd.commitWithin = Integer.parseInt(attrVal);
} else {
log.warn("XML element <add> has invalid XML attr: " + attrName);
log.warn("XML element <add> has invalid XML attr: {}", attrName);
}
}

@@ -263,7 +265,7 @@ public class XMLLoader extends ContentStreamLoader {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unexpected <doc> tag without an <add> tag surrounding it.");
}
} else if (UpdateRequestHandler.COMMIT.equals(currTag) || UpdateRequestHandler.OPTIMIZE.equals(currTag)) {
log.trace("parsing " + currTag);
log.trace("parsing {}", currTag);

CommitUpdateCommand cmd = new CommitUpdateCommand(req, UpdateRequestHandler.OPTIMIZE.equals(currTag));
ModifiableSolrParams mp = new ModifiableSolrParams();

@@ -317,7 +319,7 @@ public class XMLLoader extends ContentStreamLoader {
} else if (UpdateRequestHandler.COMMIT_WITHIN.equals(attrName)) {
deleteCmd.commitWithin = Integer.parseInt(attrVal);
} else {
log.warn("XML element <delete> has invalid XML attr: " + attrName);
log.warn("XML element <delete> has invalid XML attr: {}", attrName);
}
}

@@ -397,7 +399,7 @@ public class XMLLoader extends ContentStreamLoader {
log.debug(message);
}
} else {
log.warn("XML element <doc> has invalid XML attr:" + attrName);
log.warn("XML element <doc> has invalid XML attr: {}", attrName);
}
}

@@ -510,7 +512,7 @@ public class XMLLoader extends ContentStreamLoader {
} else if ("update".equals(attrName)) {
update = attrVal;
} else {
log.warn("XML element <field> has invalid XML attr: " + attrName);
log.warn("XML element <field> has invalid XML attr: {}", attrName);
}
}
}

@@ -179,10 +179,11 @@ public abstract class Tagger {

if(!loggedSkippedAltTokenWarning && skippedTokens){
loggedSkippedAltTokenWarning = true; //only log once
log.warn("The Tagger skipped some alternate tokens (tokens with posInc == 0) "
+ "while processing text. This may cause problems with some Analyzer "
+ "configurations (e.g. query time synonym expansion). For details see "
+ "https://github.com/OpenSextant/SolrTextTagger/pull/11#issuecomment-24936225");
log.warn("{}{}{}{}"
, "The Tagger skipped some alternate tokens (tokens with posInc == 0) "
, "while processing text. This may cause problems with some Analyzer "
, "configurations (e.g. query time synonym expansion). For details see "
, "https://github.com/OpenSextant/SolrTextTagger/pull/11#issuecomment-24936225");
}

tokenStream.end();

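The Tagger rewrite above is a workaround rather than an optimization: joining string literals with + is folded into a single constant by the Java compiler, so the original call wasted nothing, but the precommit check flags any + inside a log call, and passing the pieces as {} arguments keeps the checker satisfied at the cost of formatting four constants at runtime. A sketch of the two forms, with the message text abbreviated and the equivalence assumed:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical sketch: constant-only concatenation vs. the checker-friendly form.
    class ConstantMessageSketch {
      private static final Logger log = LoggerFactory.getLogger(ConstantMessageSketch.class);

      void compileTimeConstant() {
        // The compiler folds this into one literal; no runtime cost.
        log.warn("The Tagger skipped some alternate tokens "
            + "while processing text.");
      }

      void checkerFriendly() {
        // Same text, expressed so no '+' appears in the call.
        log.warn("{}{}",
            "The Tagger skipped some alternate tokens ",
            "while processing text.");
      }
    }
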
@@ -560,10 +560,8 @@ public class DefaultSolrHighlighter extends SolrHighlighter implements PluginInf
if (!methodFvh) return false;
boolean termPosOff = schemaField.storeTermPositions() && schemaField.storeTermOffsets();
if (!termPosOff) {
if (log.isWarnEnabled()) {
log.warn("Solr will use the standard Highlighter instead of FastVectorHighlighter because the {} field " +
"does not store TermVectors with TermPositions and TermOffsets.", schemaField.getName());
}
log.warn("Solr will use the standard Highlighter instead of FastVectorHighlighter because the {} field {}"
, schemaField.getName(), "does not store TermVectors with TermPositions and TermOffsets.");
}
return termPosOff;
}

@@ -748,9 +748,7 @@ public class JmxMetricsReporter implements Reporter, Closeable {
} else if (v instanceof Gauge) {
listener.onGaugeAdded(k, (Gauge)v);
} else {
if (log.isWarnEnabled()) {
log.warn("Unknown metric type {} for metric '{}', ignoring", v.getClass().getName(), k);
}
log.warn("Unknown metric type {} for metric '{}', ignoring", v.getClass().getName(), k);
}
});
}

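From this hunk on, many changes run in the opposite direction: if (log.isWarnEnabled()) wrappers are removed. WARN, ERROR and FATAL are effectively always on in production logging configurations, so a guard at those levels is treated as noise when the arguments are already cheap; the guard only earns its keep where computing an argument would be costly. A sketch of the preferred shape, with illustrative names:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical sketch: no guard at WARN and above when arguments are cheap.
    class WarnLevelSketch {
      private static final Logger log = LoggerFactory.getLogger(WarnLevelSketch.class);

      void report(Object metric, String key) {
        // Plain field/method reads as arguments: just log, no isWarnEnabled() wrapper.
        log.warn("Unknown metric type {} for metric '{}', ignoring",
            metric.getClass().getName(), key);
      }
    }
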
@@ -405,7 +405,7 @@ public class SolrReporter extends ScheduledReporter {
solr.request(req);
} catch (Exception e) {
if (log.isDebugEnabled()) {
log.debug("Error sending metric report: {}", e.toString());
log.debug("Error sending metric report: {}", e);
}
}

@@ -129,9 +129,7 @@ public class SolrShardReporter extends SolrCoreReporter {
}
if (core.getCoreDescriptor().getCloudDescriptor() == null) {
// not a cloud core
if (log.isWarnEnabled()) {
log.warn("Not initializing shard reporter for non-cloud core {}", core.getName());
}
log.warn("Not initializing shard reporter for non-cloud core {}", core.getName());
return;
}
if (period < 1) { // don't start it

@@ -178,9 +176,7 @@ public class SolrShardReporter extends SolrCoreReporter {
DocCollection collection = state.getCollection(core.getCoreDescriptor().getCollectionName());
Replica replica = collection.getLeader(core.getCoreDescriptor().getCloudDescriptor().getShardId());
if (replica == null) {
if (log.isWarnEnabled()) {
log.warn("No leader for {}/{}", collection.getName(), core.getCoreDescriptor().getCloudDescriptor().getShardId());
}
log.warn("No leader for {}/{}", collection.getName(), core.getCoreDescriptor().getCloudDescriptor().getShardId());
return null;
}
String baseUrl = replica.getStr("base_url");

@@ -116,9 +116,7 @@ public class PackageAPI {
packageLoader.refreshPackageConf();
}
} catch (KeeperException.ConnectionLossException | KeeperException.SessionExpiredException e) {
if (log.isWarnEnabled()) {
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
}
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
} catch (KeeperException e) {
log.error("A ZK error has occurred", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);

@@ -59,7 +59,7 @@ public class SolrRequestInfo {
// TODO: temporary sanity check... this can be changed to just an assert in the future
SolrRequestInfo prev = threadLocal.get();
if (prev != null) {
log.error("Previous SolrRequestInfo was not closed! req={}", prev.req.getOriginalParams().toString());
log.error("Previous SolrRequestInfo was not closed! req={}", prev.req.getOriginalParams());
log.error("prev == info : {}", prev.req == info.req, new RuntimeException());
}
assert prev == null;

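Dropping a trailing .toString(), as in the SolrRequestInfo hunk above, follows the same logic: slf4j stringifies a {} argument only when the message is actually rendered, so the explicit call forces the work (and a throwaway String) regardless of the level. Sketch, with assumed parameter names:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical sketch: let slf4j call toString() lazily.
    class LazyToStringSketch {
      private static final Logger log = LoggerFactory.getLogger(LazyToStringSketch.class);

      void logParams(Object originalParams) {
        log.error("req={}", originalParams.toString()); // eager: builds the String now
        log.error("req={}", originalParams);            // lazy: only if the message is rendered
      }
    }
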
@@ -133,9 +133,7 @@ public abstract class ManagedResourceStorage {
// that doesn't have write-access to the config dir
// while this failover approach is not ideal, it's better
// than causing the core to fail esp. if managed resources aren't being used
if (log.isWarnEnabled()) {
log.warn("Cannot write to config directory {} ; switching to use InMemory storage instead.", configDir.getAbsolutePath());
}
log.warn("Cannot write to config directory {} ; switching to use InMemory storage instead.", configDir.getAbsolutePath());
storageIO = new ManagedResourceStorage.InMemoryStorageIO();
}
}

@@ -122,9 +122,8 @@ public abstract class AbstractEnumField extends PrimitiveFieldType {
throw new SolrException(SolrException.ErrorCode.NOT_FOUND, exceptionMessage);
}
if (nodesLength > 1) {
if (log.isWarnEnabled())
log.warn("{}: More than one enum configuration found for enum '{}' in {}. The last one was taken.",
ftName, enumName, enumsConfigFile);
log.warn("{}: More than one enum configuration found for enum '{}' in {}. The last one was taken."
, ftName, enumName, enumsConfigFile);
}
final Node enumNode = nodes.item(nodesLength - 1);
final NodeList valueNodes = (NodeList) xpath.evaluate("value", enumNode, XPathConstants.NODESET);

@@ -153,7 +153,7 @@ public class EnumField extends AbstractEnumField {

if (!indexed && !stored && !docValues) {
if (log.isTraceEnabled())
log.trace("Ignoring unindexed/unstored field: " + field);
log.trace("Ignoring unindexed/unstored field: {}", field);
return null;
}
final Integer intValue = enumMapping.stringValueToIntValue(value.toString());

@@ -282,7 +282,7 @@ public abstract class FieldType extends FieldProperties {
public IndexableField createField(SchemaField field, Object value) {
if (!field.indexed() && !field.stored()) {
if (log.isTraceEnabled())
log.trace("Ignoring unindexed/unstored field: " + field);
log.trace("Ignoring unindexed/unstored field: {}", field);
return null;
}

@@ -110,10 +110,8 @@ public final class FieldTypePluginLoader
if (ft instanceof HasImplicitIndexAnalyzer) {
ft.setIsExplicitAnalyzer(false);
if (null != queryAnalyzer && null != analyzer) {
if (log.isWarnEnabled()) {
log.warn("Ignoring index-time analyzer for field: " + name);
}
} else if (null == queryAnalyzer) { // Accept non-query-time analyzer as a query-time analyzer
log.warn("Ignoring index-time analyzer for field: {}", name);
} else if (null == queryAnalyzer) { // Accept non-query-time analyzer as a query-time analyzer
queryAnalyzer = analyzer;
}
if (null != queryAnalyzer) {

@@ -406,12 +404,11 @@ public final class FieldTypePluginLoader
SolrConfig.parseLuceneVersionString(configuredVersion) : schema.getDefaultLuceneMatchVersion();

if (!version.onOrAfter(Version.LUCENE_8_0_0)) {
if (log.isWarnEnabled()) {
log.warn("{} is using deprecated {}" +
" emulation. You should at some point declare and reindex to at least 8.0, because " +
"7.x emulation is deprecated and will be removed in 9.0"
, pluginClassName, version);
}
log.warn("{} is using deprecated {}{}{}"
, pluginClassName
, version
, " emulation. You should at some point declare and reindex to at least 8.0, because "
, "7.x emulation is deprecated and will be removed in 9.0");
}
return version;
}

@@ -759,10 +759,8 @@ public class IndexSchema {

for (Map.Entry<SchemaField, Integer> entry : copyFieldTargetCounts.entrySet()) {
if (entry.getValue() > 1 && !entry.getKey().multiValued()) {
if (log.isWarnEnabled()) {
log.warn("Field {} is not multivalued and destination for multiple {} ({})"
, entry.getKey().name, COPY_FIELDS, entry.getValue());
}
log.warn("Field {} is not multivalued and destination for multiple {} ({})"
, entry.getKey().name, COPY_FIELDS, entry.getValue());
}
}
}

@@ -178,17 +178,13 @@ public class JsonPreAnalyzedParser implements PreAnalyzedParser {
FlagsAttribute flags = parent.addAttribute(FlagsAttribute.class);
flags.setFlags(f);
} catch (NumberFormatException nfe) {
if (log.isWarnEnabled()) {
log.warn("Invalid {} attribute, skipped: '{}'", FLAGS_KEY, e.getValue());
}
log.warn("Invalid {} attribute, skipped: '{}'", FLAGS_KEY, e.getValue());
}
} else if (key.equals(TYPE_KEY)) {
TypeAttribute tattr = parent.addAttribute(TypeAttribute.class);
tattr.setType(String.valueOf(e.getValue()));
} else {
if (log.isWarnEnabled()) {
log.warn("Unknown attribute, skipped: {} = {}", e.getKey(), e.getValue());
}
log.warn("Unknown attribute, skipped: {} = {}", e.getKey(), e.getValue());
}
}
// handle offset attr

@@ -274,10 +274,8 @@ public final class ManagedIndexSchema extends IndexSchema {
maxWaitSecs+" seconds! Failed cores: "+failedList);

} catch (InterruptedException ie) {
if (log.isWarnEnabled()) {
log.warn("Core {} was interrupted waiting for schema version {} to propagate to {} replicas for collection {}"
, localCoreNodeName, schemaZkVersion, concurrentTasks.size(), collection);
}
log.warn("Core {} was interrupted waiting for schema version {} to propagate to {} replicas for collection {}"
, localCoreNodeName, schemaZkVersion, concurrentTasks.size(), collection);
Thread.currentThread().interrupt();
} finally {
if (!parallelExecutor.isShutdown())

@@ -277,12 +277,11 @@ public class ManagedIndexSchemaFactory extends IndexSchemaFactory implements Sol
final File nonManagedSchemaFile = locateConfigFile(resourceName);
if (null == nonManagedSchemaFile) {
// Don't throw an exception for failure to rename the non-managed schema
if (log.isWarnEnabled()) {
log.warn("On upgrading to managed schema, did not rename non-managed schema {} "
+ "because it's neither an absolute file "
+ "nor under SolrConfig.getConfigDir() or the current directory. "
+ "PLEASE REMOVE THIS FILE.", resourceName);
}
log.warn("On upgrading to managed schema, did not rename non-managed schema {} {}{}{}"
, resourceName
, "because it's neither an absolute file "
, "nor under SolrConfig.getConfigDir() or the current directory. "
, "PLEASE REMOVE THIS FILE.");
} else {
File upgradedSchemaFile = new File(nonManagedSchemaFile + UPGRADED_SCHEMA_EXTENSION);
if (nonManagedSchemaFile.renameTo(upgradedSchemaFile)) {

@@ -249,9 +249,7 @@ public class OpenExchangeRatesOrgProvider implements ExchangeRateProvider {
}
break;
} else {
if (log.isWarnEnabled()) {
log.warn("Expected key, got {}", JSONParser.getEventString(ev));
}
log.warn("Expected key, got {}", JSONParser.getEventString(ev));
break;
}

@@ -234,9 +234,7 @@ public abstract class PointField extends NumericFieldType {
boolean docValues = field.hasDocValues();

if (!indexed && !stored && !docValues) {
if (log.isTraceEnabled()) {
log.trace("Ignoring unindexed/unstored field: " + field);
}
log.trace("Ignoring unindexed/unstored field: {}", field);
return false;
}
return true;

@@ -124,9 +124,7 @@ public class PreAnalyzedField extends TextField implements HasImplicitIndexAnaly
try {
f = fromString(field, String.valueOf(value));
} catch (Exception e) {
if (log.isWarnEnabled()) {
log.warn("Error parsing pre-analyzed field '{}'", field.getName(), e);
}
log.warn("Error parsing pre-analyzed field '{}'", field.getName(), e);
return null;
}
return f;

@@ -170,8 +168,7 @@ public class PreAnalyzedField extends TextField implements HasImplicitIndexAnaly
*/
public static org.apache.lucene.document.FieldType createFieldType(SchemaField field) {
if (!field.indexed() && !field.stored()) {
if (log.isTraceEnabled())
log.trace("Ignoring unindexed/unstored field: " + field);
log.trace("Ignoring unindexed/unstored field: {}", field);
return null;
}
org.apache.lucene.document.FieldType newType = new org.apache.lucene.document.FieldType();

@@ -517,7 +517,7 @@ public class TrieField extends NumericFieldType {

if (!indexed && !stored && !docValues) {
if (log.isTraceEnabled())
log.trace("Ignoring unindexed/unstored field: " + field);
log.trace("Ignoring unindexed/unstored field: {}", field);
return null;
}

@@ -384,9 +384,7 @@ public class Grouping {
cachedCollector.replay(secondPhaseCollectors);
} else {
signalCacheWarning = true;
if (log.isWarnEnabled()) {
log.warn(String.format(Locale.ROOT, "The grouping cache is active, but not used because it exceeded the max cache limit of %d percent", maxDocsPercentageToCache));
}
log.warn(String.format(Locale.ROOT, "The grouping cache is active, but not used because it exceeded the max cache limit of %d percent", maxDocsPercentageToCache));
log.warn("Please increase cache size or disable group caching.");
searchWithTimeLimiter(luceneFilter, secondPhaseCollectors);
}

@@ -442,9 +440,7 @@ public class Grouping {
try {
searcher.search(QueryUtils.combineQueryAndFilter(query, luceneFilter), collector);
} catch (TimeLimitingCollector.TimeExceededException | ExitableDirectoryReader.ExitingReaderException x) {
if (log.isWarnEnabled()) {
log.warn("Query: {}; {}", query, x.getMessage());
}
log.warn("Query: {}; {}", query, x.getMessage());
qr.setPartialResults(true);
}
}

@@ -617,10 +617,8 @@ public class SolrDocumentFetcher {
}

if (schemaField.getType().getNumberType() == null) {
if (log.isWarnEnabled()) {
log.warn("Couldn't decode docValues for field: [{}], schemaField: [{}], numberType is unknown",
schemaField.getName(), schemaField);
}
log.warn("Couldn't decode docValues for field: [{}], schemaField: [{}], numberType is unknown",
schemaField.getName(), schemaField);
return null;
}

@@ -200,9 +200,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable, SolrI
try {
super.search(query, collector);
} catch (TimeLimitingCollector.TimeExceededException | ExitableDirectoryReader.ExitingReaderException x) {
if (log.isWarnEnabled()) {
log.warn("Query: [{}]; {}", query, x.getMessage());
}
log.warn("Query: [{}]; {}", query, x.getMessage());
qr.setPartialResults(true);
} catch (EarlyTerminatingCollectorException etce) {
if (collector instanceof DelegatingCollector) {

@@ -217,9 +217,7 @@ public class UnInvertedField extends DocTermOrds {
maxTermCounts = newMaxTermCounts;
}

if (log.isInfoEnabled()) {
log.info("UnInverted multi-valued field {}", toString());
}
log.info("UnInverted multi-valued field {}", this);
//System.out.println("CREATED: " + toString() + " ti.index=" + ti.index);
}

@@ -305,10 +305,8 @@ public class FileFloatSource extends ValueSource {
fval=Float.parseFloat(val);
} catch (Exception e) {
if (++otherErrors<=10) {
if (log.isErrorEnabled()) {
log.error("Error loading external value source + fileName + " + e
+ (otherErrors < 10 ? "" : "\tSkipping future errors for this file."));
}
log.error("Error loading external value source: {}{}", e
, (otherErrors < 10 ? "" : "\tSkipping future errors for this file."));
}
continue; // go to next line in file.. leave values as default.
}

@@ -336,12 +334,10 @@ public class FileFloatSource extends ValueSource {
// exceptions that happened in the loop
try{r.close();}catch(Exception e){}
}

if (log.isInfoEnabled()) {
log.info("Loaded external value source " + fname
+ (notFoundCount == 0 ? "" : " :" + notFoundCount + " missing keys " + notFound));
String tmp = (notFoundCount == 0 ? "" : " :" + notFoundCount + " missing keys " + notFound);
log.info("Loaded external value source {}{}", fname, tmp);
}

return vals;
}

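The second FileFloatSource hunk shows another recurring move: when an argument is a messy conditional expression, the commit hoists it into a local variable inside the guard. The expression is then evaluated once, only on the enabled path, and concatenation inside that branch costs nothing extra because the guard has already proven the level is on. Sketch, with assumed names:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical sketch: hoisting a conditional argument expression inside the guard.
    class HoistedArgumentSketch {
      private static final Logger log = LoggerFactory.getLogger(HoistedArgumentSketch.class);

      void loaded(String fname, int notFoundCount, String notFound) {
        if (log.isInfoEnabled()) {
          // Evaluated only when INFO is on; the log call itself stays short.
          String tmp = (notFoundCount == 0 ? "" : " :" + notFoundCount + " missing keys " + notFound);
          log.info("Loaded external value source {}{}", fname, tmp);
        }
      }
    }
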
@@ -237,9 +237,7 @@ public class CommandHandler {
searcher.search(query, collector);
} catch (TimeLimitingCollector.TimeExceededException | ExitableDirectoryReader.ExitingReaderException x) {
partialResults = true;
if (log.isWarnEnabled()) {
log.warn("Query: {}; {}", query, x.getMessage());
}
log.warn("Query: {}; {}", query, x.getMessage());
}

if (includeHitCount) {

@@ -193,9 +193,7 @@ public class ChildFieldValueSourceParser extends ValueSourceParser {
(NAME+" sort param field \""+ sortFieldName+"\" can't be found in schema");
}
} catch (SyntaxError e) {
if (log.isErrorEnabled()) {
log.error("can't parse {}", fp.getString(), e);
}
log.error("can't parse {}", fp.getString(), e);
throw e;
}
return new BlockJoinSortFieldValueSource(childFilter, parentFilter, sf);

@@ -160,9 +160,7 @@ public class LRUStatsCache extends ExactStatsCache {
return super.doRetrieveStatsRequest(rb);
}
} catch (IOException e) {
if (log.isWarnEnabled()) {
log.warn("Exception checking missing stats for query {}, forcing retrieving stats", rb.getQuery(), e);
}
log.warn("Exception checking missing stats for query {}, forcing retrieving stats", rb.getQuery(), e);
// retrieve anyway
return super.doRetrieveStatsRequest(rb);
}

@@ -155,9 +155,7 @@ public class HadoopAuthPlugin extends AuthenticationPlugin {
authFilter.init(conf);

} catch (ServletException e) {
if (log.isErrorEnabled()) {
log.error("Error initializing {}", getClass().getSimpleName(), e);
}
log.error("Error initializing {}", getClass().getSimpleName(), e);
throw new SolrException(ErrorCode.SERVER_ERROR, "Error initializing " + getClass().getName() + ": "+e);
}
}

@@ -335,9 +335,7 @@ public class JWTAuthPlugin extends AuthenticationPlugin implements SpecProvider,

case AUTZ_HEADER_PROBLEM:
case JWT_PARSE_ERROR:
if (log.isWarnEnabled()) {
log.warn("Authentication failed. {}, {}", authResponse.getAuthCode(), authResponse.getAuthCode().getMsg());
}
log.warn("Authentication failed. {}, {}", authResponse.getAuthCode(), authResponse.getAuthCode().getMsg());
numErrors.mark();
authenticationFailure(response, authResponse.getAuthCode().getMsg(), HttpServletResponse.SC_BAD_REQUEST, BearerWwwAuthErrorCode.invalid_request);
return false;

@@ -346,9 +344,7 @@ public class JWTAuthPlugin extends AuthenticationPlugin implements SpecProvider,
case JWT_EXPIRED:
case JWT_VALIDATION_EXCEPTION:
case PRINCIPAL_MISSING:
if (log.isWarnEnabled()) {
log.warn("Authentication failed. {}, {}", authResponse.getAuthCode(), exceptionMessage);
}
log.warn("Authentication failed. {}, {}", authResponse.getAuthCode(), exceptionMessage);
numWrongCredentials.inc();
authenticationFailure(response, authResponse.getAuthCode().getMsg(), HttpServletResponse.SC_UNAUTHORIZED, BearerWwwAuthErrorCode.invalid_token);
return false;

@@ -81,9 +81,7 @@ public class Sha256AuthenticationProvider implements ConfigEditablePlugin, Basi
for (Map.Entry<String, String> e : users.entrySet()) {
String v = e.getValue();
if (v == null) {
if (log.isWarnEnabled()) {
log.warn("user has no password {}", e.getKey());
}
log.warn("user has no password {}", e.getKey());
continue;
}
credentials.put(e.getKey(), v);

@@ -70,9 +70,7 @@ public class SolrLogAuditLoggerPlugin extends AuditLoggerPlugin {
break;

case WARN:
if (log.isWarnEnabled()) {
log.warn(formatter.formatEvent(event));
}
log.warn(formatter.formatEvent(event));
break;

case ERROR:

@@ -499,9 +499,7 @@ public class HttpSolrCall {
return RETURN;
}
if (!(statusCode == HttpStatus.SC_ACCEPTED) && !(statusCode == HttpStatus.SC_OK)) {
if (log.isWarnEnabled()) {
log.warn("ERROR {} during authentication: {}", statusCode, authResponse.getMessage());
}
log.warn("ERROR {} during authentication: {}", statusCode, authResponse.getMessage());
sendError(statusCode,
"ERROR during authorization, Response code: " + statusCode);
if (shouldAudit(EventType.ERROR)) {

@@ -245,7 +245,7 @@ public class SolrDispatchFilter extends BaseSolrFilter {
log.info("\\__ \\/ _ \\ | '_| Install dir: {}", System.getProperty(SOLR_INSTALL_DIR_ATTRIBUTE));
}
if (log.isInfoEnabled()) {
log.info("|___/\\___/_|_| Start time: {}", Instant.now().toString());
log.info("|___/\\___/_|_| Start time: {}", Instant.now());
}
}

@@ -184,7 +184,7 @@ public class SpellCheckCollator {
collations.add(collation);
}
if (log.isDebugEnabled()) {
log.debug("Collation: " + collationQueryStr + (verifyCandidateWithQuery ? (" will return " + hits + " hits.") : ""));
log.debug("Collation: {} {}", collationQueryStr, (verifyCandidateWithQuery ? (" will return " + hits + " hits.") : "")); // LOGOK
}
}
return collations;

@@ -148,7 +148,7 @@ public class SolrSuggester implements Accountable {
storeDir.mkdirs();
} else if (getStoreFile().exists()) {
if (log.isDebugEnabled()) {
log.debug("attempt reload of the stored lookup from file " + getStoreFile());
log.debug("attempt reload of the stored lookup from file {}", getStoreFile());
}
try {
lookup.load(new FileInputStream(getStoreFile()));

@@ -164,9 +164,7 @@ public class Suggester extends SolrSpellChecker {
if(!lookup.store(new FileOutputStream(target))) {
if (sourceLocation == null) {
assert reader != null && field != null;
if (log.isErrorEnabled()) {
log.error("Store Lookup build from index on field: {} failed reader has: {} docs", field, reader.maxDoc());
}
log.error("Store Lookup build from index on field: {} failed reader has: {} docs", field, reader.maxDoc());
} else {
log.error("Store Lookup build from sourceLocation: {} failed", sourceLocation);
}

@@ -118,10 +118,8 @@ public class HdfsLocalityReporter implements SolrInfoBean {
}
}
} catch (IOException e) {
if (log.isWarnEnabled()) {
log.warn("Could not retrieve locality information for {} due to exception: {}",
hdfsDirectory.getHdfsDirPath(), e);
}
log.warn("Could not retrieve locality information for {} due to exception: {}",
hdfsDirectory.getHdfsDirPath(), e);
}
}
}

@@ -152,7 +150,9 @@ public class HdfsLocalityReporter implements SolrInfoBean {
*/
public void registerDirectory(HdfsDirectory dir) {
if (log.isInfoEnabled()) {
log.info("Registering directory {} for locality metrics.", dir.getHdfsDirPath().toString());
if (log.isInfoEnabled()) {
log.info("Registering directory {} for locality metrics.", dir.getHdfsDirPath());
}
}
cache.put(dir, new ConcurrentHashMap<FileStatus, BlockLocation[]>());
}

@@ -353,28 +353,20 @@ public class PeerSync implements SolrMetricProducer {
boolean connectTimeoutExceptionInChain = connectTimeoutExceptionInChain(srsp.getException());
if (connectTimeoutExceptionInChain || solrException instanceof ConnectTimeoutException || solrException instanceof SocketTimeoutException
|| solrException instanceof NoHttpResponseException || solrException instanceof SocketException) {
if (log.isWarnEnabled()) {
log.warn("{} couldn't connect to {}, counting as success ", msg(), srsp.getShardAddress(), srsp.getException());
}

log.warn("{} couldn't connect to {}, counting as success ", msg(), srsp.getShardAddress(), srsp.getException());
return true;
}
}

if (cantReachIsSuccess && sreq.purpose == 1 && srsp.getException() instanceof SolrException && ((SolrException) srsp.getException()).code() == 503) {
if (log.isWarnEnabled()) {
log.warn("{} got a 503 from {}, counting as success "
, msg(), srsp.getShardAddress(), srsp.getException());
}
log.warn("{} got a 503 from {}, counting as success ", msg(), srsp.getShardAddress(), srsp.getException());
return true;
}

if (cantReachIsSuccess && sreq.purpose == 1 && srsp.getException() instanceof SolrException && ((SolrException) srsp.getException()).code() == 404) {
if (log.isWarnEnabled()) {
log.warn("{} got a 404 from {}, counting as success. {} Perhaps /get is not registered?"
, msg(), srsp.getShardAddress(), srsp.getException());
}
log.warn("{} got a 404 from {}, counting as success. {} Perhaps /get is not registered?"
, msg(), srsp.getShardAddress(), srsp.getException());
return true;
}

@@ -384,10 +376,8 @@ public class PeerSync implements SolrMetricProducer {
// TODO: at least log???
// srsp.getException().printStackTrace(System.out);

if (log.isWarnEnabled()) {
log.warn("{} exception talking to {}, failed", msg(), srsp.getShardAddress(), srsp.getException());
}

log.warn("{} exception talking to {}, failed", msg(), srsp.getShardAddress(), srsp.getException());

return false;
}

@@ -165,9 +165,7 @@ public class PeerSyncWithLeader implements SolrMetricProducer {
long smallestNewUpdate = Math.abs(ourUpdates.get(ourUpdates.size() - 1));

if (Math.abs(startingVersions.get(0)) < smallestNewUpdate) {
if (log.isWarnEnabled()) {
log.warn("{} too many updates received since start - startingUpdates no longer overlaps with our currentUpdates", msg());
}
log.warn("{} too many updates received since start - startingUpdates no longer overlaps with our currentUpdates", msg());
syncErrors.inc();
return PeerSync.PeerSyncResult.failure();
}

@@ -195,9 +193,7 @@ public class PeerSyncWithLeader implements SolrMetricProducer {
try {
clientToLeader.close();
} catch (IOException e) {
if (log.isWarnEnabled()) {
log.warn("{} unable to close client to leader", msg(), e);
}
log.warn("{} unable to close client to leader", msg(), e);
}
}
}

@@ -322,9 +322,8 @@ public class SolrCmdDistributor implements Closeable {
}

if (log.isDebugEnabled()) {
log.debug("sending update to "
+ req.node.getUrl() + " retry:"
+ req.retries + " " + req.cmd + " params:" + req.uReq.getParams());
log.debug("sending update to {} retry: {} {} params {}"
, req.node.getUrl(), req.retries, req.cmd, req.uReq.getParams());
}

if (isCommit) {

@@ -313,7 +313,7 @@ public class SolrIndexSplitter {
if (log.isInfoEnabled()) {
log.info("SolrIndexSplitter: partition #{} partitionCount={} segment #{} segmentCount={}"
, partitionNumber, numPieces + (ranges != null ? " range=" + ranges.get(partitionNumber) : "")
, segmentNumber, leaves.size());
, segmentNumber, leaves.size()); //LOGOK
}
CodecReader subReader = SlowCodecReaderWrapper.wrap(leaves.get(segmentNumber).reader());
iw.addIndexes(new LiveDocsReader(subReader, segmentDocSets.get(segmentNumber)[partitionNumber]));

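The // LOGOK markers in the SpellCheckCollator and SolrIndexSplitter hunks above are the escape hatch for the new precommit check: a line carrying that marker is skipped by the validator, which is how deliberate concatenation inside an explicit level guard survives. Sketch of an opted-out call, with the marker spelling as used above and illustrative surrounding names:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical sketch: opting a single line out of the log-call validator.
    class LogOkMarkerSketch {
      private static final Logger log = LoggerFactory.getLogger(LogOkMarkerSketch.class);

      void collation(String query, boolean verify, int hits) {
        if (log.isDebugEnabled()) {
          // Concatenation is acceptable here because the guard proves DEBUG is on,
          // and the marker tells the checker to leave this line alone.
          log.debug("Collation: {} {}", query, (verify ? (" will return " + hits + " hits.") : "")); // LOGOK
        }
      }
    }
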
@ -126,7 +126,7 @@ class ErrorReportingConcurrentUpdateSolrClient extends ConcurrentUpdateHttp2Solr
|
|||
|
||||
@Override
|
||||
public void handleError(Throwable ex) {
|
||||
log.error("Error when calling {} to {}", req.toString(), req.node.getUrl(), ex);
|
||||
log.error("Error when calling {} to {}", req, req.node.getUrl(), ex);
|
||||
Error error = new Error();
|
||||
error.e = (Exception) ex;
|
||||
if (ex instanceof SolrException) {
|
||||
|
|
|
@ -194,9 +194,7 @@ public class TransactionLog implements Closeable {
|
|||
}
|
||||
} else {
|
||||
if (start > 0) {
|
||||
if (log.isWarnEnabled()) {
|
||||
log.warn("New transaction log already exists:{} size={}", tlogFile, raf.length());
|
||||
}
|
||||
log.warn("New transaction log already exists:{} size={}", tlogFile, raf.length());
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -172,10 +172,8 @@ public class AtomicUpdateProcessorFactory extends UpdateRequestProcessorFactory
|
|||
"Atomic update failed after multiple attempts due to " + e.getMessage());
|
||||
}
|
||||
if (e.code() == ErrorCode.CONFLICT.code) { // version conflict
|
||||
if (log.isWarnEnabled()) {
|
||||
log.warn("Atomic update failed due to {} Retrying with new version .... ({})"
|
||||
, e.getMessage(), attempts);
|
||||
}
|
||||
log.warn("Atomic update failed due to {} Retrying with new version .... ({})"
|
||||
, e.getMessage(), attempts);
|
||||
|
||||
Long lastVersion = vinfo.lookupVersion(cmd.getIndexedId());
|
||||
// if lastVersion is null then we put -1 to assert that document must not exist
|
||||
|
|
|
@ -435,8 +435,8 @@ public class CloneFieldUpdateProcessorFactory
|
|||
resolvedDest = matcher.replaceAll(dest);
|
||||
} else {
|
||||
if (log.isDebugEnabled()) {
|
||||
log.debug("CloneFieldUpdateProcessor.srcSelector.shouldMutate('{}') returned true, " +
|
||||
"but replacement pattern did not match, field skipped.", fname);
|
||||
log.debug("CloneFieldUpdateProcessor.srcSelector.shouldMutate('{}') returned true, but replacement pattern did not match, field skipped."
|
||||
, fname);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
|
|
@ -432,8 +432,8 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
|
|||
|
||||
if (fetchedFromLeader instanceof DeleteUpdateCommand) {
|
||||
if (log.isInfoEnabled()) {
|
||||
log.info("In-place update of {} failed to find valid lastVersion to apply to, and the document"
|
||||
+ " was deleted at the leader subsequently.", idBytes.utf8ToString());
|
||||
log.info("In-place update of {} failed to find valid lastVersion to apply to, and the document was deleted at the leader subsequently."
|
||||
, idBytes.utf8ToString());
|
||||
}
|
||||
versionDelete((DeleteUpdateCommand) fetchedFromLeader);
|
||||
return true;
|
||||
|
@ -558,8 +558,8 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
|
|||
// trying to index this partial update. Since a full update more recent than this partial update has succeeded,
|
||||
// we can drop the current update.
|
||||
if (log.isDebugEnabled()) {
|
||||
log.debug("Update was applied on version: {}, but last version I have is: {}"
|
||||
+ ". Current update should be dropped. id={}", cmd.prevVersion, lastFoundVersion, cmd.getPrintableId());
|
||||
log.debug("Update was applied on version: {}, but last version I have is: {} . Current update should be dropped. id={}"
|
||||
, cmd.prevVersion, lastFoundVersion, cmd.getPrintableId());
|
||||
}
|
||||
return -1;
|
||||
} else if (Math.abs(lastFoundVersion) == cmd.prevVersion) {
|
||||
|
@ -579,10 +579,10 @@ public class DistributedUpdateProcessor extends UpdateRequestProcessor {
|
|||
UpdateCommand missingUpdate = fetchFullUpdateFromLeader(cmd, versionOnUpdate);
|
||||
if (missingUpdate instanceof DeleteUpdateCommand) {
|
||||
if (log.isInfoEnabled()) {
|
||||
log.info("Tried to fetch document {} from the leader, but the leader says document has been deleted. "
|
||||
+ "Deleting the document here and skipping this update: Last found version: {}, was looking for: {}", cmd.getPrintableId(), lastFoundVersion, cmd.prevVersion);
|
||||
versionDelete((DeleteUpdateCommand) missingUpdate);
|
||||
log.info("Tried to fetch document {} from the leader, but the leader says document has been deleted. Deleting the document here and skipping this update: Last found version: {}, was looking for: {}"
|
||||
, cmd.getPrintableId(), lastFoundVersion, cmd.prevVersion);
|
||||
}
|
||||
versionDelete((DeleteUpdateCommand) missingUpdate);
|
||||
return -1;
|
||||
} else {
|
||||
assert missingUpdate instanceof AddUpdateCommand;
|
||||
|
|
|
@ -1088,9 +1088,7 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
|
|||
|
||||
// for now we don't error - we assume if it was added locally, we
|
||||
// succeeded
|
||||
if (log.isWarnEnabled()) {
|
||||
log.warn("Error sending update to {}", error.req.node.getBaseUrl(), error.e);
|
||||
}
|
||||
log.warn("Error sending update to {}", error.req.node.getBaseUrl(), error.e);
|
||||
|
||||
// Since it is not a forward request, for each fail, try to tell them to
|
||||
// recover - the doc was already added locally, so it should have been
|
||||
|
@ -1138,11 +1136,8 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
|
|||
getLeaderExc = exc;
|
||||
}
|
||||
if (leaderCoreNodeName == null) {
|
||||
if (log.isWarnEnabled()) {
|
||||
log.warn("Failed to determine if {} is still the leader for collection={} shardId={} " +
|
||||
"before putting {} into leader-initiated recovery",
|
||||
cloudDesc.getCoreNodeName(), collection, shardId, replicaUrl, getLeaderExc);
|
||||
}
|
||||
log.warn("Failed to determine if {} is still the leader for collection={} shardId={} before putting {} into leader-initiated recovery",
|
||||
cloudDesc.getCoreNodeName(), collection, shardId, replicaUrl, getLeaderExc);
|
||||
}
|
||||
|
||||
List<ZkCoreNodeProps> myReplicas = zkController.getZkStateReader().getReplicaProps(collection,
|
||||
|
@ -1175,20 +1170,16 @@ public class DistributedZkUpdateProcessor extends DistributedUpdateProcessor {
|
|||
} else {
|
||||
// not the leader anymore maybe or the error'd node is not my replica?
|
||||
if (!foundErrorNodeInReplicaList) {
|
||||
if (log.isWarnEnabled()) {
|
||||
log.warn("Core {} belonging to {} {}, does not have error'd node {} as a replica. No request recovery command will be sent!"
|
||||
, cloudDesc.getCoreNodeName(), collection, cloudDesc.getShardId(), stdNode.getNodeProps().getCoreUrl());
|
||||
}
|
||||
log.warn("Core {} belonging to {} {}, does not have error'd node {} as a replica. No request recovery command will be sent!"
|
||||
, cloudDesc.getCoreNodeName(), collection, cloudDesc.getShardId(), stdNode.getNodeProps().getCoreUrl());
|
||||
if (!shardId.equals(cloudDesc.getShardId())) {
|
||||
// some replicas on other shard did not receive the updates (ex: during splitshard),
|
||||
// exception must be notified to clients
|
||||
errorsForClient.add(error);
|
||||
}
|
||||
} else {
|
||||
if (log.isWarnEnabled()) {
|
||||
log.warn("Core {} is no longer the leader for {} {} or we tried to put ourself into LIR, no request recovery command will be sent!"
|
||||
, cloudDesc.getCoreNodeName(), collection, shardId);
|
||||
}
|
||||
log.warn("Core {} is no longer the leader for {} {} or we tried to put ourself into LIR, no request recovery command will be sent!"
|
||||
, cloudDesc.getCoreNodeName(), collection, shardId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -203,9 +203,7 @@ public class DocBasedVersionConstraintsProcessorFactory extends UpdateRequestPro
|
|||
userVersionField.getType().getValueSource(userVersionField, null);
|
||||
} catch (Exception e) {
|
||||
useFieldCache = false;
|
||||
if (log.isWarnEnabled()) {
|
||||
log.warn("Can't use fieldcache/valuesource: {}", e.getMessage());
|
||||
}
|
||||
log.warn("Can't use fieldcache/valuesource: {}", e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -230,12 +228,9 @@ public class DocBasedVersionConstraintsProcessorFactory extends UpdateRequestPro
      versionFields.forEach(field -> requiredFieldNames.remove(field));
    }
    if (!requiredFieldNames.isEmpty()) {
      if (log.isWarnEnabled()) {
        log.warn("The schema '{}' has required fields that aren't added in the tombstone."
                + " This can cause deletes to fail if those aren't being added in some other way. Required Fields={}",
            schema.getSchemaName(),
            requiredFieldNames);
      }
      log.warn("The schema '{}' has required fields that aren't added in the tombstone. This can cause deletes to fail if those aren't being added in some other way. Required Fields={}",
          schema.getSchemaName(),
          requiredFieldNames);
      return false;
    }
    return true;
@ -178,7 +178,7 @@ public class LogUpdateProcessorFactory extends UpdateRequestProcessorFactory imp
  @Override
  public void processRollback( RollbackUpdateCommand cmd ) throws IOException {
    if (logDebug) {
      log.debug("PRE_UPDATE {}", cmd, req);
      log.debug("PRE_UPDATE {} {}", cmd, req);
    }
    if (next != null) next.processRollback(cmd);
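The PRE_UPDATE hunk above also fixes a placeholder/argument mismatch: with one {} and two non-Throwable arguments, SLF4J formats only the first argument and silently drops the rest, so req never appeared in the log line. A small sketch of that behavior (class and parameter names are illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical demo of SLF4J placeholder counting, not from the patch.
public class PlaceholderMismatchSketch {
  private static final Logger log = LoggerFactory.getLogger(PlaceholderMismatchSketch.class);

  void demo(Object cmd, Object req) {
    log.debug("PRE_UPDATE {}", cmd, req);    // logs "PRE_UPDATE <cmd>"; req is silently ignored
    log.debug("PRE_UPDATE {} {}", cmd, req); // logs both values
  }
}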
@ -202,9 +202,7 @@ public class LogUpdateProcessorFactory extends UpdateRequestProcessorFactory imp
    if (log.isWarnEnabled() && slowUpdateThresholdMillis >= 0) {
      final long elapsed = (long) req.getRequestTimer().getTime();
      if (elapsed >= slowUpdateThresholdMillis) {
        if (log.isWarnEnabled()) {
          log.warn("slow: {}", getLogStringAndClearRspToLog());
        }
        log.warn("slow: {}", getLogStringAndClearRspToLog());
      }
    }
  }
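Note that the outer log.isWarnEnabled() check in this hunk is kept: getLogStringAndClearRspToLog() builds its result eagerly, so the guard still avoids that work when WARN is off; only the redundant inner guard goes. A sketch of the general rule, with made-up helper names for illustration:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical sketch: keep a guard when an argument is costly to produce.
public class ExpensiveArgumentSketch {
  private static final Logger log = LoggerFactory.getLogger(ExpensiveArgumentSketch.class);

  void logSlowRequest(long elapsedMillis, long thresholdMillis) {
    // The guard prevents buildSummary() from running when WARN is disabled;
    // a {} placeholder alone would not help, since the argument is
    // evaluated before the logger ever sees it.
    if (log.isWarnEnabled() && elapsedMillis >= thresholdMillis) {
      log.warn("slow: {}", buildSummary());
    }
  }

  private String buildSummary() {
    return "request details..."; // stand-in for expensive formatting work
  }
}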
@ -164,9 +164,7 @@ class PreAnalyzedUpdateProcessor extends FieldMutatingUpdateProcessor {
    if (pre != null) {
      res.addValue(pre);
    } else { // restore the original value
      if (log.isWarnEnabled()) {
        log.warn("Could not parse field {} - using original value as is: {}", src.getName(), o);
      }
      log.warn("Could not parse field {} - using original value as is: {}", src.getName(), o);
      res.addValue(o);
    }
  }
@ -98,7 +98,7 @@ public class RegexpBoostProcessor extends UpdateRequestProcessor {
      sharedObjectCache.put(BOOST_ENTRIES_CACHE_KEY, cachedBoostEntries);
    } else {
      if (log.isDebugEnabled()) {
        log.debug("Using cached boost entry list with " + cachedBoostEntries.size() + " elements.");
        log.debug("Using cached boost entry list with {} elements", cachedBoostEntries.size());
      }
    }
@ -169,7 +169,7 @@ public class RegexpBoostProcessor extends UpdateRequestProcessor {
    for (BoostEntry boostEntry : boostEntries) {
      if (boostEntry.getPattern().matcher(value).matches()) {
        if (log.isDebugEnabled()) {
          log.debug("Pattern match " + boostEntry.getPattern().pattern() + " for " + value);
          log.debug("Pattern match {} for {}", boostEntry.getPattern().pattern(), value);
        }
        boost = (boostEntry.getBoost() * 1000) * (boost * 1000) / 1000000;
      }
@ -177,7 +177,7 @@ public class RegexpBoostProcessor extends UpdateRequestProcessor {
    document.setField(boostFieldname, boost);

    if (log.isDebugEnabled()) {
      log.debug("Value " + boost + ", applied to field " + boostFieldname);
      log.debug("Value {}, applied to field {}", boost, boostFieldname);
    }
  }
}
@ -255,9 +255,7 @@ public class TolerantUpdateProcessor extends UpdateRequestProcessor {
      NamedList<String> remoteErrMetadata = remoteErr.getMetadata();

      if (null == remoteErrMetadata) {
        if (log.isWarnEnabled()) {
          log.warn("remote error has no metadata to aggregate: {} {}", remoteErr.getMessage(), remoteErr);
        }
        log.warn("remote error has no metadata to aggregate: {} {}", remoteErr.getMessage(), remoteErr);
        continue;
      }
@ -127,9 +127,7 @@ public class URLClassifyProcessor extends UpdateRequestProcessor {
      }
      log.debug("{}", document);
    } catch (MalformedURLException | URISyntaxException e) {
      if (log.isWarnEnabled()) {
        log.warn("cannot get the normalized url for '{}' due to {}", url, e.getMessage());
      }
      log.warn("cannot get the normalized url for '{}' due to {}", url, e.getMessage());
    }
  }
}