mirror of https://github.com/apache/lucene.git
SOLR-12690: Regularize LoggerFactory declarations
parent 5eab1c3c68
commit 8cde1277ec
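The declaration style this commit converges on, visible throughout the diff below, is a private static final SLF4J logger named log, resolved through MethodHandles so the class name never has to be repeated. A minimal sketch for reference (the class and method names here are hypothetical; only the logger line itself is taken verbatim from the changes below):

    import java.lang.invoke.MethodHandles;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    // Hypothetical example class; only the logger declaration mirrors the commit.
    public class ExampleComponent {
      // Canonical form: private, static, final, lower-case name "log",
      // bound to the enclosing class via MethodHandles.lookup().lookupClass().
      private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

      void doWork() {
        // SLF4J parameterized logging; the argument is only rendered if INFO is enabled.
        log.info("doing work for {}", "example");
      }
    }

Using MethodHandles.lookup().lookupClass() instead of a hard-coded class literal keeps the declaration identical in every file, which is what makes the source-pattern check below possible.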
@@ -60,7 +60,7 @@ import org.apache.lucene.util.RamUsageEstimator;
*/
public class DirectoryTaxonomyReader extends TaxonomyReader implements Accountable {

- private static final Logger logger = Logger.getLogger(DirectoryTaxonomyReader.class.getName());
+ private static final Logger log = Logger.getLogger(DirectoryTaxonomyReader.class.getName());

private static final int DEFAULT_CACHE_VALUE = 4000;

@@ -421,8 +421,8 @@ public class DirectoryTaxonomyReader extends TaxonomyReader implements Accountab
}
sb.append(i +": "+category.toString()+"\n");
} catch (IOException e) {
- if (logger.isLoggable(Level.FINEST)) {
-   logger.log(Level.FINEST, e.getMessage(), e);
+ if (log.isLoggable(Level.FINEST)) {
+   log.log(Level.FINEST, e.getMessage(), e);
}
}
}

@@ -66,7 +66,7 @@ def lineSplitter = ~$/[\r\n]+/$;
def singleLineSplitter = ~$/\n\r?/$;
def licenseMatcher = Defaults.createDefaultMatcher();
def validLoggerPattern = ~$/(?s)\b(private\s|static\s|final\s){3}+\s*Logger\s+\p{javaJavaIdentifierStart}+\s+=\s+\QLoggerFactory.getLogger(MethodHandles.lookup().lookupClass());\E/$;
- def validLoggerNamePattern = ~$/(?s)\b(private\s|static\s|final\s){3}+\s*Logger\s+(log|LOG)+\s+=\s+\QLoggerFactory.getLogger(MethodHandles.lookup().lookupClass());\E/$;
+ def validLoggerNamePattern = ~$/(?s)\b(private\s|static\s|final\s){3}+\s*Logger\s+log+\s+=\s+\QLoggerFactory.getLogger(MethodHandles.lookup().lookupClass());\E/$;
def packagePattern = ~$/(?m)^\s*package\s+org\.apache.*;/$;
def xmlTagPattern = ~$/(?m)\s*<[a-zA-Z].*/$;
def sourceHeaderPattern = ~$/\[source\b.*/$;
@@ -170,11 +170,8 @@ ant.fileScanner{
if (!validLoggerPattern.matcher(text).find()) {
reportViolation(f, 'invalid logging pattern [not private static final, uses static class name]');
}
- if (f.toString().contains('solr/contrib') && !validLoggerNamePattern.matcher(text).find()) {
-   reportViolation(f, 'invalid logger name [not log or LOG]');
- }
- if (f.toString().contains('solr/core') && !validLoggerNamePattern.matcher(text).find()) {
-   reportViolation(f, 'invalid logger name [not log or LOG]');
+ if (!validLoggerNamePattern.matcher(text).find()) {
+   reportViolation(f, 'invalid logger name [log, uses static class name, not specialized logger]')
}
}
checkLicenseHeaderPrecedes(f, 'package', packagePattern, javaCommentPattern, text, ratDocument);

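For context, a small stand-alone sketch of how the tightened name pattern above behaves, written in plain Java rather than the build's Groovy, with illustrative names that are not part of the actual check:

    import java.util.regex.Pattern;

    public class LoggerNameCheckSketch {
      public static void main(String[] args) {
        // Same regex as validLoggerNamePattern above: only a logger named "log" passes.
        Pattern validLoggerNamePattern = Pattern.compile(
            "(?s)\\b(private\\s|static\\s|final\\s){3}+\\s*Logger\\s+log+\\s+=\\s+"
            + "\\QLoggerFactory.getLogger(MethodHandles.lookup().lookupClass());\\E");

        String accepted = "private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());";
        String rejected = "private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());";

        System.out.println(validLoggerNamePattern.matcher(accepted).find()); // true
        System.out.println(validLoggerNamePattern.matcher(rejected).find()); // false, would be reported as a violation
      }
    }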
@@ -333,6 +333,8 @@ Other Changes

* SOLR-12625: Combine SolrDocumentFetcher and RetrieveFieldsOptimizer (Erick Erickson)

+ * SOLR-12690: Regularize LoggerFactory declarations (Erick Erickson)
+
================== 7.4.0 ==================

Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

@@ -58,7 +58,7 @@ public class MailEntityProcessor extends EntityProcessorBase {
new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.ROOT);
private static final SimpleDateFormat afterFmt =
new SimpleDateFormat("yyyy/MM/dd", Locale.ROOT);
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

public static interface CustomFilter {
public SearchTerm getCustomSearch(Folder folder);
@ -112,7 +112,7 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
String varName = ConfigNameConstants.IMPORTER_NS_SHORT + "." + cname + "."
|
||||
+ DocBuilder.LAST_INDEX_TIME;
|
||||
Object varValue = context.getVariableResolver().resolve(varName);
|
||||
LOG.info(varName+"="+varValue);
|
||||
log.info(varName+"="+varValue);
|
||||
|
||||
if (varValue != null && !"".equals(varValue) &&
|
||||
!"".equals(getStringFromContext("fetchMailsSince", ""))) {
|
||||
|
@ -123,21 +123,21 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
try {
|
||||
tmp = sinceDateParser.parse((String)varValue);
|
||||
if (tmp.getTime() == 0) {
|
||||
LOG.info("Ignoring initial value "+varValue+" for "+varName+
|
||||
log.info("Ignoring initial value "+varValue+" for "+varName+
|
||||
" in favor of fetchMailsSince config parameter");
|
||||
tmp = null; // don't use this value
|
||||
}
|
||||
} catch (ParseException e) {
|
||||
// probably ok to ignore this since we have other options below
|
||||
// as we're just trying to figure out if the date is 0
|
||||
LOG.warn("Failed to parse "+varValue+" from "+varName+" due to: "+e);
|
||||
log.warn("Failed to parse "+varValue+" from "+varName+" due to: "+e);
|
||||
}
|
||||
|
||||
if (tmp == null) {
|
||||
// favor fetchMailsSince in this case because the value from
|
||||
// dataimport.properties is the default/init value
|
||||
varValue = getStringFromContext("fetchMailsSince", "");
|
||||
LOG.info("fetchMailsSince="+varValue);
|
||||
log.info("fetchMailsSince="+varValue);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -145,7 +145,7 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
varName = ConfigNameConstants.IMPORTER_NS_SHORT + "."
|
||||
+ DocBuilder.LAST_INDEX_TIME;
|
||||
varValue = context.getVariableResolver().resolve(varName);
|
||||
LOG.info(varName+"="+varValue);
|
||||
log.info(varName+"="+varValue);
|
||||
}
|
||||
|
||||
if (varValue != null && varValue instanceof String) {
|
||||
|
@ -157,13 +157,13 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
if (lastIndexTime == null)
|
||||
lastIndexTime = getStringFromContext("fetchMailsSince", "");
|
||||
|
||||
LOG.info("Using lastIndexTime "+lastIndexTime+" for mail import");
|
||||
log.info("Using lastIndexTime "+lastIndexTime+" for mail import");
|
||||
|
||||
this.fetchMailsSince = null;
|
||||
if (lastIndexTime != null && lastIndexTime.length() > 0) {
|
||||
try {
|
||||
fetchMailsSince = sinceDateParser.parse(lastIndexTime);
|
||||
LOG.info("Parsed fetchMailsSince=" + lastIndexTime);
|
||||
log.info("Parsed fetchMailsSince=" + lastIndexTime);
|
||||
} catch (ParseException e) {
|
||||
throw new DataImportHandlerException(DataImportHandlerException.SEVERE,
|
||||
"Invalid value for fetchMailSince: " + lastIndexTime, e);
|
||||
|
@ -247,7 +247,7 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
addPartToDocument(mail, row, true);
|
||||
return row;
|
||||
} catch (Exception e) {
|
||||
LOG.error("Failed to convert message [" + mail.toString()
|
||||
log.error("Failed to convert message [" + mail.toString()
|
||||
+ "] to document due to: " + e, e);
|
||||
return null;
|
||||
}
|
||||
|
@ -269,7 +269,7 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
for (int i = 0; i < count; i++)
|
||||
addPartToDocument(mp.getBodyPart(i), row, false);
|
||||
} else {
|
||||
LOG.warn("Multipart content is a not an instance of Multipart! Content is: "
|
||||
log.warn("Multipart content is a not an instance of Multipart! Content is: "
|
||||
+ (content != null ? content.getClass().getName() : "null")
|
||||
+ ". Typically, this is due to the Java Activation JAR being loaded by the wrong classloader.");
|
||||
}
|
||||
|
@ -374,7 +374,7 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
|
||||
if (("imap".equals(protocol) || "imaps".equals(protocol))
|
||||
&& "imap.gmail.com".equals(host)) {
|
||||
LOG.info("Consider using 'gimaps' protocol instead of '" + protocol
|
||||
log.info("Consider using 'gimaps' protocol instead of '" + protocol
|
||||
+ "' for enabling GMail specific extensions for " + host);
|
||||
}
|
||||
|
||||
|
@ -399,14 +399,14 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
} else {
|
||||
mailbox.connect(host, user, password);
|
||||
}
|
||||
LOG.info("Connected to " + user + "'s mailbox on " + host);
|
||||
log.info("Connected to " + user + "'s mailbox on " + host);
|
||||
|
||||
return true;
|
||||
} catch (MessagingException e) {
|
||||
String errMsg = String.format(Locale.ENGLISH,
|
||||
"Failed to connect to %s server %s as user %s due to: %s", protocol,
|
||||
host, user, e.toString());
|
||||
LOG.error(errMsg, e);
|
||||
log.error(errMsg, e);
|
||||
throw new DataImportHandlerException(DataImportHandlerException.SEVERE,
|
||||
errMsg, e);
|
||||
}
|
||||
|
@ -431,7 +431,7 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
}
|
||||
|
||||
private void logConfig() {
|
||||
if (!LOG.isInfoEnabled()) return;
|
||||
if (!log.isInfoEnabled()) return;
|
||||
|
||||
String lineSep = System.getProperty("line.separator");
|
||||
|
||||
|
@ -474,7 +474,7 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
.append(lineSep);
|
||||
config.append("includeSharedFolders : ").append(includeSharedFolders)
|
||||
.append(lineSep);
|
||||
LOG.info(config.toString());
|
||||
log.info(config.toString());
|
||||
}
|
||||
|
||||
class FolderIterator implements Iterator<Folder> {
|
||||
|
@ -515,22 +515,22 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
hasMessages = (next.getType() & Folder.HOLDS_MESSAGES) != 0;
|
||||
next.open(Folder.READ_ONLY);
|
||||
lastFolder = next;
|
||||
LOG.info("Opened folder : " + fullName);
|
||||
log.info("Opened folder : " + fullName);
|
||||
}
|
||||
if (recurse && ((next.getType() & Folder.HOLDS_FOLDERS) != 0)) {
|
||||
Folder[] children = next.list();
|
||||
LOG.info("Added its children to list : ");
|
||||
log.info("Added its children to list : ");
|
||||
for (int i = children.length - 1; i >= 0; i--) {
|
||||
folders.add(0, children[i]);
|
||||
LOG.info("child name : " + children[i].getFullName());
|
||||
log.info("child name : " + children[i].getFullName());
|
||||
}
|
||||
if (children.length == 0) LOG.info("NO children : ");
|
||||
if (children.length == 0) log.info("NO children : ");
|
||||
}
|
||||
}
|
||||
} while (!hasMessages);
|
||||
return next;
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Failed to read folders due to: "+e);
|
||||
log.warn("Failed to read folders due to: "+e);
|
||||
// throw new
|
||||
// DataImportHandlerException(DataImportHandlerException.SEVERE,
|
||||
// "Folder open failed", e);
|
||||
|
@ -568,12 +568,12 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
try {
|
||||
Folder[] ufldrs = mailbox.getUserNamespaces(null);
|
||||
if (ufldrs != null) {
|
||||
LOG.info("Found " + ufldrs.length + " user namespace folders");
|
||||
log.info("Found " + ufldrs.length + " user namespace folders");
|
||||
for (Folder ufldr : ufldrs)
|
||||
folders.add(ufldr);
|
||||
}
|
||||
} catch (MessagingException me) {
|
||||
LOG.warn("Messaging exception retrieving user namespaces: "
|
||||
log.warn("Messaging exception retrieving user namespaces: "
|
||||
+ me.getMessage());
|
||||
}
|
||||
}
|
||||
|
@ -582,12 +582,12 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
try {
|
||||
Folder[] sfldrs = mailbox.getSharedNamespaces();
|
||||
if (sfldrs != null) {
|
||||
LOG.info("Found " + sfldrs.length + " shared namespace folders");
|
||||
log.info("Found " + sfldrs.length + " shared namespace folders");
|
||||
for (Folder sfldr : sfldrs)
|
||||
folders.add(sfldr);
|
||||
}
|
||||
} catch (MessagingException me) {
|
||||
LOG.warn("Messaging exception retrieving shared namespaces: "
|
||||
log.warn("Messaging exception retrieving shared namespaces: "
|
||||
+ me.getMessage());
|
||||
}
|
||||
}
|
||||
|
@ -620,14 +620,14 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
this.batchSize = batchSize;
|
||||
SearchTerm st = getSearchTerm();
|
||||
|
||||
LOG.info("SearchTerm=" + st);
|
||||
log.info("SearchTerm=" + st);
|
||||
|
||||
if (st != null || folder instanceof GmailFolder) {
|
||||
doBatching = false;
|
||||
// Searching can still take a while even though we're only pulling
|
||||
// envelopes; unless you're using gmail server-side filter, which is
|
||||
// fast
|
||||
LOG.info("Searching folder " + folder.getName() + " for messages");
|
||||
log.info("Searching folder " + folder.getName() + " for messages");
|
||||
final RTimer searchTimer = new RTimer();
|
||||
|
||||
// If using GMail, speed up the envelope processing by doing a
|
||||
|
@ -642,11 +642,11 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
|
||||
if (folder instanceof GmailFolder && fetchMailsSince != null) {
|
||||
String afterCrit = "after:" + afterFmt.format(fetchMailsSince);
|
||||
LOG.info("Added server-side gmail filter: " + afterCrit);
|
||||
log.info("Added server-side gmail filter: " + afterCrit);
|
||||
Message[] afterMessages = folder.search(new GmailRawSearchTerm(
|
||||
afterCrit));
|
||||
|
||||
LOG.info("GMail server-side filter found " + afterMessages.length
|
||||
log.info("GMail server-side filter found " + afterMessages.length
|
||||
+ " messages received " + afterCrit + " in folder " + folder.getName());
|
||||
|
||||
// now pass in the server-side filtered messages to the local filter
|
||||
|
@ -657,11 +657,11 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
totalInFolder = messagesInCurBatch.length;
|
||||
folder.fetch(messagesInCurBatch, fp);
|
||||
current = 0;
|
||||
LOG.info("Total messages : " + totalInFolder);
|
||||
LOG.info("Search criteria applied. Batching disabled. Took {} (ms)", searchTimer.getTime());
|
||||
log.info("Total messages : " + totalInFolder);
|
||||
log.info("Search criteria applied. Batching disabled. Took {} (ms)", searchTimer.getTime());
|
||||
} else {
|
||||
totalInFolder = folder.getMessageCount();
|
||||
LOG.info("Total messages : " + totalInFolder);
|
||||
log.info("Total messages : " + totalInFolder);
|
||||
getNextBatch(batchSize, folder);
|
||||
}
|
||||
} catch (MessagingException e) {
|
||||
|
@ -685,8 +685,8 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
folder.fetch(messagesInCurBatch, fp);
|
||||
current = 0;
|
||||
currentBatch++;
|
||||
LOG.info("Current Batch : " + currentBatch);
|
||||
LOG.info("Messages in this batch : " + messagesInCurBatch.length);
|
||||
log.info("Current Batch : " + currentBatch);
|
||||
log.info("Messages in this batch : " + messagesInCurBatch.length);
|
||||
}
|
||||
|
||||
public boolean hasNext() {
|
||||
|
@ -741,7 +741,7 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
|
||||
@SuppressWarnings("serial")
|
||||
public SearchTerm getCustomSearch(final Folder folder) {
|
||||
LOG.info("Building mail filter for messages in " + folder.getName()
|
||||
log.info("Building mail filter for messages in " + folder.getName()
|
||||
+ " that occur after " + sinceDateParser.format(since));
|
||||
return new DateTerm(ComparisonTerm.GE, since) {
|
||||
private int matched = 0;
|
||||
|
@ -761,15 +761,15 @@ public class MailEntityProcessor extends EntityProcessorBase {
|
|||
} else {
|
||||
String msgDateStr = (msgDate != null) ? sinceDateParser.format(msgDate) : "null";
|
||||
String sinceDateStr = (since != null) ? sinceDateParser.format(since) : "null";
|
||||
LOG.debug("Message " + msg.getSubject() + " was received at [" + msgDateStr
|
||||
log.debug("Message " + msg.getSubject() + " was received at [" + msgDateStr
|
||||
+ "], since filter is [" + sinceDateStr + "]");
|
||||
}
|
||||
} catch (MessagingException e) {
|
||||
LOG.warn("Failed to process message due to: "+e, e);
|
||||
log.warn("Failed to process message due to: "+e, e);
|
||||
}
|
||||
|
||||
if (seen % 100 == 0) {
|
||||
LOG.info("Matched " + matched + " of " + seen + " messages since: "
|
||||
log.info("Matched " + matched + " of " + seen + " messages since: "
|
||||
+ sinceDateParser.format(since));
|
||||
}
|
||||
|
||||
|
|
|
@@ -36,7 +36,7 @@ import java.util.Properties;
* @since solr 3.1
*/
public class BinURLDataSource extends DataSource<InputStream>{
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

private String baseUrl;
private int connectionTimeout = CONNECTION_TIMEOUT;
@ -61,14 +61,14 @@ public class BinURLDataSource extends DataSource<InputStream>{
|
|||
try {
|
||||
connectionTimeout = Integer.parseInt(cTimeout);
|
||||
} catch (NumberFormatException e) {
|
||||
LOG.warn("Invalid connection timeout: " + cTimeout);
|
||||
log.warn("Invalid connection timeout: " + cTimeout);
|
||||
}
|
||||
}
|
||||
if (rTimeout != null) {
|
||||
try {
|
||||
readTimeout = Integer.parseInt(rTimeout);
|
||||
} catch (NumberFormatException e) {
|
||||
LOG.warn("Invalid read timeout: " + rTimeout);
|
||||
log.warn("Invalid read timeout: " + rTimeout);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -79,13 +79,13 @@ public class BinURLDataSource extends DataSource<InputStream>{
|
|||
try {
|
||||
if (URIMETHOD.matcher(query).find()) url = new URL(query);
|
||||
else url = new URL(baseUrl + query);
|
||||
LOG.debug("Accessing URL: " + url.toString());
|
||||
log.debug("Accessing URL: " + url.toString());
|
||||
URLConnection conn = url.openConnection();
|
||||
conn.setConnectTimeout(connectionTimeout);
|
||||
conn.setReadTimeout(readTimeout);
|
||||
return conn.getInputStream();
|
||||
} catch (Exception e) {
|
||||
LOG.error("Exception thrown while getting data", e);
|
||||
log.error("Exception thrown while getting data", e);
|
||||
wrapAndThrow (SEVERE, e, "Exception in invoking url " + url);
|
||||
return null;//unreachable
|
||||
}
|
||||
|
|
|
@@ -68,7 +68,7 @@ import static org.apache.solr.handler.dataimport.DataImporter.IMPORT_CMD;
public class DataImportHandler extends RequestHandlerBase implements
SolrCoreAware {

- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

private DataImporter importer;

@ -107,7 +107,7 @@ public class DataImportHandler extends RequestHandlerBase implements
|
|||
debugEnabled = StrUtils.parseBool((String)initArgs.get(ENABLE_DEBUG), true);
|
||||
importer = new DataImporter(core, myName);
|
||||
} catch (Exception e) {
|
||||
LOG.error( DataImporter.MSG.LOAD_EXP, e);
|
||||
log.error( DataImporter.MSG.LOAD_EXP, e);
|
||||
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, DataImporter.MSG.LOAD_EXP, e);
|
||||
}
|
||||
}
|
||||
|
@ -257,7 +257,7 @@ public class DataImportHandler extends RequestHandlerBase implements
|
|||
try {
|
||||
return super.upload(document);
|
||||
} catch (RuntimeException e) {
|
||||
LOG.error("Exception while adding: " + document, e);
|
||||
log.error("Exception while adding: " + document, e);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -74,8 +74,8 @@ public class DataImporter {
IDLE, RUNNING_FULL_DUMP, RUNNING_DELTA_DUMP, JOB_FAILED
}

- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
- private static final XMLErrorLogger XMLLOG = new XMLErrorLogger(LOG);
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final XMLErrorLogger XMLLOG = new XMLErrorLogger(log);

private Status status = Status.IDLE;
private DIHConfiguration config;
@ -125,7 +125,7 @@ public class DataImporter {
|
|||
} else if(dataconfigFile!=null) {
|
||||
is = new InputSource(core.getResourceLoader().openResource(dataconfigFile));
|
||||
is.setSystemId(SystemIdResolver.createSystemIdFromResourceName(dataconfigFile));
|
||||
LOG.info("Loading DIH Configuration: " + dataconfigFile);
|
||||
log.info("Loading DIH Configuration: " + dataconfigFile);
|
||||
}
|
||||
if(is!=null) {
|
||||
config = loadDataConfig(is);
|
||||
|
@ -143,12 +143,12 @@ public class DataImporter {
|
|||
if (name.equals("datasource")) {
|
||||
success = true;
|
||||
NamedList dsConfig = (NamedList) defaultParams.getVal(position);
|
||||
LOG.info("Getting configuration for Global Datasource...");
|
||||
log.info("Getting configuration for Global Datasource...");
|
||||
Map<String,String> props = new HashMap<>();
|
||||
for (int i = 0; i < dsConfig.size(); i++) {
|
||||
props.put(dsConfig.getName(i), dsConfig.getVal(i).toString());
|
||||
}
|
||||
LOG.info("Adding properties to datasource: " + props);
|
||||
log.info("Adding properties to datasource: " + props);
|
||||
dsProps.put((String) dsConfig.get("name"), props);
|
||||
}
|
||||
position++;
|
||||
|
@ -201,7 +201,7 @@ public class DataImporter {
|
|||
dbf.setXIncludeAware(true);
|
||||
dbf.setNamespaceAware(true);
|
||||
} catch( UnsupportedOperationException e ) {
|
||||
LOG.warn( "XML parser doesn't support XInclude option" );
|
||||
log.warn( "XML parser doesn't support XInclude option" );
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -224,7 +224,7 @@ public class DataImporter {
|
|||
}
|
||||
|
||||
dihcfg = readFromXml(document);
|
||||
LOG.info("Data Configuration loaded successfully");
|
||||
log.info("Data Configuration loaded successfully");
|
||||
} catch (Exception e) {
|
||||
throw new DataImportHandlerException(SEVERE,
|
||||
"Data Config problem: " + e.getMessage(), e);
|
||||
|
@ -414,7 +414,7 @@ public class DataImporter {
|
|||
}
|
||||
|
||||
public void doFullImport(DIHWriter writer, RequestInfo requestParams) {
|
||||
LOG.info("Starting Full Import");
|
||||
log.info("Starting Full Import");
|
||||
setStatus(Status.RUNNING_FULL_DUMP);
|
||||
try {
|
||||
DIHProperties dihPropWriter = createPropertyWriter();
|
||||
|
@ -425,7 +425,7 @@ public class DataImporter {
|
|||
if (!requestParams.isDebug())
|
||||
cumulativeStatistics.add(docBuilder.importStatistics);
|
||||
} catch (Exception e) {
|
||||
SolrException.log(LOG, "Full Import failed", e);
|
||||
SolrException.log(log, "Full Import failed", e);
|
||||
docBuilder.handleError("Full Import failed", e);
|
||||
} finally {
|
||||
setStatus(Status.IDLE);
|
||||
|
@ -442,7 +442,7 @@ public class DataImporter {
|
|||
}
|
||||
|
||||
public void doDeltaImport(DIHWriter writer, RequestInfo requestParams) {
|
||||
LOG.info("Starting Delta Import");
|
||||
log.info("Starting Delta Import");
|
||||
setStatus(Status.RUNNING_DELTA_DUMP);
|
||||
try {
|
||||
DIHProperties dihPropWriter = createPropertyWriter();
|
||||
|
@ -453,7 +453,7 @@ public class DataImporter {
|
|||
if (!requestParams.isDebug())
|
||||
cumulativeStatistics.add(docBuilder.importStatistics);
|
||||
} catch (Exception e) {
|
||||
LOG.error("Delta Import Failed", e);
|
||||
log.error("Delta Import Failed", e);
|
||||
docBuilder.handleError("Delta Import Failed", e);
|
||||
} finally {
|
||||
setStatus(Status.IDLE);
|
||||
|
@ -475,7 +475,7 @@ public class DataImporter {
|
|||
return;
|
||||
}
|
||||
if (!importLock.tryLock()){
|
||||
LOG.warn("Import command failed . another import is running");
|
||||
log.warn("Import command failed . another import is running");
|
||||
return;
|
||||
}
|
||||
try {
|
||||
|
|
|
@@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory;
*/
public class DateFormatTransformer extends Transformer {
private Map<String, SimpleDateFormat> fmtCache = new HashMap<>();
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

@Override
@SuppressWarnings("unchecked")
@ -80,7 +80,7 @@ public class DateFormatTransformer extends Transformer {
|
|||
}
|
||||
}
|
||||
} catch (ParseException e) {
|
||||
LOG.warn("Could not parse a Date field ", e);
|
||||
log.warn("Could not parse a Date field ", e);
|
||||
}
|
||||
}
|
||||
return aRow;
|
||||
|
|
|
@@ -50,7 +50,7 @@ import java.util.concurrent.atomic.AtomicLong;
*/
public class DocBuilder {

- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static final AtomicBoolean WARNED_ABOUT_INDEX_TIME_BOOSTS = new AtomicBoolean();

private static final Date EPOCH = new Date(0);
@ -265,7 +265,7 @@ public class DocBuilder {
|
|||
statusMessages.put(DataImporter.MSG.TOTAL_FAILED_DOCS, ""+ importStatistics.failedDocCount.get());
|
||||
|
||||
statusMessages.put("Time taken", getTimeElapsedSince(startTime.get()));
|
||||
LOG.info("Time taken = " + getTimeElapsedSince(startTime.get()));
|
||||
log.info("Time taken = " + getTimeElapsedSince(startTime.get()));
|
||||
} catch(Exception e)
|
||||
{
|
||||
throw new RuntimeException(e);
|
||||
|
@ -294,7 +294,7 @@ public class DocBuilder {
|
|||
|
||||
@SuppressWarnings("unchecked")
|
||||
private void finish(Map<String,Object> lastIndexTimeProps) {
|
||||
LOG.info("Import completed successfully");
|
||||
log.info("Import completed successfully");
|
||||
statusMessages.put("", "Indexing completed. Added/Updated: "
|
||||
+ importStatistics.docCount + " documents. Deleted "
|
||||
+ importStatistics.deletedDocCount + " documents.");
|
||||
|
@ -307,7 +307,7 @@ public class DocBuilder {
|
|||
try {
|
||||
propWriter.persist(lastIndexTimeProps);
|
||||
} catch (Exception e) {
|
||||
LOG.error("Could not write property file", e);
|
||||
log.error("Could not write property file", e);
|
||||
statusMessages.put("error", "Could not write property file. Delta imports will not work. " +
|
||||
"Make sure your conf directory is writable");
|
||||
}
|
||||
|
@ -340,7 +340,7 @@ public class DocBuilder {
|
|||
}
|
||||
|
||||
addStatusMessage("Identifying Delta");
|
||||
LOG.info("Starting delta collection.");
|
||||
log.info("Starting delta collection.");
|
||||
Set<Map<String, Object>> deletedKeys = new HashSet<>();
|
||||
Set<Map<String, Object>> allPks = collectDelta(currentEntityProcessorWrapper, resolver, deletedKeys);
|
||||
if (stop.get())
|
||||
|
@ -369,12 +369,12 @@ public class DocBuilder {
|
|||
}
|
||||
|
||||
if (!stop.get()) {
|
||||
LOG.info("Delta Import completed successfully");
|
||||
log.info("Delta Import completed successfully");
|
||||
}
|
||||
}
|
||||
|
||||
private void deleteAll(Set<Map<String, Object>> deletedKeys) {
|
||||
LOG.info("Deleting stale documents ");
|
||||
log.info("Deleting stale documents ");
|
||||
Iterator<Map<String, Object>> iter = deletedKeys.iterator();
|
||||
while (iter.hasNext()) {
|
||||
Map<String, Object> map = iter.next();
|
||||
|
@ -385,7 +385,7 @@ public class DocBuilder {
|
|||
key = map.get(keyName);
|
||||
}
|
||||
if(key == null) {
|
||||
LOG.warn("no key was available for deleted pk query. keyName = " + keyName);
|
||||
log.warn("no key was available for deleted pk query. keyName = " + keyName);
|
||||
continue;
|
||||
}
|
||||
writer.deleteDoc(key);
|
||||
|
@ -483,7 +483,7 @@ public class DocBuilder {
|
|||
if (seenDocCount <= reqParams.getStart())
|
||||
continue;
|
||||
if (seenDocCount > reqParams.getStart() + reqParams.getRows()) {
|
||||
LOG.info("Indexing stopped at docCount = " + importStatistics.docCount);
|
||||
log.info("Indexing stopped at docCount = " + importStatistics.docCount);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -548,7 +548,7 @@ public class DocBuilder {
|
|||
importStatistics.skipDocCount.getAndIncrement();
|
||||
doc = null;
|
||||
} else {
|
||||
SolrException.log(LOG, "Exception while processing: "
|
||||
SolrException.log(log, "Exception while processing: "
|
||||
+ epw.getEntity().getName() + " document : " + doc, e);
|
||||
}
|
||||
if (e.getErrCode() == DataImportHandlerException.SEVERE)
|
||||
|
@ -620,9 +620,9 @@ public class DocBuilder {
|
|||
if (value != null) {
|
||||
String message = "Ignoring document boost: " + value + " as index-time boosts are not supported anymore";
|
||||
if (WARNED_ABOUT_INDEX_TIME_BOOSTS.compareAndSet(false, true)) {
|
||||
LOG.warn(message);
|
||||
log.warn(message);
|
||||
} else {
|
||||
LOG.debug(message);
|
||||
log.debug(message);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -759,7 +759,7 @@ public class DocBuilder {
|
|||
"deltaQuery has no column to resolve to declared primary key pk='%s'",
|
||||
pk));
|
||||
}
|
||||
LOG.info(String.format(Locale.ROOT,
|
||||
log.info(String.format(Locale.ROOT,
|
||||
"Resolving deltaQuery column '%s' to match entity's declared pk '%s'",
|
||||
resolvedPk, pk));
|
||||
return resolvedPk;
|
||||
|
@ -796,7 +796,7 @@ public class DocBuilder {
|
|||
|
||||
// identifying the modified rows for this entity
|
||||
Map<String, Map<String, Object>> deltaSet = new HashMap<>();
|
||||
LOG.info("Running ModifiedRowKey() for Entity: " + epw.getEntity().getName());
|
||||
log.info("Running ModifiedRowKey() for Entity: " + epw.getEntity().getName());
|
||||
//get the modified rows in this entity
|
||||
String pk = epw.getEntity().getPk();
|
||||
while (true) {
|
||||
|
@ -844,8 +844,8 @@ public class DocBuilder {
|
|||
return new HashSet();
|
||||
}
|
||||
|
||||
LOG.info("Completed ModifiedRowKey for Entity: " + epw.getEntity().getName() + " rows obtained : " + deltaSet.size());
|
||||
LOG.info("Completed DeletedRowKey for Entity: " + epw.getEntity().getName() + " rows obtained : " + deletedSet.size());
|
||||
log.info("Completed ModifiedRowKey for Entity: " + epw.getEntity().getName() + " rows obtained : " + deltaSet.size());
|
||||
log.info("Completed DeletedRowKey for Entity: " + epw.getEntity().getName() + " rows obtained : " + deletedSet.size());
|
||||
|
||||
myModifiedPks.addAll(deltaSet.values());
|
||||
Set<Map<String, Object>> parentKeyList = new HashSet<>();
|
||||
|
@ -870,7 +870,7 @@ public class DocBuilder {
|
|||
return new HashSet();
|
||||
}
|
||||
}
|
||||
LOG.info("Completed parentDeltaQuery for Entity: " + epw.getEntity().getName());
|
||||
log.info("Completed parentDeltaQuery for Entity: " + epw.getEntity().getName());
|
||||
if (epw.getEntity().isDocRoot())
|
||||
deletedRows.addAll(deletedSet);
|
||||
|
||||
|
|
|
@@ -48,7 +48,7 @@ import java.util.Properties;
* @since 1.4
*/
public class FieldReaderDataSource extends DataSource<Reader> {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
protected VariableResolver vr;
protected String dataField;
private String encoding;
@ -77,7 +77,7 @@ public class FieldReaderDataSource extends DataSource<Reader> {
|
|||
// so let us just check it
|
||||
return readCharStream(clob);
|
||||
} catch (Exception e) {
|
||||
LOG.info("Unable to get data from CLOB");
|
||||
log.info("Unable to get data from CLOB");
|
||||
return null;
|
||||
|
||||
}
|
||||
|
@ -87,7 +87,7 @@ public class FieldReaderDataSource extends DataSource<Reader> {
|
|||
try {
|
||||
return getReader(blob);
|
||||
} catch (Exception e) {
|
||||
LOG.info("Unable to get data from BLOB");
|
||||
log.info("Unable to get data from BLOB");
|
||||
return null;
|
||||
|
||||
}
|
||||
|
|
|
@@ -45,7 +45,7 @@ import org.slf4j.LoggerFactory;
* @since 3.1
*/
public class FieldStreamDataSource extends DataSource<InputStream> {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
protected VariableResolver vr;
protected String dataField;
private EntityProcessorWrapper wrapper;
@ -67,7 +67,7 @@ public class FieldStreamDataSource extends DataSource<InputStream> {
|
|||
try {
|
||||
return blob.getBinaryStream();
|
||||
} catch (SQLException sqle) {
|
||||
LOG.info("Unable to get data from BLOB");
|
||||
log.info("Unable to get data from BLOB");
|
||||
return null;
|
||||
}
|
||||
} else if (o instanceof byte[]) {
|
||||
|
|
|
@@ -58,7 +58,7 @@ public class FileDataSource extends DataSource<Reader> {
*/
protected String encoding = null;

- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

@Override
public void init(Context context, Properties initProps) {
@ -102,13 +102,13 @@ public class FileDataSource extends DataSource<Reader> {
|
|||
File basePathFile;
|
||||
if (basePath == null) {
|
||||
basePathFile = new File(".").getAbsoluteFile();
|
||||
LOG.warn("FileDataSource.basePath is empty. " +
|
||||
log.warn("FileDataSource.basePath is empty. " +
|
||||
"Resolving to: " + basePathFile.getAbsolutePath());
|
||||
} else {
|
||||
basePathFile = new File(basePath);
|
||||
if (!basePathFile.isAbsolute()) {
|
||||
basePathFile = basePathFile.getAbsoluteFile();
|
||||
LOG.warn("FileDataSource.basePath is not absolute. Resolving to: "
|
||||
log.warn("FileDataSource.basePath is not absolute. Resolving to: "
|
||||
+ basePathFile.getAbsolutePath());
|
||||
}
|
||||
}
|
||||
|
@ -117,7 +117,7 @@ public class FileDataSource extends DataSource<Reader> {
|
|||
}
|
||||
|
||||
if (file.isFile() && file.canRead()) {
|
||||
LOG.debug("Accessing File: " + file.getAbsolutePath());
|
||||
log.debug("Accessing File: " + file.getAbsolutePath());
|
||||
return file;
|
||||
} else {
|
||||
throw new FileNotFoundException("Could not find file: " + query +
|
||||
|
|
|
@@ -51,7 +51,7 @@ import java.util.concurrent.TimeUnit;
*/
public class JdbcDataSource extends
DataSource<Iterator<Map<String, Object>>> {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

protected Callable<Connection> factory;

@ -87,7 +87,7 @@ public class JdbcDataSource extends
|
|||
if (batchSize == -1)
|
||||
batchSize = Integer.MIN_VALUE;
|
||||
} catch (NumberFormatException e) {
|
||||
LOG.warn("Invalid batch size: " + bsz);
|
||||
log.warn("Invalid batch size: " + bsz);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -172,7 +172,7 @@ public class JdbcDataSource extends
|
|||
return factory = new Callable<Connection>() {
|
||||
@Override
|
||||
public Connection call() throws Exception {
|
||||
LOG.info("Creating a connection for entity "
|
||||
log.info("Creating a connection for entity "
|
||||
+ context.getEntityAttribute(DataImporter.NAME) + " with URL: "
|
||||
+ url);
|
||||
long start = System.nanoTime();
|
||||
|
@ -199,13 +199,13 @@ public class JdbcDataSource extends
|
|||
try {
|
||||
c.close();
|
||||
} catch (SQLException e2) {
|
||||
LOG.warn("Exception closing connection during cleanup", e2);
|
||||
log.warn("Exception closing connection during cleanup", e2);
|
||||
}
|
||||
|
||||
throw new DataImportHandlerException(SEVERE, "Exception initializing SQL connection", e);
|
||||
}
|
||||
}
|
||||
LOG.info("Time taken for getConnection(): "
|
||||
log.info("Time taken for getConnection(): "
|
||||
+ TimeUnit.MILLISECONDS.convert(System.nanoTime() - start, TimeUnit.NANOSECONDS));
|
||||
return c;
|
||||
}
|
||||
|
@ -289,7 +289,7 @@ public class JdbcDataSource extends
|
|||
}
|
||||
|
||||
private void logError(String msg, Exception e) {
|
||||
LOG.warn(msg, e);
|
||||
log.warn(msg, e);
|
||||
}
|
||||
|
||||
protected List<String> readFieldNames(ResultSetMetaData metaData)
|
||||
|
@ -316,10 +316,10 @@ public class JdbcDataSource extends
|
|||
try {
|
||||
Connection c = getConnection();
|
||||
stmt = createStatement(c, batchSize, maxRows);
|
||||
LOG.debug("Executing SQL: " + query);
|
||||
log.debug("Executing SQL: " + query);
|
||||
long start = System.nanoTime();
|
||||
resultSet = executeStatement(stmt, query);
|
||||
LOG.trace("Time taken for sql :"
|
||||
log.trace("Time taken for sql :"
|
||||
+ TimeUnit.MILLISECONDS.convert(System.nanoTime() - start, TimeUnit.NANOSECONDS));
|
||||
setColNames(resultSet);
|
||||
} catch (Exception e) {
|
||||
|
@ -541,7 +541,7 @@ public class JdbcDataSource extends
|
|||
protected void finalize() throws Throwable {
|
||||
try {
|
||||
if(!isClosed){
|
||||
LOG.error("JdbcDataSource was not closed prior to finalize(), indicates a bug -- POSSIBLE RESOURCE LEAK!!!");
|
||||
log.error("JdbcDataSource was not closed prior to finalize(), indicates a bug -- POSSIBLE RESOURCE LEAK!!!");
|
||||
close();
|
||||
}
|
||||
} finally {
|
||||
|
@ -575,7 +575,7 @@ public class JdbcDataSource extends
|
|||
conn.close();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOG.error("Ignoring Error when closing connection", e);
|
||||
log.error("Ignoring Error when closing connection", e);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -33,7 +33,7 @@ import java.util.Map;
* @since solr 1.4
*/
public class LogTransformer extends Transformer {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

@Override
public Object transformRow(Map<String, Object> row, Context ctx) {
@ -43,20 +43,20 @@ public class LogTransformer extends Transformer {
|
|||
if (expr == null || level == null) return row;
|
||||
|
||||
if ("info".equals(level)) {
|
||||
if (LOG.isInfoEnabled())
|
||||
LOG.info(ctx.replaceTokens(expr));
|
||||
if (log.isInfoEnabled())
|
||||
log.info(ctx.replaceTokens(expr));
|
||||
} else if ("trace".equals(level)) {
|
||||
if (LOG.isTraceEnabled())
|
||||
LOG.trace(ctx.replaceTokens(expr));
|
||||
if (log.isTraceEnabled())
|
||||
log.trace(ctx.replaceTokens(expr));
|
||||
} else if ("warn".equals(level)) {
|
||||
if (LOG.isWarnEnabled())
|
||||
LOG.warn(ctx.replaceTokens(expr));
|
||||
if (log.isWarnEnabled())
|
||||
log.warn(ctx.replaceTokens(expr));
|
||||
} else if ("error".equals(level)) {
|
||||
if (LOG.isErrorEnabled())
|
||||
LOG.error(ctx.replaceTokens(expr));
|
||||
if (log.isErrorEnabled())
|
||||
log.error(ctx.replaceTokens(expr));
|
||||
} else if ("debug".equals(level)) {
|
||||
if (LOG.isDebugEnabled())
|
||||
LOG.debug(ctx.replaceTokens(expr));
|
||||
if (log.isDebugEnabled())
|
||||
log.debug(ctx.replaceTokens(expr));
|
||||
}
|
||||
|
||||
return row;
|
||||
|
|
|
@@ -40,7 +40,7 @@ import java.util.regex.Pattern;
* @see Pattern
*/
public class RegexTransformer extends Transformer {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

@Override
@SuppressWarnings("unchecked")
@ -166,7 +166,7 @@ public class RegexTransformer extends Transformer {
|
|||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Parsing failed for field : " + columnName, e);
|
||||
log.warn("Parsing failed for field : " + columnName, e);
|
||||
}
|
||||
}
|
||||
return l == null ? map: l;
|
||||
|
|
|
@@ -59,7 +59,7 @@ import org.slf4j.LoggerFactory;
*/
public class SolrEntityProcessor extends EntityProcessorBase {

- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

public static final String SOLR_SERVER = "url";
public static final String QUERY = "query";
@ -118,13 +118,13 @@ public class SolrEntityProcessor extends EntityProcessorBase {
|
|||
.withHttpClient(client)
|
||||
.withResponseParser(new XMLResponseParser())
|
||||
.build();
|
||||
LOG.info("using XMLResponseParser");
|
||||
log.info("using XMLResponseParser");
|
||||
} else {
|
||||
// TODO: it doesn't matter for this impl when passing a client currently, but we should close this!
|
||||
solrClient = new Builder(url.toExternalForm())
|
||||
.withHttpClient(client)
|
||||
.build();
|
||||
LOG.info("using BinaryResponseParser");
|
||||
log.info("using BinaryResponseParser");
|
||||
}
|
||||
} catch (MalformedURLException e) {
|
||||
throw new DataImportHandlerException(DataImportHandlerException.SEVERE, e);
|
||||
|
|
|
@@ -42,7 +42,7 @@ import java.util.regex.Pattern;
* @since solr 1.3
*/
public class SqlEntityProcessor extends EntityProcessorBase {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

protected DataSource<Iterator<Map<String, Object>>> dataSource;

@ -61,7 +61,7 @@ public class SqlEntityProcessor extends EntityProcessorBase {
|
|||
} catch (DataImportHandlerException e) {
|
||||
throw e;
|
||||
} catch (Exception e) {
|
||||
LOG.error( "The query failed '" + q + "'", e);
|
||||
log.error( "The query failed '" + q + "'", e);
|
||||
throw new DataImportHandlerException(DataImportHandlerException.SEVERE, e);
|
||||
}
|
||||
}
|
||||
|
@ -103,7 +103,7 @@ public class SqlEntityProcessor extends EntityProcessorBase {
|
|||
String parentDeltaQuery = context.getEntityAttribute(PARENT_DELTA_QUERY);
|
||||
if (parentDeltaQuery == null)
|
||||
return null;
|
||||
LOG.info("Running parentDeltaQuery for Entity: "
|
||||
log.info("Running parentDeltaQuery for Entity: "
|
||||
+ context.getEntityAttribute("name"));
|
||||
initQuery(context.replaceTokens(parentDeltaQuery));
|
||||
}
|
||||
|
@ -119,7 +119,7 @@ public class SqlEntityProcessor extends EntityProcessorBase {
|
|||
String deltaImportQuery = context.getEntityAttribute(DELTA_IMPORT_QUERY);
|
||||
if(deltaImportQuery != null) return deltaImportQuery;
|
||||
}
|
||||
LOG.warn("'deltaImportQuery' attribute is not specified for entity : "+ entityName);
|
||||
log.warn("'deltaImportQuery' attribute is not specified for entity : "+ entityName);
|
||||
return getDeltaImportQuery(queryString);
|
||||
}
|
||||
|
||||
|
|
|
@@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory;
*/
public class TemplateTransformer extends Transformer {

- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private Map<String ,List<String>> templateVsVars = new HashMap<>();

@Override
@ -76,7 +76,7 @@ public class TemplateTransformer extends Transformer {
|
|||
}
|
||||
for (String v : variables) {
|
||||
if (resolver.resolve(v) == null) {
|
||||
LOG.warn("Unable to resolve variable: " + v
|
||||
log.warn("Unable to resolve variable: " + v
|
||||
+ " while parsing expression: " + expr);
|
||||
resolvable = false;
|
||||
}
|
||||
|
|
|
@@ -41,7 +41,7 @@ import java.util.regex.Pattern;
* @since solr 1.4
*/
public class URLDataSource extends DataSource<Reader> {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

private String baseUrl;

@ -72,14 +72,14 @@ public class URLDataSource extends DataSource<Reader> {
|
|||
try {
|
||||
connectionTimeout = Integer.parseInt(cTimeout);
|
||||
} catch (NumberFormatException e) {
|
||||
LOG.warn("Invalid connection timeout: " + cTimeout);
|
||||
log.warn("Invalid connection timeout: " + cTimeout);
|
||||
}
|
||||
}
|
||||
if (rTimeout != null) {
|
||||
try {
|
||||
readTimeout = Integer.parseInt(rTimeout);
|
||||
} catch (NumberFormatException e) {
|
||||
LOG.warn("Invalid read timeout: " + rTimeout);
|
||||
log.warn("Invalid read timeout: " + rTimeout);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -91,7 +91,7 @@ public class URLDataSource extends DataSource<Reader> {
|
|||
if (URIMETHOD.matcher(query).find()) url = new URL(query);
|
||||
else url = new URL(baseUrl + query);
|
||||
|
||||
LOG.debug("Accessing URL: " + url.toString());
|
||||
log.debug("Accessing URL: " + url.toString());
|
||||
|
||||
URLConnection conn = url.openConnection();
|
||||
conn.setConnectTimeout(connectionTimeout);
|
||||
|
@ -112,7 +112,7 @@ public class URLDataSource extends DataSource<Reader> {
|
|||
DataImporter.QUERY_COUNT.get().incrementAndGet();
|
||||
return new InputStreamReader(in, enc);
|
||||
} catch (Exception e) {
|
||||
LOG.error("Exception thrown while getting data", e);
|
||||
log.error("Exception thrown while getting data", e);
|
||||
throw new DataImportHandlerException(DataImportHandlerException.SEVERE,
|
||||
"Exception in invoking url " + url, e);
|
||||
}
|
||||
|
|
|
@@ -54,8 +54,8 @@ import java.util.concurrent.atomic.AtomicReference;
* @since solr 1.3
*/
public class XPathEntityProcessor extends EntityProcessorBase {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
- private static final XMLErrorLogger xmllog = new XMLErrorLogger(LOG);
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final XMLErrorLogger xmllog = new XMLErrorLogger(log);

private static final Map<String, Object> END_MARKER = new HashMap<>();

@ -136,7 +136,7 @@ public class XPathEntityProcessor extends EntityProcessorBase {
|
|||
// some XML parsers are broken and don't close the byte stream (but they should according to spec)
|
||||
IOUtils.closeQuietly(xsltSource.getInputStream());
|
||||
}
|
||||
LOG.info("Using xslTransformer: "
|
||||
log.info("Using xslTransformer: "
|
||||
+ xslTransformer.getClass().getName());
|
||||
} catch (Exception e) {
|
||||
throw new DataImportHandlerException(SEVERE,
|
||||
|
@ -293,10 +293,10 @@ public class XPathEntityProcessor extends EntityProcessorBase {
|
|||
if (ABORT.equals(onError)) {
|
||||
wrapAndThrow(SEVERE, e);
|
||||
} else if (SKIP.equals(onError)) {
|
||||
if (LOG.isDebugEnabled()) LOG.debug("Skipping url : " + s, e);
|
||||
if (log.isDebugEnabled()) log.debug("Skipping url : " + s, e);
|
||||
wrapAndThrow(DataImportHandlerException.SKIP, e);
|
||||
} else {
|
||||
LOG.warn("Failed for url : " + s, e);
|
||||
log.warn("Failed for url : " + s, e);
|
||||
rowIterator = Collections.EMPTY_LIST.iterator();
|
||||
return;
|
||||
}
|
||||
|
@ -313,7 +313,7 @@ public class XPathEntityProcessor extends EntityProcessorBase {
|
|||
} else if (SKIP.equals(onError)) {
|
||||
wrapAndThrow(DataImportHandlerException.SKIP, e);
|
||||
} else {
|
||||
LOG.warn("Failed for url : " + s, e);
|
||||
log.warn("Failed for url : " + s, e);
|
||||
rowIterator = Collections.EMPTY_LIST.iterator();
|
||||
return;
|
||||
}
|
||||
|
@ -330,12 +330,12 @@ public class XPathEntityProcessor extends EntityProcessorBase {
|
|||
if (ABORT.equals(onError)) {
|
||||
wrapAndThrow(SEVERE, e, msg);
|
||||
} else if (SKIP.equals(onError)) {
|
||||
LOG.warn(msg, e);
|
||||
log.warn(msg, e);
|
||||
Map<String, Object> map = new HashMap<>();
|
||||
map.put(DocBuilder.SKIP_DOC, Boolean.TRUE);
|
||||
rows.add(map);
|
||||
} else if (CONTINUE.equals(onError)) {
|
||||
LOG.warn(msg, e);
|
||||
log.warn(msg, e);
|
||||
}
|
||||
}
|
||||
rowIterator = rows.iterator();
|
||||
|
@ -457,7 +457,7 @@ public class XPathEntityProcessor extends EntityProcessorBase {
|
|||
try {
|
||||
while (!blockingQueue.offer(row, blockingQueueTimeOut, blockingQueueTimeOutUnits)) {
|
||||
if (isEnd.get()) return;
|
||||
LOG.debug("Timeout elapsed writing records. Perhaps buffer size should be increased.");
|
||||
log.debug("Timeout elapsed writing records. Perhaps buffer size should be increased.");
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
return;
|
||||
|
@ -488,10 +488,10 @@ public class XPathEntityProcessor extends EntityProcessorBase {
|
|||
try {
|
||||
row = blockingQueue.poll(blockingQueueTimeOut, blockingQueueTimeOutUnits);
|
||||
if (row == null) {
|
||||
LOG.debug("Timeout elapsed reading records.");
|
||||
log.debug("Timeout elapsed reading records.");
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
LOG.debug("Caught InterruptedException while waiting for row. Aborting.");
|
||||
log.debug("Caught InterruptedException while waiting for row. Aborting.");
|
||||
isEnd.set(true);
|
||||
return null;
|
||||
}
|
||||
|
@ -507,7 +507,7 @@ public class XPathEntityProcessor extends EntityProcessorBase {
|
|||
} else if (SKIP.equals(onError)) {
|
||||
wrapAndThrow(DataImportHandlerException.SKIP, exp.get());
|
||||
} else {
|
||||
LOG.warn(msg, exp.get());
|
||||
log.warn(msg, exp.get());
|
||||
}
|
||||
}
|
||||
return null;
|
||||
|
|
|
@@ -57,8 +57,8 @@ import org.slf4j.LoggerFactory;
* @since solr 1.3
*/
public class XPathRecordReader {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
- private static final XMLErrorLogger XMLLOG = new XMLErrorLogger(LOG);
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final XMLErrorLogger XMLLOG = new XMLErrorLogger(log);

private Node rootNode = new Node("/", null);

@ -645,7 +645,7 @@ public class XPathRecordReader {
|
|||
} catch (IllegalArgumentException ex) {
|
||||
// Other implementations will likely throw this exception since "reuse-instance"
|
||||
// isimplementation specific.
|
||||
LOG.debug("Unable to set the 'reuse-instance' property for the input chain: " + factory);
|
||||
log.debug("Unable to set the 'reuse-instance' property for the input chain: " + factory);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -47,7 +47,7 @@ import org.w3c.dom.Element;
* @since solr 1.3
*/
public class DIHConfiguration {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

// TODO - remove from here and add it to entity
private final String deleteQuery;
@ -106,7 +106,7 @@ public class DIHConfiguration {
|
|||
SchemaField sf = entry.getValue();
|
||||
if (!fields.containsKey(sf.getName())) {
|
||||
if (sf.isRequired()) {
|
||||
LOG.info(sf.getName() + " is a required field in SolrSchema . But not found in DataConfig");
|
||||
log.info(sf.getName() + " is a required field in SolrSchema . But not found in DataConfig");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -114,7 +114,7 @@ public class DIHConfiguration {
|
|||
EntityField fld = entry.getValue();
|
||||
SchemaField field = getSchemaField(fld.getName());
|
||||
if (field == null && !isSpecialCommand(fld.getName())) {
|
||||
LOG.info("The field :" + fld.getName() + " present in DataConfig does not have a counterpart in Solr Schema");
|
||||
log.info("The field :" + fld.getName() + " present in DataConfig does not have a counterpart in Solr Schema");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -47,7 +47,7 @@ import java.util.Properties;
*/
public class TestSolrEntityProcessorEndToEnd extends AbstractDataImportHandlerTestCase {

- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

private static final String SOLR_CONFIG = "dataimport-solrconfig.xml";
private static final String SOLR_SCHEMA = "dataimport-schema.xml";
@ -136,7 +136,7 @@ public class TestSolrEntityProcessorEndToEnd extends AbstractDataImportHandlerTe
|
|||
try {
|
||||
deleteCore();
|
||||
} catch (Exception e) {
|
||||
LOG.error("Error deleting core", e);
|
||||
log.error("Error deleting core", e);
|
||||
}
|
||||
jetty.stop();
|
||||
instance.tearDown();
|
||||
|
@ -151,7 +151,7 @@ public class TestSolrEntityProcessorEndToEnd extends AbstractDataImportHandlerTe
|
|||
addDocumentsToSolr(SOLR_DOCS);
|
||||
runFullImport(generateDIHConfig("query='*:*' rows='2' fl='id,desc' onError='skip'", false));
|
||||
} catch (Exception e) {
|
||||
LOG.error(e.getMessage(), e);
|
||||
log.error(e.getMessage(), e);
|
||||
fail(e.getMessage());
|
||||
}
|
||||
|
||||
|
@ -169,7 +169,7 @@ public class TestSolrEntityProcessorEndToEnd extends AbstractDataImportHandlerTe
|
|||
map.put("rows", "50");
|
||||
runFullImport(generateDIHConfig("query='*:*' fq='desc:Description1*,desc:Description*2' rows='2'", false), map);
|
||||
} catch (Exception e) {
|
||||
LOG.error(e.getMessage(), e);
|
||||
log.error(e.getMessage(), e);
|
||||
fail(e.getMessage());
|
||||
}
|
||||
|
||||
|
@ -184,7 +184,7 @@ public class TestSolrEntityProcessorEndToEnd extends AbstractDataImportHandlerTe
|
|||
addDocumentsToSolr(generateSolrDocuments(7));
|
||||
runFullImport(generateDIHConfig("query='*:*' fl='id' rows='2'"+(random().nextBoolean() ?" cursorMark='true' sort='id asc'":""), false));
|
||||
} catch (Exception e) {
|
||||
LOG.error(e.getMessage(), e);
|
||||
log.error(e.getMessage(), e);
|
||||
fail(e.getMessage());
|
||||
}
|
||||
|
||||
|
@ -221,7 +221,7 @@ public class TestSolrEntityProcessorEndToEnd extends AbstractDataImportHandlerTe
|
|||
addDocumentsToSolr(DOCS);
|
||||
runFullImport(getDihConfigTagsInnerEntity());
|
||||
} catch (Exception e) {
|
||||
LOG.error(e.getMessage(), e);
|
||||
log.error(e.getMessage(), e);
|
||||
fail(e.getMessage());
|
||||
} finally {
|
||||
MockDataSource.clearCache();
|
||||
|
@ -244,7 +244,7 @@ public class TestSolrEntityProcessorEndToEnd extends AbstractDataImportHandlerTe
|
|||
try {
|
||||
runFullImport(generateDIHConfig("query='*:*' rows='2' fl='id,desc' onError='skip'", true /* use dead server */));
|
||||
} catch (Exception e) {
|
||||
LOG.error(e.getMessage(), e);
|
||||
log.error(e.getMessage(), e);
|
||||
fail(e.getMessage());
|
||||
}
|
||||
|
||||
|
@ -258,7 +258,7 @@ public class TestSolrEntityProcessorEndToEnd extends AbstractDataImportHandlerTe
|
|||
runFullImport(generateDIHConfig("query='bogus:3' rows='2' fl='id,desc' onError='"+
|
||||
(random().nextBoolean() ? "abort" : "justtogetcoverage")+"'", false));
|
||||
} catch (Exception e) {
|
||||
LOG.error(e.getMessage(), e);
|
||||
log.error(e.getMessage(), e);
|
||||
fail(e.getMessage());
|
||||
}
|
||||
|
||||
|
@ -278,7 +278,7 @@ public class TestSolrEntityProcessorEndToEnd extends AbstractDataImportHandlerTe
|
|||
runFullImport(generateDIHConfig(attrs,
|
||||
false));
|
||||
} catch (Exception e) {
|
||||
LOG.error(e.getMessage(), e);
|
||||
log.error(e.getMessage(), e);
|
||||
fail(e.getMessage());
|
||||
}
|
||||
|
||||
|
|
|
@@ -89,17 +89,17 @@ public class JettySolrRunner {
private final JettyConfig config;
private final String solrHome;
private final Properties nodeProperties;


private volatile boolean startedBefore = false;

private LinkedList<FilterHolder> extraFilters;

private static final String excludePatterns = "/css/.+,/js/.+,/img/.+,/tpl/.+";

private int proxyPort = -1;

public static class DebugFilter implements Filter {
- public final static Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

private AtomicLong nRequests = new AtomicLong();

@@ -36,7 +36,7 @@ import org.slf4j.LoggerFactory;
* but internally it is synchronized so that only one thread can perform any operation.
*/
public class LockTree {
- private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+ private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private final Node root = new Node(null, LockLevel.CLUSTER, null);

public void clear() {
@ -141,7 +141,7 @@ public class LockTree {
|
|||
void unlock(LockImpl lockObject) {
|
||||
if (myLock == lockObject) myLock = null;
|
||||
else {
|
||||
LOG.info("Unlocked multiple times : {}", lockObject.toString());
|
||||
log.info("Unlocked multiple times : {}", lockObject.toString());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -171,7 +171,7 @@ public class LockTree {
|
|||
|
||||
void clear() {
|
||||
if (myLock != null) {
|
||||
LOG.warn("lock_is_leaked at" + constructPath(new LinkedList<>()));
|
||||
log.warn("lock_is_leaked at" + constructPath(new LinkedList<>()));
|
||||
myLock = null;
|
||||
}
|
||||
for (Node node : children.values()) node.clear();
|
||||
|
|
|
@@ -43,7 +43,7 @@ import org.slf4j.LoggerFactory;
  * This is inefficient! But the API on this class is kind of muddy..
  */
 public class OverseerTaskQueue extends ZkDistributedQueue {
-  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   private static final String RESPONSE_PREFIX = "qnr-" ;
 

@@ -70,7 +70,7 @@ public class OverseerTaskQueue extends ZkDistributedQueue {
       if (data != null) {
         ZkNodeProps message = ZkNodeProps.load(data);
         if (message.containsKey(requestIdKey)) {
-          LOG.debug("Looking for {}, found {}", message.get(requestIdKey), requestId);
+          log.debug("Looking for {}, found {}", message.get(requestIdKey), requestId);
           if(message.get(requestIdKey).equals(requestId)) return true;
         }
       }

@@ -96,7 +96,7 @@ public class OverseerTaskQueue extends ZkDistributedQueue {
     if (zookeeper.exists(responsePath, true)) {
       zookeeper.setData(responsePath, event.getBytes(), true);
     } else {
-      LOG.info("Response ZK path: " + responsePath + " doesn't exist."
+      log.info("Response ZK path: " + responsePath + " doesn't exist."
           + " Requestor may have disconnected from ZooKeeper");
     }
     try {

@@ -136,7 +136,7 @@ public class OverseerTaskQueue extends ZkDistributedQueue {
         return;
       }
       // If latchEventType is not null, only fire if the type matches
-      LOG.debug("{} fired on path {} state {} latchEventType {}", event.getType(), event.getPath(), event.getState(), latchEventType);
+      log.debug("{} fired on path {} state {} latchEventType {}", event.getType(), event.getPath(), event.getState(), latchEventType);
       if (latchEventType == null || event.getType() == latchEventType) {
         lock.lock();
         try {

@@ -234,7 +234,7 @@ public class OverseerTaskQueue extends ZkDistributedQueue {
       throws KeeperException, InterruptedException {
     ArrayList<QueueEvent> topN = new ArrayList<>();
 
-    LOG.debug("Peeking for top {} elements. ExcludeSet: {}", n, excludeSet);
+    log.debug("Peeking for top {} elements. ExcludeSet: {}", n, excludeSet);
     Timer.Context time;
     if (waitMillis == Long.MAX_VALUE) time = stats.time(dir + "_peekTopN_wait_forever");
     else time = stats.time(dir + "_peekTopN_wait" + waitMillis);

@@ -252,13 +252,13 @@ public class OverseerTaskQueue extends ZkDistributedQueue {
   }
 
   private static void printQueueEventsListElementIds(ArrayList<QueueEvent> topN) {
-    if (LOG.isDebugEnabled() && !topN.isEmpty()) {
+    if (log.isDebugEnabled() && !topN.isEmpty()) {
       StringBuilder sb = new StringBuilder("[");
       for (QueueEvent queueEvent : topN) {
         sb.append(queueEvent.getId()).append(", ");
       }
       sb.append("]");
-      LOG.debug("Returning topN elements: {}", sb.toString());
+      log.debug("Returning topN elements: {}", sb.toString());
     }
   }
 
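The printQueueEventsListElementIds change keeps its existing isDebugEnabled() guard, which is the idiom to reach for when the message itself is expensive to build. A small sketch of the same pattern under assumed names (DebugGuardExample and items are illustrative only):

    import java.lang.invoke.MethodHandles;
    import java.util.List;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class DebugGuardExample {
      private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

      static void logIds(List<String> items) {
        // Guard first so the StringBuilder loop is skipped entirely when DEBUG is off.
        if (log.isDebugEnabled() && !items.isEmpty()) {
          StringBuilder sb = new StringBuilder("[");
          for (String id : items) {
            sb.append(id).append(", ");
          }
          sb.append("]");
          log.debug("Returning topN elements: {}", sb);
        }
      }
    }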
@@ -96,7 +96,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
     }
   }
 
-  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   private int waitForUpdatesWithStaleStatePauseMilliSeconds = Integer.getInteger("solr.cloud.wait-for-updates-with-stale-state-pause", 2500);
   private int maxRetries = 500;

@@ -171,13 +171,13 @@ public class RecoveryStrategy implements Runnable, Closeable {
     if (prevSendPreRecoveryHttpUriRequest != null) {
       prevSendPreRecoveryHttpUriRequest.abort();
     }
-    LOG.warn("Stopping recovery for core=[{}] coreNodeName=[{}]", coreName, coreZkNodeName);
+    log.warn("Stopping recovery for core=[{}] coreNodeName=[{}]", coreName, coreZkNodeName);
   }
 
   final private void recoveryFailed(final SolrCore core,
       final ZkController zkController, final String baseUrl,
       final String shardZkNodeName, final CoreDescriptor cd) throws Exception {
-    SolrException.log(LOG, "Recovery failed - I give up.");
+    SolrException.log(log, "Recovery failed - I give up.");
     try {
       zkController.publish(cd, Replica.State.RECOVERY_FAILED);
     } finally {
@ -200,7 +200,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
|
||||
final String leaderUrl = getReplicateLeaderUrl(leaderprops);
|
||||
|
||||
LOG.info("Attempting to replicate from [{}].", leaderUrl);
|
||||
log.info("Attempting to replicate from [{}].", leaderUrl);
|
||||
|
||||
// send commit
|
||||
commitOnLeader(leaderUrl);
|
||||
|
@ -231,14 +231,14 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
}
|
||||
|
||||
// solrcloud_debug
|
||||
if (LOG.isDebugEnabled()) {
|
||||
if (log.isDebugEnabled()) {
|
||||
try {
|
||||
RefCounted<SolrIndexSearcher> searchHolder = core
|
||||
.getNewestSearcher(false);
|
||||
SolrIndexSearcher searcher = searchHolder.get();
|
||||
Directory dir = core.getDirectoryFactory().get(core.getIndexDir(), DirContext.META_DATA, null);
|
||||
try {
|
||||
LOG.debug(core.getCoreContainer()
|
||||
log.debug(core.getCoreContainer()
|
||||
.getZkController().getNodeName()
|
||||
+ " replicated "
|
||||
+ searcher.count(new MatchAllDocsQuery())
|
||||
|
@ -255,7 +255,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
searchHolder.decref();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOG.debug("Error in solrcloud_debug block", e);
|
||||
log.debug("Error in solrcloud_debug block", e);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -283,21 +283,21 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
try (SolrCore core = cc.getCore(coreName)) {
|
||||
|
||||
if (core == null) {
|
||||
SolrException.log(LOG, "SolrCore not found - cannot recover:" + coreName);
|
||||
SolrException.log(log, "SolrCore not found - cannot recover:" + coreName);
|
||||
return;
|
||||
}
|
||||
MDCLoggingContext.setCore(core);
|
||||
|
||||
LOG.info("Starting recovery process. recoveringAfterStartup=" + recoveringAfterStartup);
|
||||
log.info("Starting recovery process. recoveringAfterStartup=" + recoveringAfterStartup);
|
||||
|
||||
try {
|
||||
doRecovery(core);
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
SolrException.log(LOG, "", e);
|
||||
SolrException.log(log, "", e);
|
||||
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
|
||||
} catch (Exception e) {
|
||||
LOG.error("", e);
|
||||
log.error("", e);
|
||||
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
|
||||
}
|
||||
} finally {
|
||||
|
@ -317,7 +317,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
boolean successfulRecovery = false;
|
||||
|
||||
// if (core.getUpdateHandler().getUpdateLog() != null) {
|
||||
// SolrException.log(LOG, "'replicate-only' recovery strategy should only be used if no update logs are present, but this core has one: "
|
||||
// SolrException.log(log, "'replicate-only' recovery strategy should only be used if no update logs are present, but this core has one: "
|
||||
// + core.getUpdateHandler().getUpdateLog());
|
||||
// return;
|
||||
// }
|
||||
|
@ -340,50 +340,50 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
if (cloudDesc.isLeader()) {
|
||||
assert cloudDesc.getReplicaType() != Replica.Type.PULL;
|
||||
// we are now the leader - no one else must have been suitable
|
||||
LOG.warn("We have not yet recovered - but we are now the leader!");
|
||||
LOG.info("Finished recovery process.");
|
||||
log.warn("We have not yet recovered - but we are now the leader!");
|
||||
log.info("Finished recovery process.");
|
||||
zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
LOG.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(), leaderUrl,
|
||||
log.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(), leaderUrl,
|
||||
ourUrl);
|
||||
zkController.publish(core.getCoreDescriptor(), Replica.State.RECOVERING);
|
||||
|
||||
if (isClosed()) {
|
||||
LOG.info("Recovery for core {} has been closed", core.getName());
|
||||
log.info("Recovery for core {} has been closed", core.getName());
|
||||
break;
|
||||
}
|
||||
LOG.info("Starting Replication Recovery.");
|
||||
log.info("Starting Replication Recovery.");
|
||||
|
||||
try {
|
||||
LOG.info("Stopping background replicate from leader process");
|
||||
log.info("Stopping background replicate from leader process");
|
||||
zkController.stopReplicationFromLeader(coreName);
|
||||
replicate(zkController.getNodeName(), core, leaderprops);
|
||||
|
||||
if (isClosed()) {
|
||||
LOG.info("Recovery for core {} has been closed", core.getName());
|
||||
log.info("Recovery for core {} has been closed", core.getName());
|
||||
break;
|
||||
}
|
||||
|
||||
LOG.info("Replication Recovery was successful.");
|
||||
log.info("Replication Recovery was successful.");
|
||||
successfulRecovery = true;
|
||||
} catch (Exception e) {
|
||||
SolrException.log(LOG, "Error while trying to recover", e);
|
||||
SolrException.log(log, "Error while trying to recover", e);
|
||||
}
|
||||
|
||||
} catch (Exception e) {
|
||||
SolrException.log(LOG, "Error while trying to recover. core=" + coreName, e);
|
||||
SolrException.log(log, "Error while trying to recover. core=" + coreName, e);
|
||||
} finally {
|
||||
if (successfulRecovery) {
|
||||
LOG.info("Restaring background replicate from leader process");
|
||||
log.info("Restaring background replicate from leader process");
|
||||
zkController.startReplicationFromLeader(coreName, false);
|
||||
LOG.info("Registering as Active after recovery.");
|
||||
log.info("Registering as Active after recovery.");
|
||||
try {
|
||||
zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
|
||||
} catch (Exception e) {
|
||||
LOG.error("Could not publish as ACTIVE after succesful recovery", e);
|
||||
log.error("Could not publish as ACTIVE after succesful recovery", e);
|
||||
successfulRecovery = false;
|
||||
}
|
||||
|
||||
|
@ -401,24 +401,24 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
try {
|
||||
|
||||
if (isClosed()) {
|
||||
LOG.info("Recovery for core {} has been closed", core.getName());
|
||||
log.info("Recovery for core {} has been closed", core.getName());
|
||||
break;
|
||||
}
|
||||
|
||||
LOG.error("Recovery failed - trying again... (" + retries + ")");
|
||||
log.error("Recovery failed - trying again... (" + retries + ")");
|
||||
|
||||
retries++;
|
||||
if (retries >= maxRetries) {
|
||||
SolrException.log(LOG, "Recovery failed - max retries exceeded (" + retries + ").");
|
||||
SolrException.log(log, "Recovery failed - max retries exceeded (" + retries + ").");
|
||||
try {
|
||||
recoveryFailed(core, zkController, baseUrl, coreZkNodeName, core.getCoreDescriptor());
|
||||
} catch (Exception e) {
|
||||
SolrException.log(LOG, "Could not publish that recovery failed", e);
|
||||
SolrException.log(log, "Could not publish that recovery failed", e);
|
||||
}
|
||||
break;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
SolrException.log(LOG, "An error has occurred during recovery", e);
|
||||
SolrException.log(log, "An error has occurred during recovery", e);
|
||||
}
|
||||
|
||||
try {
|
||||
|
@ -427,25 +427,25 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
// will always be the minimum of the two (12). Since we sleep at 5 seconds sub-intervals in
|
||||
// order to check if we were closed, 12 is chosen as the maximum loopCount (5s * 12 = 1m).
|
||||
int loopCount = retries < 4 ? (int) Math.min(Math.pow(2, retries), 12) : 12;
|
||||
LOG.info("Wait [{}] seconds before trying to recover again (attempt={})",
|
||||
log.info("Wait [{}] seconds before trying to recover again (attempt={})",
|
||||
TimeUnit.MILLISECONDS.toSeconds(loopCount * startingRecoveryDelayMilliSeconds), retries);
|
||||
for (int i = 0; i < loopCount; i++) {
|
||||
if (isClosed()) {
|
||||
LOG.info("Recovery for core {} has been closed", core.getName());
|
||||
log.info("Recovery for core {} has been closed", core.getName());
|
||||
break; // check if someone closed us
|
||||
}
|
||||
Thread.sleep(startingRecoveryDelayMilliSeconds);
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
LOG.warn("Recovery was interrupted.", e);
|
||||
log.warn("Recovery was interrupted.", e);
|
||||
close = true;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
// We skip core.seedVersionBuckets(); We don't have a transaction log
|
||||
LOG.info("Finished recovery process, successful=[{}]", Boolean.toString(successfulRecovery));
|
||||
log.info("Finished recovery process, successful=[{}]", Boolean.toString(successfulRecovery));
|
||||
}
|
||||
|
||||
// TODO: perhaps make this grab a new core each time through the loop to handle core reloads?
|
||||
|
@ -455,7 +455,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
UpdateLog ulog;
|
||||
ulog = core.getUpdateHandler().getUpdateLog();
|
||||
if (ulog == null) {
|
||||
SolrException.log(LOG, "No UpdateLog found - cannot recover.");
|
||||
SolrException.log(log, "No UpdateLog found - cannot recover.");
|
||||
recoveryFailed(core, zkController, baseUrl, coreZkNodeName,
|
||||
core.getCoreDescriptor());
|
||||
return;
|
||||
|
@ -468,7 +468,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
try (UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates()) {
|
||||
recentVersions = recentUpdates.getVersions(ulog.getNumRecordsToKeep());
|
||||
} catch (Exception e) {
|
||||
SolrException.log(LOG, "Corrupt tlog - ignoring.", e);
|
||||
SolrException.log(log, "Corrupt tlog - ignoring.", e);
|
||||
recentVersions = new ArrayList<>(0);
|
||||
}
|
||||
|
||||
|
@ -484,13 +484,13 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
}
|
||||
|
||||
if (oldIdx > 0) {
|
||||
LOG.info("####### Found new versions added after startup: num=[{}]", oldIdx);
|
||||
LOG.info("###### currentVersions=[{}]",recentVersions);
|
||||
log.info("####### Found new versions added after startup: num=[{}]", oldIdx);
|
||||
log.info("###### currentVersions=[{}]",recentVersions);
|
||||
}
|
||||
|
||||
LOG.info("###### startupVersions=[{}]", startingVersions);
|
||||
log.info("###### startupVersions=[{}]", startingVersions);
|
||||
} catch (Exception e) {
|
||||
SolrException.log(LOG, "Error getting recent versions.", e);
|
||||
SolrException.log(log, "Error getting recent versions.", e);
|
||||
recentVersions = new ArrayList<>(0);
|
||||
}
|
||||
}
|
||||
|
@ -504,11 +504,11 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
// this means we were previously doing a full index replication
|
||||
// that probably didn't complete and buffering updates in the
|
||||
// meantime.
|
||||
LOG.info("Looks like a previous replication recovery did not complete - skipping peer sync.");
|
||||
log.info("Looks like a previous replication recovery did not complete - skipping peer sync.");
|
||||
firstTime = false; // skip peersync
|
||||
}
|
||||
} catch (Exception e) {
|
||||
SolrException.log(LOG, "Error trying to get ulog starting operation.", e);
|
||||
SolrException.log(log, "Error trying to get ulog starting operation.", e);
|
||||
firstTime = false; // skip peersync
|
||||
}
|
||||
}
|
||||
|
@ -524,7 +524,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
CloudDescriptor cloudDesc = core.getCoreDescriptor().getCloudDescriptor();
|
||||
final Replica leader = pingLeader(ourUrl, core.getCoreDescriptor(), true);
|
||||
if (isClosed()) {
|
||||
LOG.info("RecoveryStrategy has been closed");
|
||||
log.info("RecoveryStrategy has been closed");
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -534,17 +534,17 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
}
|
||||
if (cloudDesc.isLeader()) {
|
||||
// we are now the leader - no one else must have been suitable
|
||||
LOG.warn("We have not yet recovered - but we are now the leader!");
|
||||
LOG.info("Finished recovery process.");
|
||||
log.warn("We have not yet recovered - but we are now the leader!");
|
||||
log.info("Finished recovery process.");
|
||||
zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
|
||||
return;
|
||||
}
|
||||
|
||||
LOG.info("Begin buffering updates. core=[{}]", coreName);
|
||||
log.info("Begin buffering updates. core=[{}]", coreName);
|
||||
// recalling buffer updates will drop the old buffer tlog
|
||||
ulog.bufferUpdates();
|
||||
|
||||
LOG.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(), leader.getCoreUrl(),
|
||||
log.info("Publishing state of core [{}] as recovering, leader is [{}] and I am [{}]", core.getName(), leader.getCoreUrl(),
|
||||
ourUrl);
|
||||
zkController.publish(core.getCoreDescriptor(), Replica.State.RECOVERING);
|
||||
|
||||
|
@ -559,14 +559,14 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
}
|
||||
|
||||
if (isClosed()) {
|
||||
LOG.info("RecoveryStrategy has been closed");
|
||||
log.info("RecoveryStrategy has been closed");
|
||||
break;
|
||||
}
|
||||
|
||||
sendPrepRecoveryCmd(leader.getBaseUrl(), leader.getCoreName(), slice);
|
||||
|
||||
if (isClosed()) {
|
||||
LOG.info("RecoveryStrategy has been closed");
|
||||
log.info("RecoveryStrategy has been closed");
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -584,7 +584,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
// first thing we just try to sync
|
||||
if (firstTime) {
|
||||
firstTime = false; // only try sync the first time through the loop
|
||||
LOG.info("Attempting to PeerSync from [{}] - recoveringAfterStartup=[{}]", leader.getCoreUrl(), recoveringAfterStartup);
|
||||
log.info("Attempting to PeerSync from [{}] - recoveringAfterStartup=[{}]", leader.getCoreUrl(), recoveringAfterStartup);
|
||||
// System.out.println("Attempting to PeerSync from " + leaderUrl
|
||||
// + " i am:" + zkController.getNodeName());
|
||||
PeerSyncWithLeader peerSyncWithLeader = new PeerSyncWithLeader(core,
|
||||
|
@ -596,12 +596,12 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
// force open a new searcher
|
||||
core.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
|
||||
req.close();
|
||||
LOG.info("PeerSync stage of recovery was successful.");
|
||||
log.info("PeerSync stage of recovery was successful.");
|
||||
|
||||
// solrcloud_debug
|
||||
cloudDebugLog(core, "synced");
|
||||
|
||||
LOG.info("Replaying updates buffered during PeerSync.");
|
||||
log.info("Replaying updates buffered during PeerSync.");
|
||||
replay(core);
|
||||
|
||||
// sync success
|
||||
|
@ -609,54 +609,54 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
return;
|
||||
}
|
||||
|
||||
LOG.info("PeerSync Recovery was not successful - trying replication.");
|
||||
log.info("PeerSync Recovery was not successful - trying replication.");
|
||||
}
|
||||
|
||||
if (isClosed()) {
|
||||
LOG.info("RecoveryStrategy has been closed");
|
||||
log.info("RecoveryStrategy has been closed");
|
||||
break;
|
||||
}
|
||||
|
||||
LOG.info("Starting Replication Recovery.");
|
||||
log.info("Starting Replication Recovery.");
|
||||
|
||||
try {
|
||||
|
||||
replicate(zkController.getNodeName(), core, leader);
|
||||
|
||||
if (isClosed()) {
|
||||
LOG.info("RecoveryStrategy has been closed");
|
||||
log.info("RecoveryStrategy has been closed");
|
||||
break;
|
||||
}
|
||||
|
||||
replayFuture = replay(core);
|
||||
|
||||
if (isClosed()) {
|
||||
LOG.info("RecoveryStrategy has been closed");
|
||||
log.info("RecoveryStrategy has been closed");
|
||||
break;
|
||||
}
|
||||
|
||||
LOG.info("Replication Recovery was successful.");
|
||||
log.info("Replication Recovery was successful.");
|
||||
successfulRecovery = true;
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
LOG.warn("Recovery was interrupted", e);
|
||||
log.warn("Recovery was interrupted", e);
|
||||
close = true;
|
||||
} catch (Exception e) {
|
||||
SolrException.log(LOG, "Error while trying to recover", e);
|
||||
SolrException.log(log, "Error while trying to recover", e);
|
||||
}
|
||||
|
||||
} catch (Exception e) {
|
||||
SolrException.log(LOG, "Error while trying to recover. core=" + coreName, e);
|
||||
SolrException.log(log, "Error while trying to recover. core=" + coreName, e);
|
||||
} finally {
|
||||
if (successfulRecovery) {
|
||||
LOG.info("Registering as Active after recovery.");
|
||||
log.info("Registering as Active after recovery.");
|
||||
try {
|
||||
if (replicaType == Replica.Type.TLOG) {
|
||||
zkController.startReplicationFromLeader(coreName, true);
|
||||
}
|
||||
zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
|
||||
} catch (Exception e) {
|
||||
LOG.error("Could not publish as ACTIVE after succesful recovery", e);
|
||||
log.error("Could not publish as ACTIVE after succesful recovery", e);
|
||||
successfulRecovery = false;
|
||||
}
|
||||
|
||||
|
@ -674,24 +674,24 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
try {
|
||||
|
||||
if (isClosed()) {
|
||||
LOG.info("RecoveryStrategy has been closed");
|
||||
log.info("RecoveryStrategy has been closed");
|
||||
break;
|
||||
}
|
||||
|
||||
LOG.error("Recovery failed - trying again... (" + retries + ")");
|
||||
log.error("Recovery failed - trying again... (" + retries + ")");
|
||||
|
||||
retries++;
|
||||
if (retries >= maxRetries) {
|
||||
SolrException.log(LOG, "Recovery failed - max retries exceeded (" + retries + ").");
|
||||
SolrException.log(log, "Recovery failed - max retries exceeded (" + retries + ").");
|
||||
try {
|
||||
recoveryFailed(core, zkController, baseUrl, coreZkNodeName, core.getCoreDescriptor());
|
||||
} catch (Exception e) {
|
||||
SolrException.log(LOG, "Could not publish that recovery failed", e);
|
||||
SolrException.log(log, "Could not publish that recovery failed", e);
|
||||
}
|
||||
break;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
SolrException.log(LOG, "An error has occurred during recovery", e);
|
||||
SolrException.log(log, "An error has occurred during recovery", e);
|
||||
}
|
||||
|
||||
try {
|
||||
|
@ -700,17 +700,17 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
// will always be the minimum of the two (12). Since we sleep at 5 seconds sub-intervals in
|
||||
// order to check if we were closed, 12 is chosen as the maximum loopCount (5s * 12 = 1m).
|
||||
double loopCount = retries < 4 ? Math.min(Math.pow(2, retries), 12) : 12;
|
||||
LOG.info("Wait [{}] seconds before trying to recover again (attempt={})", loopCount, retries);
|
||||
log.info("Wait [{}] seconds before trying to recover again (attempt={})", loopCount, retries);
|
||||
for (int i = 0; i < loopCount; i++) {
|
||||
if (isClosed()) {
|
||||
LOG.info("RecoveryStrategy has been closed");
|
||||
log.info("RecoveryStrategy has been closed");
|
||||
break; // check if someone closed us
|
||||
}
|
||||
Thread.sleep(startingRecoveryDelayMilliSeconds);
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
LOG.warn("Recovery was interrupted.", e);
|
||||
log.warn("Recovery was interrupted.", e);
|
||||
close = true;
|
||||
}
|
||||
}
|
||||
|
@ -720,11 +720,11 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
// if replay was skipped (possibly to due pulling a full index from the leader),
|
||||
// then we still need to update version bucket seeds after recovery
|
||||
if (successfulRecovery && replayFuture == null) {
|
||||
LOG.info("Updating version bucket highest from index after successful recovery.");
|
||||
log.info("Updating version bucket highest from index after successful recovery.");
|
||||
core.seedVersionBuckets();
|
||||
}
|
||||
|
||||
LOG.info("Finished recovery process, successful=[{}]", Boolean.toString(successfulRecovery));
|
||||
log.info("Finished recovery process, successful=[{}]", Boolean.toString(successfulRecovery));
|
||||
}
|
||||
|
||||
private final Replica pingLeader(String ourUrl, CoreDescriptor coreDesc, boolean mayPutReplicaAsDown) throws Exception {
|
||||
|
@ -763,11 +763,11 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
SolrPingResponse resp = httpSolrClient.ping();
|
||||
return leaderReplica;
|
||||
} catch (IOException e) {
|
||||
LOG.info("Failed to connect leader {} on recovery, try again", leaderReplica.getBaseUrl());
|
||||
log.info("Failed to connect leader {} on recovery, try again", leaderReplica.getBaseUrl());
|
||||
Thread.sleep(500);
|
||||
} catch (Exception e) {
|
||||
if (e.getCause() instanceof IOException) {
|
||||
LOG.info("Failed to connect leader {} on recovery, try again", leaderReplica.getBaseUrl());
|
||||
log.info("Failed to connect leader {} on recovery, try again", leaderReplica.getBaseUrl());
|
||||
Thread.sleep(500);
|
||||
} else {
|
||||
return leaderReplica;
|
||||
|
@ -794,13 +794,13 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
Future<RecoveryInfo> future = core.getUpdateHandler().getUpdateLog().applyBufferedUpdates();
|
||||
if (future == null) {
|
||||
// no replay needed\
|
||||
LOG.info("No replay needed.");
|
||||
log.info("No replay needed.");
|
||||
} else {
|
||||
LOG.info("Replaying buffered documents.");
|
||||
log.info("Replaying buffered documents.");
|
||||
// wait for replay
|
||||
RecoveryInfo report = future.get();
|
||||
if (report.failed) {
|
||||
SolrException.log(LOG, "Replay failed");
|
||||
SolrException.log(log, "Replay failed");
|
||||
throw new SolrException(ErrorCode.SERVER_ERROR, "Replay failed");
|
||||
}
|
||||
}
|
||||
|
@ -815,7 +815,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
}
|
||||
|
||||
final private void cloudDebugLog(SolrCore core, String op) {
|
||||
if (!LOG.isDebugEnabled()) {
|
||||
if (!log.isDebugEnabled()) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
|
@ -824,12 +824,12 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
try {
|
||||
final int totalHits = searcher.count(new MatchAllDocsQuery());
|
||||
final String nodeName = core.getCoreContainer().getZkController().getNodeName();
|
||||
LOG.debug("[{}] {} [{} total hits]", nodeName, op, totalHits);
|
||||
log.debug("[{}] {} [{} total hits]", nodeName, op, totalHits);
|
||||
} finally {
|
||||
searchHolder.decref();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOG.debug("Error in solrcloud_debug block", e);
|
||||
log.debug("Error in solrcloud_debug block", e);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -861,7 +861,7 @@ public class RecoveryStrategy implements Runnable, Closeable {
|
|||
HttpUriRequestResponse mrr = client.httpUriRequest(prepCmd);
|
||||
prevSendPreRecoveryHttpUriRequest = mrr.httpUriRequest;
|
||||
|
||||
LOG.info("Sending prep recovery command to [{}]; [{}]", leaderBaseUrl, prepCmd.toString());
|
||||
log.info("Sending prep recovery command to [{}]; [{}]", leaderBaseUrl, prepCmd.toString());
|
||||
|
||||
mrr.future.get();
|
||||
}
|
||||
|
|
|
@@ -37,7 +37,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class ReplicateFromLeader {
-  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   private CoreContainer cc;
   private String coreName;

@@ -71,7 +71,7 @@ public class ReplicateFromLeader {
     } else if (uinfo.autoSoftCommmitMaxTime != -1) {
       pollIntervalStr = toPollIntervalStr(uinfo.autoSoftCommmitMaxTime/2);
     }
-    LOG.info("Will start replication from leader with poll interval: {}", pollIntervalStr );
+    log.info("Will start replication from leader with poll interval: {}", pollIntervalStr );
 
     NamedList<Object> slaveConfig = new NamedList<>();
     slaveConfig.add("fetchFromLeader", Boolean.TRUE);

@@ -114,7 +114,7 @@ public class ReplicateFromLeader {
       if (commitVersion == null) return null;
       else return commitVersion;
     } catch (Exception e) {
-      LOG.warn("Cannot get commit command version from index commit point ",e);
+      log.warn("Cannot get commit command version from index commit point ",e);
       return null;
     }
   }
@@ -150,7 +150,7 @@ public class SolrZkServer {
 // Allows us to set a default for the data dir before parsing
 // zoo.cfg (which validates that there is a dataDir)
 class SolrZkServerProps extends QuorumPeerConfig {
-  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
   private static final Pattern MISSING_MYID_FILE_PATTERN = Pattern.compile(".*myid file is missing$");
 
   String solrPort; // port that Solr is listening on

@@ -164,7 +164,7 @@ class SolrZkServerProps extends QuorumPeerConfig {
   public static Properties getProperties(String path) throws ConfigException {
     File configFile = new File(path);
 
-    LOG.info("Reading configuration from: " + configFile);
+    log.info("Reading configuration from: " + configFile);
 
     try {
       if (!configFile.exists()) {
@@ -55,7 +55,7 @@ import org.slf4j.LoggerFactory;
  * the results should be correct but inefficient
  */
 public class ZkDistributedQueue implements DistributedQueue {
-  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   static final String PREFIX = "qn-";
 

@@ -245,7 +245,7 @@ public class ZkDistributedQueue implements DistributedQueue {
         try {
           zookeeper.delete(ops.get(j).getPath(), -1, true);
         } catch (KeeperException.NoNodeException e2) {
-          LOG.debug("Can not remove node which is not exist : " + ops.get(j).getPath());
+          log.debug("Can not remove node which is not exist : " + ops.get(j).getPath());
         }
       }
     }

@@ -412,7 +412,7 @@ public class ZkDistributedQueue implements DistributedQueue {
     for (String childName : childNames) {
       // Check format
       if (!childName.regionMatches(0, PREFIX, 0, PREFIX.length())) {
-        LOG.debug("Found child node with improper name: " + childName);
+        log.debug("Found child node with improper name: " + childName);
         continue;
       }
       orderedChildren.add(childName);
@@ -57,7 +57,7 @@ import org.slf4j.LoggerFactory;
  * </ul>
  */
 public class HttpTriggerListener extends TriggerListenerBase {
-  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   private String urlTemplate;
   private String payloadTemplate;

@@ -158,7 +158,7 @@ public class HttpTriggerListener extends TriggerListenerBase {
     try {
       cloudManager.httpRequest(url, SolrRequest.METHOD.POST, headers, payload, timeout, followRedirects);
     } catch (IOException e) {
-      LOG.warn("Exception sending request for event " + event, e);
+      log.warn("Exception sending request for event " + event, e);
     }
   }
 }
@@ -28,11 +28,11 @@ import org.slf4j.LoggerFactory;
  * events to a log.
  */
 public class LoggingListener extends TriggerListenerBase {
-  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   @Override
   public void onEvent(TriggerEvent event, TriggerEventProcessorStage stage, String actionName, ActionContext context,
                       Throwable error, String message) {
-    LOG.info("{}: stage={}, actionName={}, event={}, error={}, messsage={}", config.name, stage, actionName, event, error, message);
+    log.info("{}: stage={}, actionName={}, event={}, error={}, messsage={}", config.name, stage, actionName, event, error, message);
   }
 }
@@ -49,7 +49,7 @@ import org.slf4j.LoggerFactory;
  * It handles state snapshot / restore in ZK.
  */
 public abstract class TriggerBase implements AutoScaling.Trigger {
-  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   protected final String name;
   protected SolrCloudManager cloudManager;

@@ -128,7 +128,7 @@ public abstract class TriggerBase implements AutoScaling.Trigger {
     } catch (AlreadyExistsException e) {
       // ignore
     } catch (InterruptedException | KeeperException | IOException e) {
-      LOG.warn("Exception checking ZK path " + ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH, e);
+      log.warn("Exception checking ZK path " + ZkStateReader.SOLR_AUTOSCALING_TRIGGER_STATE_PATH, e);
       throw e;
     }
     for (TriggerAction action : actions) {

@@ -240,7 +240,7 @@ public abstract class TriggerBase implements AutoScaling.Trigger {
       }
       lastState = state;
     } catch (InterruptedException | BadVersionException | AlreadyExistsException | IOException | KeeperException e) {
-      LOG.warn("Exception updating trigger state '" + path + "'", e);
+      log.warn("Exception updating trigger state '" + path + "'", e);
     }
   }
 

@@ -254,7 +254,7 @@ public abstract class TriggerBase implements AutoScaling.Trigger {
         data = versionedData.getData();
       }
     } catch (Exception e) {
-      LOG.warn("Exception getting trigger state '" + path + "'", e);
+      log.warn("Exception getting trigger state '" + path + "'", e);
     }
     if (data != null) {
       Map<String, Object> restoredState = (Map<String, Object>)Utils.fromJSON(data);
@@ -35,7 +35,7 @@ import org.slf4j.LoggerFactory;
  *
  */
 public class TriggerEventQueue {
-  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   public static final String ENQUEUE_TIME = "_enqueue_time_";
   public static final String DEQUEUE_TIME = "_dequeue_time_";

@@ -58,7 +58,7 @@ public class TriggerEventQueue {
       delegate.offer(data);
       return true;
     } catch (Exception e) {
-      LOG.warn("Exception adding event " + event + " to queue " + triggerName, e);
+      log.warn("Exception adding event " + event + " to queue " + triggerName, e);
       return false;
     }
   }

@@ -68,19 +68,19 @@ public class TriggerEventQueue {
     try {
       while ((data = delegate.peek()) != null) {
         if (data.length == 0) {
-          LOG.warn("ignoring empty data...");
+          log.warn("ignoring empty data...");
           continue;
         }
         try {
           Map<String, Object> map = (Map<String, Object>) Utils.fromJSON(data);
           return fromMap(map);
         } catch (Exception e) {
-          LOG.warn("Invalid event data, ignoring: " + new String(data, StandardCharsets.UTF_8));
+          log.warn("Invalid event data, ignoring: " + new String(data, StandardCharsets.UTF_8));
           continue;
         }
       }
     } catch (Exception e) {
-      LOG.warn("Exception peeking queue of trigger " + triggerName, e);
+      log.warn("Exception peeking queue of trigger " + triggerName, e);
     }
     return null;
   }

@@ -90,19 +90,19 @@ public class TriggerEventQueue {
     try {
       while ((data = delegate.poll()) != null) {
         if (data.length == 0) {
-          LOG.warn("ignoring empty data...");
+          log.warn("ignoring empty data...");
           continue;
         }
         try {
           Map<String, Object> map = (Map<String, Object>) Utils.fromJSON(data);
           return fromMap(map);
         } catch (Exception e) {
-          LOG.warn("Invalid event data, ignoring: " + new String(data, StandardCharsets.UTF_8));
+          log.warn("Invalid event data, ignoring: " + new String(data, StandardCharsets.UTF_8));
           continue;
         }
       }
     } catch (Exception e) {
-      LOG.warn("Exception polling queue of trigger " + triggerName, e);
+      log.warn("Exception polling queue of trigger " + triggerName, e);
     }
     return null;
   }
@@ -72,7 +72,7 @@ import org.slf4j.LoggerFactory;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
 
 public class HdfsDirectoryFactory extends CachingDirectoryFactory implements SolrCoreAware, SolrMetricProducer {
-  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   public static final String BLOCKCACHE_SLAB_COUNT = "solr.hdfs.blockcache.slab.count";
   public static final String BLOCKCACHE_DIRECT_MEMORY_ALLOCATION = "solr.hdfs.blockcache.direct.memory.allocation";
@ -155,12 +155,12 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
|
|||
if (this.hdfsDataDir != null && this.hdfsDataDir.length() == 0) {
|
||||
this.hdfsDataDir = null;
|
||||
} else {
|
||||
LOG.info(HDFS_HOME + "=" + this.hdfsDataDir);
|
||||
log.info(HDFS_HOME + "=" + this.hdfsDataDir);
|
||||
}
|
||||
cacheMerges = getConfig(CACHE_MERGES, false);
|
||||
cacheReadOnce = getConfig(CACHE_READONCE, false);
|
||||
boolean kerberosEnabled = getConfig(KERBEROS_ENABLED, false);
|
||||
LOG.info("Solr Kerberos Authentication "
|
||||
log.info("Solr Kerberos Authentication "
|
||||
+ (kerberosEnabled ? "enabled" : "disabled"));
|
||||
if (kerberosEnabled) {
|
||||
initKerberos();
|
||||
|
@ -171,7 +171,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
|
|||
protected LockFactory createLockFactory(String rawLockType) throws IOException {
|
||||
if (null == rawLockType) {
|
||||
rawLockType = DirectoryFactory.LOCK_TYPE_HDFS;
|
||||
LOG.warn("No lockType configured, assuming '"+rawLockType+"'.");
|
||||
log.warn("No lockType configured, assuming '"+rawLockType+"'.");
|
||||
}
|
||||
final String lockType = rawLockType.toLowerCase(Locale.ROOT).trim();
|
||||
switch (lockType) {
|
||||
|
@ -191,7 +191,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
|
|||
@SuppressWarnings("resource")
|
||||
protected Directory create(String path, LockFactory lockFactory, DirContext dirContext) throws IOException {
|
||||
assert params != null : "init must be called before create";
|
||||
LOG.info("creating directory factory for path {}", path);
|
||||
log.info("creating directory factory for path {}", path);
|
||||
Configuration conf = getConf();
|
||||
|
||||
if (metrics == null) {
|
||||
|
@ -215,10 +215,10 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
|
|||
boolean directAllocation = getConfig(BLOCKCACHE_DIRECT_MEMORY_ALLOCATION, true);
|
||||
|
||||
int slabSize = numberOfBlocksPerBank * blockSize;
|
||||
LOG.info(
|
||||
log.info(
|
||||
"Number of slabs of block cache [{}] with direct memory allocation set to [{}]",
|
||||
bankCount, directAllocation);
|
||||
LOG.info(
|
||||
log.info(
|
||||
"Block cache target memory usage, slab size of [{}] will allocate [{}] slabs and use ~[{}] bytes",
|
||||
new Object[] {slabSize, bankCount,
|
||||
((long) bankCount * (long) slabSize)});
|
||||
|
@ -285,13 +285,13 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
|
|||
private BlockCache getBlockDirectoryCache(int numberOfBlocksPerBank, int blockSize, int bankCount,
|
||||
boolean directAllocation, int slabSize, int bufferSize, int bufferCount, boolean staticBlockCache) {
|
||||
if (!staticBlockCache) {
|
||||
LOG.info("Creating new single instance HDFS BlockCache");
|
||||
log.info("Creating new single instance HDFS BlockCache");
|
||||
return createBlockCache(numberOfBlocksPerBank, blockSize, bankCount, directAllocation, slabSize, bufferSize, bufferCount);
|
||||
}
|
||||
synchronized (HdfsDirectoryFactory.class) {
|
||||
|
||||
if (globalBlockCache == null) {
|
||||
LOG.info("Creating new global HDFS BlockCache");
|
||||
log.info("Creating new global HDFS BlockCache");
|
||||
globalBlockCache = createBlockCache(numberOfBlocksPerBank, blockSize, bankCount,
|
||||
directAllocation, slabSize, bufferSize, bufferCount);
|
||||
}
|
||||
|
@ -328,7 +328,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
|
|||
try {
|
||||
return fileSystem.exists(hdfsDirPath);
|
||||
} catch (IOException e) {
|
||||
LOG.error("Error checking if hdfs path exists", e);
|
||||
log.error("Error checking if hdfs path exists", e);
|
||||
throw new RuntimeException("Error checking if hdfs path exists", e);
|
||||
}
|
||||
}
|
||||
|
@ -351,7 +351,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
|
|||
throw new RuntimeException("Could not remove directory");
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOG.error("Could not remove directory", e);
|
||||
log.error("Could not remove directory", e);
|
||||
throw new SolrException(ErrorCode.SERVER_ERROR,
|
||||
"Could not remove directory", e);
|
||||
}
|
||||
|
@ -428,7 +428,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
|
|||
try {
|
||||
return fileSystem.getContentSummary(hdfsDirPath).getLength();
|
||||
} catch (IOException e) {
|
||||
LOG.error("Error checking if hdfs path exists", e);
|
||||
log.error("Error checking if hdfs path exists", e);
|
||||
throw new SolrException(ErrorCode.SERVER_ERROR, "Error checking if hdfs path exists", e);
|
||||
} finally {
|
||||
IOUtils.closeQuietly(fileSystem);
|
||||
|
@ -474,7 +474,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
|
|||
final Configuration ugiConf = new Configuration(getConf());
|
||||
ugiConf.set(HADOOP_SECURITY_AUTHENTICATION, kerberos);
|
||||
UserGroupInformation.setConfiguration(ugiConf);
|
||||
LOG.info(
|
||||
log.info(
|
||||
"Attempting to acquire kerberos ticket with keytab: {}, principal: {} ",
|
||||
keytabFile, principal);
|
||||
try {
|
||||
|
@ -482,7 +482,7 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
|
|||
} catch (IOException ioe) {
|
||||
throw new RuntimeException(ioe);
|
||||
}
|
||||
LOG.info("Got Kerberos ticket");
|
||||
log.info("Got Kerberos ticket");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -514,10 +514,10 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
|
|||
try {
|
||||
pathExists = fileSystem.exists(dataDirPath);
|
||||
} catch (IOException e) {
|
||||
LOG.error("Error checking if hdfs path "+dataDir+" exists", e);
|
||||
log.error("Error checking if hdfs path "+dataDir+" exists", e);
|
||||
}
|
||||
if (!pathExists) {
|
||||
LOG.warn("{} does not point to a valid data directory; skipping clean-up of old index directories.", dataDir);
|
||||
log.warn("{} does not point to a valid data directory; skipping clean-up of old index directories.", dataDir);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -534,16 +534,16 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
|
|||
accept = fs.isDirectory(path) && !path.equals(currentIndexDirPath) &&
|
||||
(pathName.equals("index") || pathName.matches(INDEX_W_TIMESTAMP_REGEX));
|
||||
} catch (IOException e) {
|
||||
LOG.error("Error checking if path {} is an old index directory, caused by: {}", path, e);
|
||||
log.error("Error checking if path {} is an old index directory, caused by: {}", path, e);
|
||||
}
|
||||
return accept;
|
||||
}
|
||||
});
|
||||
} catch (FileNotFoundException fnfe) {
|
||||
// already deleted - ignore
|
||||
LOG.debug("Old index directory already deleted - skipping...", fnfe);
|
||||
log.debug("Old index directory already deleted - skipping...", fnfe);
|
||||
} catch (IOException ioExc) {
|
||||
LOG.error("Error checking for old index directories to clean-up.", ioExc);
|
||||
log.error("Error checking for old index directories to clean-up.", ioExc);
|
||||
}
|
||||
|
||||
if (oldIndexDirs == null || oldIndexDirs.length == 0)
|
||||
|
@ -560,23 +560,23 @@ public class HdfsDirectoryFactory extends CachingDirectoryFactory implements Sol
|
|||
|
||||
int i = 0;
|
||||
if (afterReload) {
|
||||
LOG.info("Will not remove most recent old directory on reload {}", oldIndexDirs[0]);
|
||||
log.info("Will not remove most recent old directory on reload {}", oldIndexDirs[0]);
|
||||
i = 1;
|
||||
}
|
||||
LOG.info("Found {} old index directories to clean-up under {} afterReload={}", oldIndexDirs.length - i, dataDirPath, afterReload);
|
||||
log.info("Found {} old index directories to clean-up under {} afterReload={}", oldIndexDirs.length - i, dataDirPath, afterReload);
|
||||
for (; i < oldIndexPaths.size(); i++) {
|
||||
Path oldDirPath = oldIndexPaths.get(i);
|
||||
if (livePaths.contains(oldDirPath.toString())) {
|
||||
LOG.warn("Cannot delete directory {} because it is still being referenced in the cache.", oldDirPath);
|
||||
log.warn("Cannot delete directory {} because it is still being referenced in the cache.", oldDirPath);
|
||||
} else {
|
||||
try {
|
||||
if (fileSystem.delete(oldDirPath, true)) {
|
||||
LOG.info("Deleted old index directory {}", oldDirPath);
|
||||
log.info("Deleted old index directory {}", oldDirPath);
|
||||
} else {
|
||||
LOG.warn("Failed to delete old index directory {}", oldDirPath);
|
||||
log.warn("Failed to delete old index directory {}", oldDirPath);
|
||||
}
|
||||
} catch (IOException e) {
|
||||
LOG.error("Failed to delete old index directory {} due to: {}", oldDirPath, e);
|
||||
log.error("Failed to delete old index directory {} due to: {}", oldDirPath, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -50,7 +50,7 @@ import org.slf4j.LoggerFactory;
  * @see org.apache.lucene.index.IndexDeletionPolicy
  */
 public final class IndexDeletionPolicyWrapper extends IndexDeletionPolicy {
-  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   private final IndexDeletionPolicy deletionPolicy;
   private volatile Map<Long, IndexCommit> solrVersionVsCommits = new ConcurrentHashMap<>();

@@ -94,7 +94,7 @@ public final class IndexDeletionPolicyWrapper extends IndexDeletionPolicy {
       // this is the common success case: the older time didn't exist, or
       // came before the new time.
       if (previousTime == null || previousTime <= timeToSet) {
-        LOG.debug("Commit point reservation for generation {} set to {} (requested reserve time of {})",
+        log.debug("Commit point reservation for generation {} set to {} (requested reserve time of {})",
                   indexGen, timeToSet, reserveTime);
         break;
       }
@@ -31,7 +31,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 public class BackupRepositoryFactory {
-  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   private final Map<String,PluginInfo> backupRepoPluginByName = new HashMap<>();
   private PluginInfo defaultBackupRepoPlugin = null;

@@ -52,14 +52,14 @@ public class BackupRepositoryFactory {
         this.defaultBackupRepoPlugin = backupRepoPlugins[i];
       }
       backupRepoPluginByName.put(name, backupRepoPlugins[i]);
-      LOG.info("Added backup repository with configuration params {}", backupRepoPlugins[i]);
+      log.info("Added backup repository with configuration params {}", backupRepoPlugins[i]);
     }
     if (backupRepoPlugins.length == 1) {
       this.defaultBackupRepoPlugin = backupRepoPlugins[0];
     }
 
     if (this.defaultBackupRepoPlugin != null) {
-      LOG.info("Default configuration for backup repository is with configuration params {}",
+      log.info("Default configuration for backup repository is with configuration params {}",
           defaultBackupRepoPlugin);
     }
   }
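Many of the converted call sites above rely on SLF4J's {} placeholders rather than string concatenation. As a general SLF4J note rather than anything specific to this commit, placeholder arguments are only rendered when the level is enabled, and a trailing exception argument with no matching placeholder is printed with its stack trace; a short sketch with illustrative values:

    import java.lang.invoke.MethodHandles;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class PlaceholderExample {
      private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

      void demo(String name, Object params, Exception e) {
        // Arguments fill the {} markers in order; no manual concatenation is needed.
        log.info("Added backup repository {} with configuration params {}", name, params);
        // The trailing Throwable has no placeholder, so it is logged as the exception.
        log.warn("Exception adding event to queue {}", name, e);
      }
    }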
@@ -120,7 +120,7 @@ public class IndexFetcher {
 
   public static final String INDEX_PROPERTIES = "index.properties";
 
-  private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   private String masterUrl;
 
@ -242,7 +242,7 @@ public class IndexFetcher {
|
|||
"'masterUrl' is required for a slave");
|
||||
if (masterUrl != null && masterUrl.endsWith(ReplicationHandler.PATH)) {
|
||||
masterUrl = masterUrl.substring(0, masterUrl.length()-12);
|
||||
LOG.warn("'masterUrl' must be specified without the "+ReplicationHandler.PATH+" suffix");
|
||||
log.warn("'masterUrl' must be specified without the "+ReplicationHandler.PATH+" suffix");
|
||||
}
|
||||
this.masterUrl = masterUrl;
|
||||
|
||||
|
@ -327,7 +327,7 @@ public class IndexFetcher {
|
|||
filesToDownload = Collections.synchronizedList(files);
|
||||
else {
|
||||
filesToDownload = Collections.emptyList();
|
||||
LOG.error("No files to download for index generation: "+ gen);
|
||||
log.error("No files to download for index generation: "+ gen);
|
||||
}
|
||||
|
||||
files = (List<Map<String,Object>>) response.get(CONF_FILES);
|
||||
|
@ -373,7 +373,7 @@ public class IndexFetcher {
|
|||
// when we are a bit more confident we may want to try a partial replication
|
||||
// if the error is connection related or something, but we have to be careful
|
||||
forceReplication = true;
|
||||
LOG.info("Last replication failed, so I'll force replication");
|
||||
log.info("Last replication failed, so I'll force replication");
|
||||
}
|
||||
|
||||
try {
|
||||
|
@ -385,19 +385,19 @@ public class IndexFetcher {
|
|||
return IndexFetchResult.EXPECTING_NON_LEADER;
|
||||
}
|
||||
if (replica.getState() != Replica.State.ACTIVE) {
|
||||
LOG.info("Replica {} is leader but it's state is {}, skipping replication", replica.getName(), replica.getState());
|
||||
log.info("Replica {} is leader but it's state is {}, skipping replication", replica.getName(), replica.getState());
|
||||
return IndexFetchResult.LEADER_IS_NOT_ACTIVE;
|
||||
}
|
||||
if (!solrCore.getCoreContainer().getZkController().getClusterState().liveNodesContain(replica.getNodeName())) {
|
||||
LOG.info("Replica {} is leader but it's not hosted on a live node, skipping replication", replica.getName());
|
||||
log.info("Replica {} is leader but it's not hosted on a live node, skipping replication", replica.getName());
|
||||
return IndexFetchResult.LEADER_IS_NOT_ACTIVE;
|
||||
}
|
||||
if (!replica.getCoreUrl().equals(masterUrl)) {
|
||||
masterUrl = replica.getCoreUrl();
|
||||
LOG.info("Updated masterUrl to {}", masterUrl);
|
||||
log.info("Updated masterUrl to {}", masterUrl);
|
||||
// TODO: Do we need to set forceReplication = true?
|
||||
} else {
|
||||
LOG.debug("masterUrl didn't change");
|
||||
log.debug("masterUrl didn't change");
|
||||
}
|
||||
}
|
||||
//get the current 'replicateable' index version in the master
|
||||
|
@ -407,10 +407,10 @@ public class IndexFetcher {
|
|||
} catch (Exception e) {
|
||||
final String errorMsg = e.toString();
|
||||
if (!Strings.isNullOrEmpty(errorMsg) && errorMsg.contains(INTERRUPT_RESPONSE_MESSAGE)) {
|
||||
LOG.warn("Master at: " + masterUrl + " is not available. Index fetch failed by interrupt. Exception: " + errorMsg);
|
||||
log.warn("Master at: " + masterUrl + " is not available. Index fetch failed by interrupt. Exception: " + errorMsg);
|
||||
return new IndexFetchResult(IndexFetchResult.FAILED_BY_INTERRUPT_MESSAGE, false, e);
|
||||
} else {
|
||||
LOG.warn("Master at: " + masterUrl + " is not available. Index fetch failed by exception: " + errorMsg);
|
||||
log.warn("Master at: " + masterUrl + " is not available. Index fetch failed by exception: " + errorMsg);
|
||||
return new IndexFetchResult(IndexFetchResult.FAILED_BY_EXCEPTION_MESSAGE, false, e);
|
||||
}
|
||||
}
|
||||
|
@ -418,8 +418,8 @@ public class IndexFetcher {
|
|||
long latestVersion = (Long) response.get(CMD_INDEX_VERSION);
|
||||
long latestGeneration = (Long) response.get(GENERATION);
|
||||
|
||||
LOG.info("Master's generation: " + latestGeneration);
|
||||
LOG.info("Master's version: " + latestVersion);
|
||||
log.info("Master's generation: " + latestGeneration);
|
||||
log.info("Master's version: " + latestVersion);
|
||||
|
||||
// TODO: make sure that getLatestCommit only returns commit points for the main index (i.e. no side-car indexes)
|
||||
IndexCommit commit = solrCore.getDeletionPolicy().getLatestCommit();
|
||||
|
@ -429,7 +429,7 @@ public class IndexFetcher {
|
|||
try {
|
||||
searcherRefCounted = solrCore.getNewestSearcher(false);
|
||||
if (searcherRefCounted == null) {
|
||||
LOG.warn("No open searcher found - fetch aborted");
|
||||
log.warn("No open searcher found - fetch aborted");
|
||||
return IndexFetchResult.NO_INDEX_COMMIT_EXIST;
|
||||
}
|
||||
commit = searcherRefCounted.get().getIndexReader().getIndexCommit();
|
||||
|
@ -439,14 +439,14 @@ public class IndexFetcher {
|
|||
}
|
||||
}
|
||||
|
||||
LOG.info("Slave's generation: " + commit.getGeneration());
|
||||
LOG.info("Slave's version: " + IndexDeletionPolicyWrapper.getCommitTimestamp(commit));
|
||||
log.info("Slave's generation: " + commit.getGeneration());
|
||||
log.info("Slave's version: " + IndexDeletionPolicyWrapper.getCommitTimestamp(commit));
|
||||
|
||||
if (latestVersion == 0L) {
|
||||
if (commit.getGeneration() != 0) {
|
||||
// since we won't get the files for an empty index,
|
||||
// we just clear ours and commit
|
||||
LOG.info("New index in Master. Deleting mine...");
|
||||
log.info("New index in Master. Deleting mine...");
|
||||
RefCounted<IndexWriter> iw = solrCore.getUpdateHandler().getSolrCoreState().getIndexWriter(solrCore);
|
||||
try {
|
||||
iw.get().deleteAll();
|
||||
|
@ -464,27 +464,27 @@ public class IndexFetcher {
|
|||
|
||||
//there is nothing to be replicated
|
||||
successfulInstall = true;
|
||||
LOG.debug("Nothing to replicate, master's version is 0");
|
||||
log.debug("Nothing to replicate, master's version is 0");
|
||||
return IndexFetchResult.MASTER_VERSION_ZERO;
|
||||
}
|
||||
|
||||
// TODO: Should we be comparing timestamps (across machines) here?
|
||||
if (!forceReplication && IndexDeletionPolicyWrapper.getCommitTimestamp(commit) == latestVersion) {
|
||||
//master and slave are already in sync just return
|
||||
LOG.info("Slave in sync with master.");
|
||||
log.info("Slave in sync with master.");
|
||||
successfulInstall = true;
|
||||
return IndexFetchResult.ALREADY_IN_SYNC;
|
||||
}
|
||||
LOG.info("Starting replication process");
|
||||
log.info("Starting replication process");
|
||||
// get the list of files first
|
||||
fetchFileList(latestGeneration);
|
||||
// this can happen if the commit point is deleted before we fetch the file list.
|
||||
if (filesToDownload.isEmpty()) {
|
||||
return IndexFetchResult.PEER_INDEX_COMMIT_DELETED;
|
||||
}
|
||||
LOG.info("Number of files in latest index in master: " + filesToDownload.size());
|
||||
log.info("Number of files in latest index in master: " + filesToDownload.size());
|
||||
if (tlogFilesToDownload != null) {
|
||||
LOG.info("Number of tlog files in master: " + tlogFilesToDownload.size());
|
||||
log.info("Number of tlog files in master: " + tlogFilesToDownload.size());
|
||||
}
|
||||
|
||||
// Create the sync service
|
||||
|
@ -540,17 +540,17 @@ public class IndexFetcher {
|
|||
indexWriter.deleteUnusedFiles();
|
||||
while (hasUnusedFiles(indexDir, commit)) {
|
||||
indexWriter.deleteUnusedFiles();
|
||||
LOG.info("Sleeping for 1000ms to wait for unused lucene index files to be delete-able");
|
||||
log.info("Sleeping for 1000ms to wait for unused lucene index files to be delete-able");
|
||||
Thread.sleep(1000);
|
||||
c++;
|
||||
if (c >= 30) {
|
||||
LOG.warn("IndexFetcher unable to cleanup unused lucene index files so we must do a full copy instead");
|
||||
log.warn("IndexFetcher unable to cleanup unused lucene index files so we must do a full copy instead");
|
||||
isFullCopyNeeded = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (c > 0) {
|
||||
LOG.info("IndexFetcher slept for " + (c * 1000) + "ms for unused lucene index files to be delete-able");
|
||||
log.info("IndexFetcher slept for " + (c * 1000) + "ms for unused lucene index files to be delete-able");
|
||||
}
|
||||
} finally {
|
||||
writer.decref();
|
||||
|
@ -564,7 +564,7 @@ public class IndexFetcher {
|
|||
solrCore.getUpdateHandler().getSolrCoreState().closeIndexWriter(solrCore, true);
|
||||
}
|
||||
|
||||
LOG.info("Starting download (fullCopy={}) to {}", isFullCopyNeeded, tmpIndexDir);
|
||||
log.info("Starting download (fullCopy={}) to {}", isFullCopyNeeded, tmpIndexDir);
|
||||
successfulInstall = false;
|
||||
|
||||
long bytesDownloaded = downloadIndexFiles(isFullCopyNeeded, indexDir,
|
||||
|
@ -575,7 +575,7 @@ public class IndexFetcher {
|
|||
}
|
||||
final long timeTakenSeconds = getReplicationTimeElapsed();
|
||||
final Long bytesDownloadedPerSecond = (timeTakenSeconds != 0 ? Long.valueOf(bytesDownloaded / timeTakenSeconds) : null);
|
||||
LOG.info("Total time taken for download (fullCopy={},bytesDownloaded={}) : {} secs ({} bytes/sec) to {}",
|
||||
log.info("Total time taken for download (fullCopy={},bytesDownloaded={}) : {} secs ({} bytes/sec) to {}",
|
||||
isFullCopyNeeded, bytesDownloaded, timeTakenSeconds, bytesDownloadedPerSecond, tmpIndexDir);
|
||||
|
||||
Collection<Map<String,Object>> modifiedConfFiles = getModifiedConfFiles(confFilesToDownload);
|
||||
|
@ -603,7 +603,7 @@ public class IndexFetcher {
|
|||
}
|
||||
}
|
||||
|
||||
LOG.info("Configuration files are modified, core will be reloaded");
|
||||
log.info("Configuration files are modified, core will be reloaded");
|
||||
logReplicationTimeAndConfFiles(modifiedConfFiles,
|
||||
successfulInstall);// write to a file time of replication and
|
||||
// conf files.
|
||||
|
@ -633,7 +633,7 @@ public class IndexFetcher {
|
|||
|
||||
// we must reload the core after we open the IW back up
|
||||
if (successfulInstall && (reloadCore || forceCoreReload)) {
|
||||
LOG.info("Reloading SolrCore {}", solrCore.getName());
|
||||
log.info("Reloading SolrCore {}", solrCore.getName());
|
||||
reloadCore();
|
||||
}
|
||||
|
||||
|
@ -642,7 +642,7 @@ public class IndexFetcher {
|
|||
// let the system know we are changing dir's and the old one
|
||||
// may be closed
|
||||
if (indexDir != null) {
|
||||
LOG.info("removing old index directory " + indexDir);
|
||||
log.info("removing old index directory " + indexDir);
|
||||
solrCore.getDirectoryFactory().doneWithDirectory(indexDir);
|
||||
solrCore.getDirectoryFactory().remove(indexDir);
|
||||
}
|
||||
|
@ -658,7 +658,7 @@ public class IndexFetcher {
|
|||
cleanup(solrCore, tmpIndexDir, indexDir, deleteTmpIdxDir, tmpTlogDir, successfulInstall);
|
||||
cleanupDone = true;
|
||||
// we try with a full copy of the index
|
||||
LOG.warn(
|
||||
log.warn(
|
||||
"Replication attempt was not successful - trying a full index replication reloadCore={}",
|
||||
reloadCore);
|
||||
successfulInstall = fetchLatestIndex(true, reloadCore).getSuccessful();
|
||||
|
@ -667,7 +667,7 @@ public class IndexFetcher {
|
|||
markReplicationStop();
|
||||
return successfulInstall ? IndexFetchResult.INDEX_FETCH_SUCCESS : IndexFetchResult.INDEX_FETCH_FAILURE;
|
||||
} catch (ReplicationHandlerException e) {
|
||||
LOG.error("User aborted Replication");
|
||||
log.error("User aborted Replication");
|
||||
return new IndexFetchResult(IndexFetchResult.FAILED_BY_EXCEPTION_MESSAGE, false, e);
|
||||
} catch (SolrException e) {
|
||||
throw e;
|
||||
|
@ -699,7 +699,7 @@ public class IndexFetcher {
|
|||
logReplicationTimeAndConfFiles(null, successfulInstall);
|
||||
} catch (Exception e) {
|
||||
// this can happen on shutdown, a fetch may be running in a thread after DirectoryFactory is closed
|
||||
LOG.warn("Could not log failed replication details", e);
|
||||
log.warn("Could not log failed replication details", e);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -724,24 +724,24 @@ public class IndexFetcher {
|
|||
core.getDirectoryFactory().remove(tmpIndexDir);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
SolrException.log(LOG, e);
|
||||
SolrException.log(log, e);
|
||||
} finally {
|
||||
try {
|
||||
if (tmpIndexDir != null) core.getDirectoryFactory().release(tmpIndexDir);
|
||||
} catch (Exception e) {
|
||||
SolrException.log(LOG, e);
|
||||
SolrException.log(log, e);
|
||||
}
|
||||
try {
|
||||
if (indexDir != null) {
|
||||
core.getDirectoryFactory().release(indexDir);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
SolrException.log(LOG, e);
|
||||
SolrException.log(log, e);
|
||||
}
|
||||
try {
|
||||
if (tmpTlogDir != null) delTree(tmpTlogDir);
|
||||
} catch (Exception e) {
|
||||
SolrException.log(LOG, e);
|
||||
SolrException.log(log, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -754,7 +754,7 @@ public class IndexFetcher {
|
|||
String[] allFiles = indexDir.listAll();
|
||||
for (String file : allFiles) {
|
||||
if (!file.equals(segmentsFileName) && !currentFiles.contains(file) && !file.endsWith(".lock")) {
|
||||
LOG.info("Found unused file: " + file);
|
||||
log.info("Found unused file: " + file);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
@ -838,7 +838,7 @@ public class IndexFetcher {
|
|||
|
||||
solrCore.getDirectoryFactory().renameWithOverwrite(dir, tmpFileName, REPLICATION_PROPERTIES);
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Exception while updating statistics", e);
|
||||
log.warn("Exception while updating statistics", e);
|
||||
} finally {
|
||||
if (dir != null) {
|
||||
solrCore.getDirectoryFactory().release(dir);
|
||||
|
@ -899,7 +899,7 @@ public class IndexFetcher {
|
|||
try {
|
||||
waitSearcher[0].get();
|
||||
} catch (InterruptedException | ExecutionException e) {
|
||||
SolrException.log(LOG, e);
|
||||
SolrException.log(log, e);
|
||||
}
|
||||
}
|
||||
commitPoint = searcher.get().getIndexReader().getIndexCommit();
|
||||
|
@ -921,7 +921,7 @@ public class IndexFetcher {
|
|||
try {
|
||||
solrCore.getCoreContainer().reload(solrCore.getName());
|
||||
} catch (Exception e) {
|
||||
LOG.error("Could not reload core ", e);
|
||||
log.error("Could not reload core ", e);
|
||||
} finally {
|
||||
latch.countDown();
|
||||
}
|
||||
|
@ -935,7 +935,7 @@ public class IndexFetcher {
|
|||
}
|
||||
|
||||
private void downloadConfFiles(List<Map<String, Object>> confFilesToDownload, long latestGeneration) throws Exception {
|
||||
LOG.info("Starting download of configuration files from master: " + confFilesToDownload);
|
||||
log.info("Starting download of configuration files from master: " + confFilesToDownload);
|
||||
confFilesDownloaded = Collections.synchronizedList(new ArrayList<>());
|
||||
File tmpconfDir = new File(solrCore.getResourceLoader().getConfigDir(), "conf." + getDateAsStr(new Date()));
|
||||
try {
|
||||
|
@ -964,7 +964,7 @@ public class IndexFetcher {
|
|||
* Download all the tlog files to the temp tlog directory.
|
||||
*/
|
||||
private long downloadTlogFiles(File tmpTlogDir, long latestGeneration) throws Exception {
|
||||
LOG.info("Starting download of tlog files from master: " + tlogFilesToDownload);
|
||||
log.info("Starting download of tlog files from master: " + tlogFilesToDownload);
|
||||
tlogFilesDownloaded = Collections.synchronizedList(new ArrayList<>());
|
||||
long bytesDownloaded = 0;
|
||||
|
||||
|
@ -998,8 +998,8 @@ public class IndexFetcher {
|
|||
private long downloadIndexFiles(boolean downloadCompleteIndex, Directory indexDir, Directory tmpIndexDir,
|
||||
String indexDirPath, String tmpIndexDirPath, long latestGeneration)
|
||||
throws Exception {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Download files to dir: " + Arrays.asList(indexDir.listAll()));
|
||||
if (log.isDebugEnabled()) {
|
||||
log.debug("Download files to dir: " + Arrays.asList(indexDir.listAll()));
|
||||
}
|
||||
long bytesDownloaded = 0;
|
||||
long bytesSkippedCopying = 0;
|
||||
|
@ -1013,12 +1013,12 @@ public class IndexFetcher {
|
|||
long size = (Long) file.get(SIZE);
|
||||
CompareResult compareResult = compareFile(indexDir, filename, size, (Long) file.get(CHECKSUM));
|
||||
boolean alwaysDownload = filesToAlwaysDownloadIfNoChecksums(filename, size, compareResult);
|
||||
LOG.debug("Downloading file={} size={} checksum={} alwaysDownload={}", filename, size, file.get(CHECKSUM), alwaysDownload);
|
||||
log.debug("Downloading file={} size={} checksum={} alwaysDownload={}", filename, size, file.get(CHECKSUM), alwaysDownload);
|
||||
if (!compareResult.equal || downloadCompleteIndex || alwaysDownload) {
|
||||
File localFile = new File(indexDirPath, filename);
|
||||
if (downloadCompleteIndex && doDifferentialCopy && compareResult.equal && compareResult.checkSummed
|
||||
&& localFile.exists()) {
|
||||
LOG.info("Don't need to download this file. Local file's path is: {}, checksum is: {}",
|
||||
log.info("Don't need to download this file. Local file's path is: {}, checksum is: {}",
|
||||
localFile.getAbsolutePath(), file.get(CHECKSUM));
|
||||
// A hard link here should survive the eventual directory move, and should be more space efficient as
|
||||
// compared to a file copy. TODO: Maybe we could do a move safely here?
|
||||
|
@ -1033,10 +1033,10 @@ public class IndexFetcher {
|
|||
}
|
||||
filesDownloaded.add(new HashMap<>(file));
|
||||
} else {
|
||||
LOG.info("Skipping download for {} because it already exists", file.get(NAME));
|
||||
log.info("Skipping download for {} because it already exists", file.get(NAME));
|
||||
}
|
||||
}
|
||||
LOG.info("Bytes downloaded: {}, Bytes skipped downloading: {}", bytesDownloaded, bytesSkippedCopying);
|
||||
log.info("Bytes downloaded: {}, Bytes skipped downloading: {}", bytesDownloaded, bytesSkippedCopying);
|
||||
return bytesDownloaded;
|
||||
}
|
||||
|
||||
|
@ -1065,7 +1065,7 @@ public class IndexFetcher {
|
|||
indexFileChecksum = CodecUtil.retrieveChecksum(indexInput);
|
||||
compareResult.checkSummed = true;
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Could not retrieve checksum from file.", e);
|
||||
log.warn("Could not retrieve checksum from file.", e);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1076,7 +1076,7 @@ public class IndexFetcher {
|
|||
compareResult.equal = true;
|
||||
return compareResult;
|
||||
} else {
|
||||
LOG.info(
|
||||
log.info(
|
||||
"File {} did not match. expected length is {} and actual length is {}", filename, backupIndexFileLen, indexFileLen);
|
||||
compareResult.equal = false;
|
||||
return compareResult;
|
||||
|
@ -1089,7 +1089,7 @@ public class IndexFetcher {
|
|||
compareResult.equal = true;
|
||||
return compareResult;
|
||||
} else {
|
||||
LOG.warn("File {} did not match. expected checksum is {} and actual is checksum {}. " +
|
||||
log.warn("File {} did not match. expected checksum is {} and actual is checksum {}. " +
|
||||
"expected length is {} and actual length is {}", filename, backupIndexFileChecksum, indexFileChecksum,
|
||||
backupIndexFileLen, indexFileLen);
|
||||
compareResult.equal = false;
|
||||
|
@ -1100,7 +1100,7 @@ public class IndexFetcher {
|
|||
compareResult.equal = false;
|
||||
return compareResult;
|
||||
} catch (IOException e) {
|
||||
LOG.error("Could not read file " + filename + ". Downloading it again", e);
|
||||
log.error("Could not read file " + filename + ". Downloading it again", e);
|
||||
compareResult.equal = false;
|
||||
return compareResult;
|
||||
}
|
||||
|
@ -1139,7 +1139,7 @@ public class IndexFetcher {
|
|||
}
|
||||
} else {
|
||||
if (length != dir.fileLength(filename)) {
|
||||
LOG.warn("File {} did not match. expected length is {} and actual length is {}",
|
||||
log.warn("File {} did not match. expected length is {} and actual length is {}",
|
||||
filename, length, dir.fileLength(filename));
|
||||
return true;
|
||||
}
|
||||
|
@ -1154,25 +1154,25 @@ public class IndexFetcher {
|
|||
* <p/>
|
||||
*/
|
||||
private boolean moveAFile(Directory tmpIdxDir, Directory indexDir, String fname) {
|
||||
LOG.debug("Moving file: {}", fname);
|
||||
log.debug("Moving file: {}", fname);
|
||||
boolean success = false;
|
||||
try {
|
||||
if (slowFileExists(indexDir, fname)) {
|
||||
LOG.warn("Cannot complete replication attempt because file already exists:" + fname);
|
||||
log.warn("Cannot complete replication attempt because file already exists:" + fname);
|
||||
|
||||
// we fail - we downloaded the files we need, if we can't move one in, we can't
|
||||
// count on the correct index
|
||||
return false;
|
||||
}
|
||||
} catch (IOException e) {
|
||||
SolrException.log(LOG, "could not check if a file exists", e);
|
||||
SolrException.log(log, "could not check if a file exists", e);
|
||||
return false;
|
||||
}
|
||||
try {
|
||||
solrCore.getDirectoryFactory().move(tmpIdxDir, indexDir, fname, DirectoryFactory.IOCONTEXT_NO_CACHE);
|
||||
success = true;
|
||||
} catch (IOException e) {
|
||||
SolrException.log(LOG, "Could not move file", e);
|
||||
SolrException.log(log, "Could not move file", e);
|
||||
}
|
||||
return success;
|
||||
}
|
||||
|
@ -1181,10 +1181,10 @@ public class IndexFetcher {
|
|||
* Copy all index files from the temp index dir to the actual index. The segments_N file is copied last.
|
||||
*/
|
||||
private boolean moveIndexFiles(Directory tmpIdxDir, Directory indexDir) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
if (log.isDebugEnabled()) {
|
||||
try {
|
||||
LOG.info("From dir files:" + Arrays.asList(tmpIdxDir.listAll()));
|
||||
LOG.info("To dir files:" + Arrays.asList(indexDir.listAll()));
|
||||
log.info("From dir files:" + Arrays.asList(tmpIdxDir.listAll()));
|
||||
log.info("To dir files:" + Arrays.asList(indexDir.listAll()));
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
|
@ -1245,7 +1245,7 @@ public class IndexFetcher {
|
|||
((CdcrUpdateLog) ulog).initForRecovery(bufferedUpdates.tlog, bufferedUpdates.offset);
|
||||
}
|
||||
catch (Exception e) {
|
||||
LOG.error("Unable to copy tlog files", e);
|
||||
log.error("Unable to copy tlog files", e);
|
||||
return false;
|
||||
}
|
||||
finally {
|
||||
|
@ -1319,7 +1319,7 @@ public class IndexFetcher {
|
|||
try {
|
||||
Files.move(tlogDir, backupTlogDir, StandardCopyOption.ATOMIC_MOVE);
|
||||
} catch (IOException e) {
|
||||
SolrException.log(LOG, "Unable to rename: " + tlogDir + " to: " + backupTlogDir, e);
|
||||
SolrException.log(log, "Unable to rename: " + tlogDir + " to: " + backupTlogDir, e);
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -1327,7 +1327,7 @@ public class IndexFetcher {
|
|||
try {
|
||||
Files.move(src, tlogDir, StandardCopyOption.ATOMIC_MOVE);
|
||||
} catch (IOException e) {
|
||||
SolrException.log(LOG, "Unable to rename: " + src + " to: " + tlogDir, e);
|
||||
SolrException.log(log, "Unable to rename: " + src + " to: " + tlogDir, e);
|
||||
|
||||
// In case of error, try to revert back the original tlog directory
|
||||
try {
|
||||
|
@ -1404,7 +1404,7 @@ public class IndexFetcher {
|
|||
org.apache.lucene.util.IOUtils.rm(dir.toPath());
|
||||
return true;
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Unable to delete directory : " + dir, e);
|
||||
log.warn("Unable to delete directory : " + dir, e);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
@ -1551,7 +1551,7 @@ public class IndexFetcher {
|
|||
fetch();
|
||||
} catch(Exception e) {
|
||||
if (!aborted) {
|
||||
SolrException.log(IndexFetcher.LOG, "Error fetching file, doing one retry...", e);
|
||||
SolrException.log(IndexFetcher.log, "Error fetching file, doing one retry...", e);
|
||||
// one retry
|
||||
fetch();
|
||||
} else {
|
||||
|
@ -1605,7 +1605,7 @@ public class IndexFetcher {
|
|||
//read the size of the packet
|
||||
int packetSize = readInt(intbytes);
|
||||
if (packetSize <= 0) {
|
||||
LOG.warn("No content received for file: {}", fileName);
|
||||
log.warn("No content received for file: {}", fileName);
|
||||
return NO_CONTENT;
|
||||
}
|
||||
//TODO consider recoding the remaining logic to not use/need buf[]; instead use the internal buffer of fis
|
||||
|
@ -1626,7 +1626,7 @@ public class IndexFetcher {
|
|||
checksum.update(buf, 0, packetSize);
|
||||
long checkSumClient = checksum.getValue();
|
||||
if (checkSumClient != checkSumServer) {
|
||||
LOG.error("Checksum not matched between client and server for file: {}", fileName);
|
||||
log.error("Checksum not matched between client and server for file: {}", fileName);
|
||||
//if checksum is wrong it is a problem return for retry
|
||||
return 1;
|
||||
}
|
||||
|
@ -1634,7 +1634,7 @@ public class IndexFetcher {
|
|||
//if everything is fine, write down the packet to the file
|
||||
file.write(buf, packetSize);
|
||||
bytesDownloaded += packetSize;
|
||||
LOG.debug("Fetched and wrote {} bytes of file: {}", bytesDownloaded, fileName);
|
||||
log.debug("Fetched and wrote {} bytes of file: {}", bytesDownloaded, fileName);
|
||||
if (bytesDownloaded >= size)
|
||||
return 0;
|
||||
//errorCount is always set to zero after a successful packet
|
||||
|
@ -1643,7 +1643,7 @@ public class IndexFetcher {
|
|||
} catch (ReplicationHandlerException e) {
|
||||
throw e;
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Error in fetching file: {} (downloaded {} of {} bytes)",
|
||||
log.warn("Error in fetching file: {} (downloaded {} of {} bytes)",
|
||||
fileName, bytesDownloaded, size, e);
|
||||
//for any failure, increment the error count
|
||||
errorCount++;
|
||||
|
@ -1686,7 +1686,7 @@ public class IndexFetcher {
|
|||
try {
|
||||
file.close();
|
||||
} catch (Exception e) {/* no-op */
|
||||
LOG.error("Error closing file: {}", this.saveAs, e);
|
||||
log.error("Error closing file: {}", this.saveAs, e);
|
||||
}
|
||||
if (bytesDownloaded != size) {
|
||||
//if the download is not complete then
|
||||
|
@ -1694,7 +1694,7 @@ public class IndexFetcher {
|
|||
try {
|
||||
file.delete();
|
||||
} catch (Exception e) {
|
||||
LOG.error("Error deleting file: {}", this.saveAs, e);
|
||||
log.error("Error deleting file: {}", this.saveAs, e);
|
||||
}
|
||||
//if the failure is due to a user abort it is returned normally else an exception is thrown
|
||||
if (!aborted)
|
||||
|
|
|
@ -131,7 +131,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
|
||||
public static final String PATH = "/replication";
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
SolrCore core;
|
||||
|
||||
private volatile boolean closed = false;
|
||||
|
@ -158,11 +158,11 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
try {
|
||||
version = Long.parseLong(commitTime);
|
||||
} catch (NumberFormatException e) {
|
||||
LOG.warn("Version in commitData was not formatted correctly: " + commitTime, e);
|
||||
log.warn("Version in commitData was not formatted correctly: " + commitTime, e);
|
||||
}
|
||||
}
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Unable to get version from commitData, commit: " + commit, e);
|
||||
log.warn("Unable to get version from commitData, commit: " + commit, e);
|
||||
}
|
||||
return new CommitVersionInfo(generation, version);
|
||||
}
|
||||
|
@ -375,7 +375,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
nl.add(CMD_GET_FILE_LIST, commitList);
|
||||
l.add(nl);
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Exception while reading files for commit " + c, e);
|
||||
log.warn("Exception while reading files for commit " + c, e);
|
||||
}
|
||||
}
|
||||
return l;
|
||||
|
@ -392,7 +392,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
checksum.update(buffer, 0, bytesRead);
|
||||
return checksum.getValue();
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Exception in finding checksum of " + f, e);
|
||||
log.warn("Exception in finding checksum of " + f, e);
|
||||
} finally {
|
||||
IOUtils.closeQuietly(fis);
|
||||
}
|
||||
|
@ -406,7 +406,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
if (!indexFetchLock.tryLock())
|
||||
return IndexFetchResult.LOCK_OBTAIN_FAILED;
|
||||
if (core.getCoreContainer().isShutDown()) {
|
||||
LOG.warn("I was asked to replicate but CoreContainer is shutting down");
|
||||
log.warn("I was asked to replicate but CoreContainer is shutting down");
|
||||
return IndexFetchResult.CONTAINER_IS_SHUTTING_DOWN;
|
||||
}
|
||||
try {
|
||||
|
@ -420,7 +420,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
}
|
||||
return currentIndexFetcher.fetchLatestIndex(forceReplication);
|
||||
} catch (Exception e) {
|
||||
SolrException.log(LOG, "Index fetch failed ", e);
|
||||
SolrException.log(log, "Index fetch failed ", e);
|
||||
if (currentIndexFetcher != pollingIndexFetcher) {
|
||||
currentIndexFetcher.destroy();
|
||||
}
|
||||
|
@ -568,7 +568,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
snapShooter.validateCreateSnapshot();
|
||||
snapShooter.createSnapAsync(numberToKeep, (nl) -> snapShootDetails = nl);
|
||||
} catch (Exception e) {
|
||||
LOG.error("Exception during creating a snapshot", e);
|
||||
log.error("Exception during creating a snapshot", e);
|
||||
rsp.add("exception", e);
|
||||
}
|
||||
}
|
||||
|
@ -629,7 +629,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
long checksum = CodecUtil.retrieveChecksum(in);
|
||||
fileMeta.put(CHECKSUM, checksum);
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Could not read checksum from index file: " + file, e);
|
||||
log.warn("Could not read checksum from index file: " + file, e);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -647,7 +647,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
try {
|
||||
fileMeta.put(CHECKSUM, CodecUtil.retrieveChecksum(in));
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Could not read checksum from index file: " + infos.getSegmentsFileName(), e);
|
||||
log.warn("Could not read checksum from index file: " + infos.getSegmentsFileName(), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -655,13 +655,13 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
} catch (IOException e) {
|
||||
rsp.add("status", "unable to get file names for given index generation");
|
||||
rsp.add(EXCEPTION, e);
|
||||
LOG.error("Unable to get file names for indexCommit generation: " + gen, e);
|
||||
log.error("Unable to get file names for indexCommit generation: " + gen, e);
|
||||
} finally {
|
||||
if (dir != null) {
|
||||
try {
|
||||
core.getDirectoryFactory().release(dir);
|
||||
} catch (IOException e) {
|
||||
SolrException.log(LOG, "Could not release directory after fetching file list", e);
|
||||
SolrException.log(log, "Could not release directory after fetching file list", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -670,19 +670,19 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
if (solrParams.getBool(TLOG_FILES, false)) {
|
||||
try {
|
||||
List<Map<String, Object>> tlogfiles = getTlogFileList(commit);
|
||||
LOG.info("Adding tlog files to list: " + tlogfiles);
|
||||
log.info("Adding tlog files to list: " + tlogfiles);
|
||||
rsp.add(TLOG_FILES, tlogfiles);
|
||||
}
|
||||
catch (IOException e) {
|
||||
rsp.add("status", "unable to get tlog file names for given index generation");
|
||||
rsp.add(EXCEPTION, e);
|
||||
LOG.error("Unable to get tlog file names for indexCommit generation: " + gen, e);
|
||||
log.error("Unable to get tlog file names for indexCommit generation: " + gen, e);
|
||||
}
|
||||
}
|
||||
|
||||
if (confFileNameAlias.size() < 1 || core.getCoreContainer().isZooKeeperAware())
|
||||
return;
|
||||
LOG.debug("Adding config files to list: " + includeConfFiles);
|
||||
log.debug("Adding config files to list: " + includeConfFiles);
|
||||
//if configuration files need to be included get their details
|
||||
rsp.add(CONF_FILES, getConfFileInfoFromCache(confFileNameAlias, confFileInfoCache));
|
||||
}
|
||||
|
@ -776,14 +776,14 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
void disablePoll() {
|
||||
if (isSlave) {
|
||||
pollDisabled.set(true);
|
||||
LOG.info("inside disable poll, value of pollDisabled = " + pollDisabled);
|
||||
log.info("inside disable poll, value of pollDisabled = " + pollDisabled);
|
||||
}
|
||||
}
|
||||
|
||||
void enablePoll() {
|
||||
if (isSlave) {
|
||||
pollDisabled.set(false);
|
||||
LOG.info("inside enable poll, value of pollDisabled = " + pollDisabled);
|
||||
log.info("inside enable poll, value of pollDisabled = " + pollDisabled);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -828,7 +828,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
try {
|
||||
return core.withSearcher(searcher -> CommitVersionInfo.build(searcher.getIndexReader().getIndexCommit()));
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Unable to get index commit: ", e);
|
||||
log.warn("Unable to get index commit: ", e);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
@ -925,7 +925,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
NamedList nl = fetcher.getDetails();
|
||||
slave.add("masterDetails", nl.get(CMD_DETAILS));
|
||||
} catch (Exception e) {
|
||||
LOG.warn(
|
||||
log.warn(
|
||||
"Exception while invoking 'details' method for replication on master ",
|
||||
e);
|
||||
slave.add(ERR_STATUS, "invalid_master");
|
||||
|
@ -1033,7 +1033,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
slave.add("timeRemaining", String.valueOf(estimatedTimeRemaining) + "s");
|
||||
slave.add("downloadSpeed", NumberUtils.readableSize(downloadSpeed));
|
||||
} catch (Exception e) {
|
||||
LOG.error("Exception while writing replication details: ", e);
|
||||
log.error("Exception while writing replication details: ", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1141,22 +1141,22 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
pollIntervalStr = intervalStr;
|
||||
pollIntervalNs = readIntervalNs(pollIntervalStr);
|
||||
if (pollIntervalNs == null || pollIntervalNs <= 0) {
|
||||
LOG.info(" No value set for 'pollInterval'. Timer Task not started.");
|
||||
log.info(" No value set for 'pollInterval'. Timer Task not started.");
|
||||
return;
|
||||
}
|
||||
|
||||
Runnable task = () -> {
|
||||
if (pollDisabled.get()) {
|
||||
LOG.info("Poll disabled");
|
||||
log.info("Poll disabled");
|
||||
return;
|
||||
}
|
||||
try {
|
||||
LOG.debug("Polling for index modifications");
|
||||
log.debug("Polling for index modifications");
|
||||
markScheduledExecutionStart();
|
||||
IndexFetchResult fetchResult = doFetch(null, false);
|
||||
if (pollListener != null) pollListener.onComplete(core, fetchResult);
|
||||
} catch (Exception e) {
|
||||
LOG.error("Exception in fetching index", e);
|
||||
log.error("Exception in fetching index", e);
|
||||
}
|
||||
};
|
||||
executorService = Executors.newSingleThreadScheduledExecutor(
|
||||
|
@ -1165,7 +1165,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
long initialDelayNs = new Random().nextLong() % pollIntervalNs
|
||||
+ TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS);
|
||||
executorService.scheduleAtFixedRate(task, initialDelayNs, pollIntervalNs, TimeUnit.NANOSECONDS);
|
||||
LOG.info("Poll scheduled at an interval of {}ms",
|
||||
log.info("Poll scheduled at an interval of {}ms",
|
||||
TimeUnit.MILLISECONDS.convert(pollIntervalNs, TimeUnit.NANOSECONDS));
|
||||
}
|
||||
|
||||
|
@ -1193,7 +1193,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
|
||||
if (enableMaster || (enableSlave && !currentIndexFetcher.fetchFromLeader)) {
|
||||
if (core.getCoreContainer().getZkController() != null) {
|
||||
LOG.warn("SolrCloud is enabled for core " + core.getName() + " but so is old-style replication. Make sure you" +
|
||||
log.warn("SolrCloud is enabled for core " + core.getName() + " but so is old-style replication. Make sure you" +
|
||||
" intend this behavior, it usually indicates a mis-configuration. Master setting is " +
|
||||
Boolean.toString(enableMaster) + " and slave setting is " + Boolean.toString(enableSlave));
|
||||
}
|
||||
|
@ -1214,7 +1214,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
// if there is an alias add it or it is null
|
||||
confFileNameAlias.add(strs[0], strs.length > 1 ? strs[1] : null);
|
||||
}
|
||||
LOG.info("Replication enabled for following config files: " + includeConfFiles);
|
||||
log.info("Replication enabled for following config files: " + includeConfFiles);
|
||||
}
|
||||
List backup = master.getAll("backupAfter");
|
||||
boolean backupOnCommit = backup.contains("commit");
|
||||
|
@ -1238,7 +1238,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
solrPolicy.setMaxOptimizedCommitsToKeep(1);
|
||||
}
|
||||
} else {
|
||||
LOG.warn("Replication can't call setMaxOptimizedCommitsToKeep on " + policy);
|
||||
log.warn("Replication can't call setMaxOptimizedCommitsToKeep on " + policy);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1282,7 +1282,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
iw.decref();
|
||||
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Unable to get IndexCommit on startup", e);
|
||||
log.warn("Unable to get IndexCommit on startup", e);
|
||||
} finally {
|
||||
if (s!=null) s.decref();
|
||||
}
|
||||
|
@ -1309,7 +1309,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
}
|
||||
}
|
||||
}
|
||||
LOG.info("Commits will be reserved for " + reserveCommitDuration + "ms.");
|
||||
log.info("Commits will be reserved for " + reserveCommitDuration + "ms.");
|
||||
}
|
||||
|
||||
// check master or slave is enabled
|
||||
|
@ -1416,7 +1416,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
snapShooter.validateCreateSnapshot();
|
||||
snapShooter.createSnapAsync(numberToKeep, (nl) -> snapShootDetails = nl);
|
||||
} catch (Exception e) {
|
||||
LOG.error("Exception while snapshooting", e);
|
||||
log.error("Exception while snapshooting", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1560,7 +1560,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
}
|
||||
fos.write(buf, 0, read);
|
||||
fos.flush();
|
||||
LOG.debug("Wrote {} bytes for file {}", offset + read, fileName);
|
||||
log.debug("Wrote {} bytes for file {}", offset + read, fileName);
|
||||
|
||||
//Pause if necessary
|
||||
maxBytesBeforePause += read;
|
||||
|
@ -1577,7 +1577,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
in.seek(offset);
|
||||
}
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Exception while writing response for params: " + params, e);
|
||||
log.warn("Exception while writing response for params: " + params, e);
|
||||
} finally {
|
||||
if (in != null) {
|
||||
in.close();
|
||||
|
@ -1645,7 +1645,7 @@ public class ReplicationHandler extends RequestHandlerBase implements SolrCoreAw
|
|||
writeNothingAndFlush();
|
||||
}
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Exception while writing response for params: " + params, e);
|
||||
log.warn("Exception while writing response for params: " + params, e);
|
||||
} finally {
|
||||
IOUtils.closeQuietly(inputStream);
|
||||
extendReserveAndReleaseCommitPoint();
|
||||
|
|
|
@ -54,7 +54,7 @@ import org.slf4j.LoggerFactory;
|
|||
* @since solr 1.4
|
||||
*/
|
||||
public class SnapShooter {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private SolrCore solrCore;
|
||||
private String snapshotName = null;
|
||||
private String directoryName = null;
|
||||
|
@ -204,7 +204,7 @@ public class SnapShooter {
|
|||
try {
|
||||
result.accept(createSnapshot(indexCommit));
|
||||
} catch (Exception e) {
|
||||
LOG.error("Exception while creating snapshot", e);
|
||||
log.error("Exception while creating snapshot", e);
|
||||
NamedList snapShootDetails = new NamedList<>();
|
||||
snapShootDetails.add("exception", e.getMessage());
|
||||
result.accept(snapShootDetails);
|
||||
|
@ -215,7 +215,7 @@ public class SnapShooter {
|
|||
try {
|
||||
deleteOldBackups(numberToKeep);
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Unable to delete old snapshots ", e);
|
||||
log.warn("Unable to delete old snapshots ", e);
|
||||
}
|
||||
}
|
||||
}).start();
|
||||
|
@ -225,7 +225,7 @@ public class SnapShooter {
|
|||
// note: remember to reserve the indexCommit first so it won't get deleted concurrently
|
||||
protected NamedList createSnapshot(final IndexCommit indexCommit) throws Exception {
|
||||
assert indexCommit != null;
|
||||
LOG.info("Creating backup snapshot " + (snapshotName == null ? "<not named>" : snapshotName) + " at " + baseSnapDirPath);
|
||||
log.info("Creating backup snapshot " + (snapshotName == null ? "<not named>" : snapshotName) + " at " + baseSnapDirPath);
|
||||
boolean success = false;
|
||||
try {
|
||||
NamedList<Object> details = new NamedList<>();
|
||||
|
@ -245,7 +245,7 @@ public class SnapShooter {
|
|||
details.add("status", "success");
|
||||
details.add("snapshotCompletedAt", new Date().toString());//bad; should be Instant.now().toString()
|
||||
details.add("snapshotName", snapshotName);
|
||||
LOG.info("Done creating backup snapshot: " + (snapshotName == null ? "<not named>" : snapshotName) +
|
||||
log.info("Done creating backup snapshot: " + (snapshotName == null ? "<not named>" : snapshotName) +
|
||||
" at " + baseSnapDirPath);
|
||||
success = true;
|
||||
return details;
|
||||
|
@ -254,7 +254,7 @@ public class SnapShooter {
|
|||
try {
|
||||
backupRepo.deleteDirectory(snapshotDirPath);
|
||||
} catch (Exception excDuringDelete) {
|
||||
LOG.warn("Failed to delete "+snapshotDirPath+" after snapshot creation failed due to: "+excDuringDelete);
|
||||
log.warn("Failed to delete "+snapshotDirPath+" after snapshot creation failed due to: "+excDuringDelete);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -284,7 +284,7 @@ public class SnapShooter {
|
|||
}
|
||||
|
||||
protected void deleteNamedSnapshot(ReplicationHandler replicationHandler) {
|
||||
LOG.info("Deleting snapshot: " + snapshotName);
|
||||
log.info("Deleting snapshot: " + snapshotName);
|
||||
|
||||
NamedList<Object> details = new NamedList<>();
|
||||
|
||||
|
@ -297,7 +297,7 @@ public class SnapShooter {
|
|||
|
||||
} catch (IOException e) {
|
||||
details.add("status", "Unable to delete snapshot: " + snapshotName);
|
||||
LOG.warn("Unable to delete snapshot: " + snapshotName, e);
|
||||
log.warn("Unable to delete snapshot: " + snapshotName, e);
|
||||
}
|
||||
|
||||
replicationHandler.snapShootDetails = details;
|
||||
|
|
|
@ -49,7 +49,7 @@ import org.slf4j.LoggerFactory;
|
|||
* collection.
|
||||
*/
|
||||
public class AutoscalingHistoryHandler extends RequestHandlerBase implements PermissionNameProvider {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
public static final String SYSTEM_COLLECTION_PARAM = "systemCollection";
|
||||
|
||||
|
@ -134,7 +134,7 @@ public class AutoscalingHistoryHandler extends RequestHandlerBase implements Per
|
|||
if ((e instanceof SolrException) && e.getMessage().contains("Collection not found")) {
|
||||
// relatively benign
|
||||
String msg = "Collection " + collection + " does not exist.";
|
||||
LOG.info(msg);
|
||||
log.info(msg);
|
||||
rsp.getValues().add("error", msg);
|
||||
} else {
|
||||
throw e;
|
||||
|
|
|
@ -122,7 +122,7 @@ import org.slf4j.LoggerFactory;
|
|||
public class QueryComponent extends SearchComponent
|
||||
{
|
||||
public static final String COMPONENT_NAME = "query";
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
@Override
|
||||
public void prepare(ResponseBuilder rb) throws IOException
|
||||
|
@ -299,7 +299,7 @@ public class QueryComponent extends SearchComponent
|
|||
@Override
|
||||
public void process(ResponseBuilder rb) throws IOException
|
||||
{
|
||||
LOG.debug("process: {}", rb.req.getParams());
|
||||
log.debug("process: {}", rb.req.getParams());
|
||||
|
||||
SolrQueryRequest req = rb.req;
|
||||
SolrParams params = req.getParams();
|
||||
|
|
|
@ -87,7 +87,7 @@ import org.slf4j.LoggerFactory;
|
|||
* @since solr 1.3
|
||||
*/
|
||||
public class SpellCheckComponent extends SearchComponent implements SolrCoreAware, SpellingParams {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
public static final boolean DEFAULT_ONLY_MORE_POPULAR = false;
|
||||
|
||||
|
@ -258,10 +258,10 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar
|
|||
}
|
||||
}
|
||||
} catch (IOException e){
|
||||
LOG.error(e.toString());
|
||||
log.error(e.toString());
|
||||
return null;
|
||||
} catch (SyntaxError e) {
|
||||
LOG.error(e.toString());
|
||||
log.error(e.toString());
|
||||
return null;
|
||||
}
|
||||
|
||||
|
@ -407,7 +407,7 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar
|
|||
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
|
||||
"Unable to read spelling info for shard: " + srsp.getShard(), e);
|
||||
}
|
||||
LOG.info(srsp.getShard() + " " + nl);
|
||||
log.info(srsp.getShard() + " " + nl);
|
||||
if (nl != null) {
|
||||
mergeData.totalNumberShardResponses++;
|
||||
collectShardSuggestions(nl, mergeData);
|
||||
|
@ -704,7 +704,7 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar
|
|||
@Override
|
||||
public void inform(SolrCore core) {
|
||||
if (initParams != null) {
|
||||
LOG.info("Initializing spell checkers");
|
||||
log.info("Initializing spell checkers");
|
||||
boolean hasDefault = false;
|
||||
for (int i = 0; i < initParams.size(); i++) {
|
||||
if (initParams.getName(i).equals("spellchecker")) {
|
||||
|
@ -728,7 +728,7 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar
|
|||
|
||||
//ensure that there is at least one query converter defined
|
||||
if (queryConverters.size() == 0) {
|
||||
LOG.trace("No queryConverter defined, using default converter");
|
||||
log.trace("No queryConverter defined, using default converter");
|
||||
queryConverters.put("queryConverter", new SpellingQueryConverter());
|
||||
}
|
||||
|
||||
|
@ -778,7 +778,7 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar
|
|||
boolean buildOnCommit = Boolean.parseBoolean((String) spellchecker.get("buildOnCommit"));
|
||||
boolean buildOnOptimize = Boolean.parseBoolean((String) spellchecker.get("buildOnOptimize"));
|
||||
if (buildOnCommit || buildOnOptimize) {
|
||||
LOG.info("Registering newSearcher listener for spellchecker: " + checker.getDictionaryName());
|
||||
log.info("Registering newSearcher listener for spellchecker: " + checker.getDictionaryName());
|
||||
core.registerNewSearcherListener(new SpellCheckerListener(core, checker, buildOnCommit, buildOnOptimize));
|
||||
}
|
||||
} else {
|
||||
|
@ -810,11 +810,11 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar
|
|||
if (currentSearcher == null) {
|
||||
// firstSearcher event
|
||||
try {
|
||||
LOG.info("Loading spell index for spellchecker: "
|
||||
log.info("Loading spell index for spellchecker: "
|
||||
+ checker.getDictionaryName());
|
||||
checker.reload(core, newSearcher);
|
||||
} catch (IOException e) {
|
||||
LOG.error( "Exception in reloading spell check index for spellchecker: " + checker.getDictionaryName(), e);
|
||||
log.error( "Exception in reloading spell check index for spellchecker: " + checker.getDictionaryName(), e);
|
||||
}
|
||||
} else {
|
||||
// newSearcher event
|
||||
|
@ -824,7 +824,7 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar
|
|||
if (newSearcher.getIndexReader().leaves().size() == 1) {
|
||||
buildSpellIndex(newSearcher);
|
||||
} else {
|
||||
LOG.info("Index is not optimized therefore skipping building spell check index for: " + checker.getDictionaryName());
|
||||
log.info("Index is not optimized therefore skipping building spell check index for: " + checker.getDictionaryName());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -833,10 +833,10 @@ public class SpellCheckComponent extends SearchComponent implements SolrCoreAwar
|
|||
|
||||
private void buildSpellIndex(SolrIndexSearcher newSearcher) {
|
||||
try {
|
||||
LOG.info("Building spell index for spell checker: " + checker.getDictionaryName());
|
||||
log.info("Building spell index for spell checker: " + checker.getDictionaryName());
|
||||
checker.build(core, newSearcher);
|
||||
} catch (Exception e) {
|
||||
LOG.error(
|
||||
log.error(
|
||||
"Exception in building spell check index for spellchecker: " + checker.getDictionaryName(), e);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -65,7 +65,7 @@ import org.slf4j.LoggerFactory;
|
|||
* and for initializing them as specified by SolrConfig
|
||||
*/
|
||||
public class SuggestComponent extends SearchComponent implements SolrCoreAware, SuggesterParams, Accountable, SolrMetricProducer {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
/** Name used to identify whether the user query concerns this component */
|
||||
public static final String COMPONENT_NAME = "suggest";
|
||||
|
@ -116,7 +116,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware,
|
|||
@Override
|
||||
public void inform(SolrCore core) {
|
||||
if (initParams != null) {
|
||||
LOG.info("Initializing SuggestComponent");
|
||||
log.info("Initializing SuggestComponent");
|
||||
boolean hasDefault = false;
|
||||
for (int i = 0; i < initParams.size(); i++) {
|
||||
if (initParams.getName(i).equals(CONFIG_PARAM_LABEL)) {
|
||||
|
@ -152,7 +152,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware,
|
|||
|
||||
if (buildOnCommit || buildOnOptimize || buildOnStartup) {
|
||||
SuggesterListener listener = new SuggesterListener(core, suggester, buildOnCommit, buildOnOptimize, buildOnStartup, core.isReloaded());
|
||||
LOG.info("Registering searcher listener for suggester: " + suggester.getName() + " - " + listener);
|
||||
log.info("Registering searcher listener for suggester: " + suggester.getName() + " - " + listener);
|
||||
core.registerFirstSearcherListener(listener);
|
||||
core.registerNewSearcherListener(listener);
|
||||
}
|
||||
|
@ -165,7 +165,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware,
|
|||
@Override
|
||||
public void prepare(ResponseBuilder rb) throws IOException {
|
||||
SolrParams params = rb.req.getParams();
|
||||
LOG.info("SuggestComponent prepare with : " + params);
|
||||
log.info("SuggestComponent prepare with : " + params);
|
||||
if (!params.getBool(COMPONENT_NAME, false)) {
|
||||
return;
|
||||
}
|
||||
|
@ -197,7 +197,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware,
|
|||
@Override
|
||||
public int distributedProcess(ResponseBuilder rb) {
|
||||
SolrParams params = rb.req.getParams();
|
||||
LOG.info("SuggestComponent distributedProcess with : " + params);
|
||||
log.info("SuggestComponent distributedProcess with : " + params);
|
||||
if (rb.stage < ResponseBuilder.STAGE_EXECUTE_QUERY)
|
||||
return ResponseBuilder.STAGE_EXECUTE_QUERY;
|
||||
if (rb.stage == ResponseBuilder.STAGE_EXECUTE_QUERY) {
|
||||
|
@ -219,7 +219,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware,
|
|||
@Override
|
||||
public void process(ResponseBuilder rb) throws IOException {
|
||||
SolrParams params = rb.req.getParams();
|
||||
LOG.info("SuggestComponent process with : " + params);
|
||||
log.info("SuggestComponent process with : " + params);
|
||||
if (!params.getBool(COMPONENT_NAME, false) || suggesters.isEmpty()) {
|
||||
return;
|
||||
}
|
||||
|
@ -274,7 +274,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware,
|
|||
@Override
|
||||
public void finishStage(ResponseBuilder rb) {
|
||||
SolrParams params = rb.req.getParams();
|
||||
LOG.info("SuggestComponent finishStage with : " + params);
|
||||
log.info("SuggestComponent finishStage with : " + params);
|
||||
if (!params.getBool(COMPONENT_NAME, false) || rb.stage != ResponseBuilder.STAGE_GET_FIELDS)
|
||||
return;
|
||||
int count = params.getInt(SUGGEST_COUNT, 1);
|
||||
|
@ -289,7 +289,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware,
|
|||
@SuppressWarnings("unchecked")
|
||||
Map<String, SimpleOrderedMap<NamedList<Object>>> namedList =
|
||||
(Map<String, SimpleOrderedMap<NamedList<Object>>>) resp.get(SuggesterResultLabels.SUGGEST);
|
||||
LOG.info(srsp.getShard() + " : " + namedList);
|
||||
log.info(srsp.getShard() + " : " + namedList);
|
||||
suggesterResults.add(toSuggesterResult(namedList));
|
||||
}
|
||||
}
|
||||
|
@ -508,20 +508,20 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware,
|
|||
SolrIndexSearcher currentSearcher) {
|
||||
long thisCallCount = callCount.incrementAndGet();
|
||||
if (isCoreReload && thisCallCount == 1) {
|
||||
LOG.info("Skipping first newSearcher call for suggester " + suggester + " in core reload");
|
||||
log.info("Skipping first newSearcher call for suggester " + suggester + " in core reload");
|
||||
return;
|
||||
} else if (thisCallCount == 1 || (isCoreReload && thisCallCount == 2)) {
|
||||
if (buildOnStartup) {
|
||||
LOG.info("buildOnStartup: " + suggester.getName());
|
||||
log.info("buildOnStartup: " + suggester.getName());
|
||||
buildSuggesterIndex(newSearcher);
|
||||
}
|
||||
} else {
|
||||
if (buildOnCommit) {
|
||||
LOG.info("buildOnCommit: " + suggester.getName());
|
||||
log.info("buildOnCommit: " + suggester.getName());
|
||||
buildSuggesterIndex(newSearcher);
|
||||
} else if (buildOnOptimize) {
|
||||
if (newSearcher.getIndexReader().leaves().size() == 1) {
|
||||
LOG.info("buildOnOptimize: " + suggester.getName());
|
||||
log.info("buildOnOptimize: " + suggester.getName());
|
||||
buildSuggesterIndex(newSearcher);
|
||||
}
|
||||
}
|
||||
|
@ -533,7 +533,7 @@ public class SuggestComponent extends SearchComponent implements SolrCoreAware,
|
|||
try {
|
||||
suggester.build(core, newSearcher);
|
||||
} catch (Exception e) {
|
||||
LOG.error("Exception in building suggester index for: " + suggester.getName(), e);
|
||||
log.error("Exception in building suggester index for: " + suggester.getName(), e);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -29,7 +29,7 @@ import org.slf4j.LoggerFactory;
|
|||
* {@link org.apache.solr.metrics.SolrMetricReporter}.
|
||||
*/
|
||||
public class ReporterClientCache<T> implements Closeable {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
private final Map<String, T> cache = new ConcurrentHashMap<>();
|
||||
|
||||
|
@ -59,7 +59,7 @@ public class ReporterClientCache<T> implements Closeable {
|
|||
item = clientProvider.get();
|
||||
cache.put(id, item);
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Error providing a new client for id=" + id, e);
|
||||
log.warn("Error providing a new client for id=" + id, e);
|
||||
item = null;
|
||||
}
|
||||
}
|
||||
|
@ -75,7 +75,7 @@ public class ReporterClientCache<T> implements Closeable {
|
|||
try {
|
||||
((Closeable)client).close();
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Error closing client " + client + ", ignoring...", e);
|
||||
log.warn("Error closing client " + client + ", ignoring...", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -64,7 +64,7 @@ import org.slf4j.LoggerFactory;
|
|||
* </ul>
|
||||
*/
|
||||
public class JmxMetricsReporter implements Reporter, Closeable {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
public static final String INSTANCE_TAG = "_instanceTag";
|
||||
|
||||
|
@ -520,11 +520,11 @@ public class JmxMetricsReporter implements Reporter, Closeable {
|
|||
private void registerMBean(Object mBean, ObjectName objectName) throws InstanceAlreadyExistsException, JMException {
|
||||
// remove previous bean if exists
|
||||
if (mBeanServer.isRegistered(objectName)) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
if (log.isDebugEnabled()) {
|
||||
Set<ObjectInstance> objects = mBeanServer.queryMBeans(objectName, null);
|
||||
LOG.debug("## removing existing " + objects.size() + " bean(s) for " + objectName.getCanonicalName() + ", current tag=" + tag + ":");
|
||||
log.debug("## removing existing " + objects.size() + " bean(s) for " + objectName.getCanonicalName() + ", current tag=" + tag + ":");
|
||||
for (ObjectInstance inst : objects) {
|
||||
LOG.debug("## - tag=" + mBeanServer.getAttribute(inst.getObjectName(), INSTANCE_TAG));
|
||||
log.debug("## - tag=" + mBeanServer.getAttribute(inst.getObjectName(), INSTANCE_TAG));
|
||||
}
|
||||
}
|
||||
mBeanServer.unregisterMBean(objectName);
|
||||
|
@ -538,7 +538,7 @@ public class JmxMetricsReporter implements Reporter, Closeable {
|
|||
} else {
|
||||
registered.put(objectName, objectName);
|
||||
}
|
||||
LOG.debug("## registered " + objectInstance.getObjectName().getCanonicalName() + ", tag=" + tag);
|
||||
log.debug("## registered " + objectInstance.getObjectName().getCanonicalName() + ", tag=" + tag);
|
||||
}
|
||||
|
||||
private void unregisterMBean(ObjectName originalObjectName) throws InstanceNotFoundException, MBeanRegistrationException {
|
||||
|
@ -548,7 +548,7 @@ public class JmxMetricsReporter implements Reporter, Closeable {
|
|||
}
|
||||
Set<ObjectInstance> objects = mBeanServer.queryMBeans(objectName, exp);
|
||||
for (ObjectInstance o : objects) {
|
||||
LOG.debug("## Unregistered " + o.getObjectName().getCanonicalName() + ", tag=" + tag);
|
||||
log.debug("## Unregistered " + o.getObjectName().getCanonicalName() + ", tag=" + tag);
|
||||
mBeanServer.unregisterMBean(o.getObjectName());
|
||||
}
|
||||
}
|
||||
|
@ -566,9 +566,9 @@ public class JmxMetricsReporter implements Reporter, Closeable {
|
|||
}
|
||||
}
|
||||
} catch (InstanceAlreadyExistsException e) {
|
||||
LOG.debug("Unable to register gauge", e);
|
||||
log.debug("Unable to register gauge", e);
|
||||
} catch (JMException e) {
|
||||
LOG.warn("Unable to register gauge", e);
|
||||
log.warn("Unable to register gauge", e);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -578,9 +578,9 @@ public class JmxMetricsReporter implements Reporter, Closeable {
|
|||
final ObjectName objectName = createName("gauges", name);
|
||||
unregisterMBean(objectName);
|
||||
} catch (InstanceNotFoundException e) {
|
||||
LOG.debug("Unable to unregister gauge", e);
|
||||
log.debug("Unable to unregister gauge", e);
|
||||
} catch (MBeanRegistrationException e) {
|
||||
LOG.warn("Unable to unregister gauge", e);
|
||||
log.warn("Unable to unregister gauge", e);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -592,9 +592,9 @@ public class JmxMetricsReporter implements Reporter, Closeable {
|
|||
registerMBean(new JmxCounter(counter, objectName, tag), objectName);
|
||||
}
|
||||
} catch (InstanceAlreadyExistsException e) {
|
||||
LOG.debug("Unable to register counter", e);
|
||||
log.debug("Unable to register counter", e);
|
||||
 } catch (JMException e) {
-LOG.warn("Unable to register counter", e);
+log.warn("Unable to register counter", e);
 }
 }
@@ -604,9 +604,9 @@ public class JmxMetricsReporter implements Reporter, Closeable {
 final ObjectName objectName = createName("counters", name);
 unregisterMBean(objectName);
 } catch (InstanceNotFoundException e) {
-LOG.debug("Unable to unregister counter", e);
+log.debug("Unable to unregister counter", e);
 } catch (MBeanRegistrationException e) {
-LOG.warn("Unable to unregister counter", e);
+log.warn("Unable to unregister counter", e);
 }
 }
@@ -618,9 +618,9 @@ public class JmxMetricsReporter implements Reporter, Closeable {
 registerMBean(new JmxHistogram(histogram, objectName, tag), objectName);
 }
 } catch (InstanceAlreadyExistsException e) {
-LOG.debug("Unable to register histogram", e);
+log.debug("Unable to register histogram", e);
 } catch (JMException e) {
-LOG.warn("Unable to register histogram", e);
+log.warn("Unable to register histogram", e);
 }
 }
@@ -630,9 +630,9 @@ public class JmxMetricsReporter implements Reporter, Closeable {
 final ObjectName objectName = createName("histograms", name);
 unregisterMBean(objectName);
 } catch (InstanceNotFoundException e) {
-LOG.debug("Unable to unregister histogram", e);
+log.debug("Unable to unregister histogram", e);
 } catch (MBeanRegistrationException e) {
-LOG.warn("Unable to unregister histogram", e);
+log.warn("Unable to unregister histogram", e);
 }
 }
@@ -644,9 +644,9 @@ public class JmxMetricsReporter implements Reporter, Closeable {
 registerMBean(new JmxMeter(meter, objectName, rateUnit, tag), objectName);
 }
 } catch (InstanceAlreadyExistsException e) {
-LOG.debug("Unable to register meter", e);
+log.debug("Unable to register meter", e);
 } catch (JMException e) {
-LOG.warn("Unable to register meter", e);
+log.warn("Unable to register meter", e);
 }
 }
@@ -656,9 +656,9 @@ public class JmxMetricsReporter implements Reporter, Closeable {
 final ObjectName objectName = createName("meters", name);
 unregisterMBean(objectName);
 } catch (InstanceNotFoundException e) {
-LOG.debug("Unable to unregister meter", e);
+log.debug("Unable to unregister meter", e);
 } catch (MBeanRegistrationException e) {
-LOG.warn("Unable to unregister meter", e);
+log.warn("Unable to unregister meter", e);
 }
 }
@@ -670,9 +670,9 @@ public class JmxMetricsReporter implements Reporter, Closeable {
 registerMBean(new JmxTimer(timer, objectName, rateUnit, durationUnit, tag), objectName);
 }
 } catch (InstanceAlreadyExistsException e) {
-LOG.debug("Unable to register timer", e);
+log.debug("Unable to register timer", e);
 } catch (JMException e) {
-LOG.warn("Unable to register timer", e);
+log.warn("Unable to register timer", e);
 }
 }
@@ -682,9 +682,9 @@ public class JmxMetricsReporter implements Reporter, Closeable {
 final ObjectName objectName = createName("timers", name);
 unregisterMBean(objectName);
 } catch (InstanceNotFoundException e) {
-LOG.debug("Unable to unregister timer", e);
+log.debug("Unable to unregister timer", e);
 } catch (MBeanRegistrationException e) {
-LOG.warn("Unable to unregister timer", e);
+log.warn("Unable to unregister timer", e);
 }
 }
@@ -697,9 +697,9 @@ public class JmxMetricsReporter implements Reporter, Closeable {
 try {
 unregisterMBean(name);
 } catch (InstanceNotFoundException e) {
-LOG.debug("Unable to unregister metric", e);
+log.debug("Unable to unregister metric", e);
 } catch (MBeanRegistrationException e) {
-LOG.warn("Unable to unregister metric", e);
+log.warn("Unable to unregister metric", e);
 }
 }
 registered.clear();
@@ -737,7 +737,7 @@ public class JmxMetricsReporter implements Reporter, Closeable {
 } else if (v instanceof Gauge) {
 listener.onGaugeAdded(k, (Gauge)v);
 } else {
-LOG.warn("Unknown metric type " + v.getClass().getName() + " for metric '" + k + "', ignoring");
+log.warn("Unknown metric type " + v.getClass().getName() + " for metric '" + k + "', ignoring");
 }
 });
 }
@@ -63,7 +63,7 @@ public class SolrShardReporter extends SolrCoreReporter {
 private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 public static final List<String> DEFAULT_FILTERS = new ArrayList(){{
-add("TLOG.*");
+add("Tlog.*");
 add("CORE\\.fs.*");
 add("REPLICATION.*");
 add("INDEX\\.flush.*");

@@ -107,7 +107,7 @@ import static org.apache.solr.common.params.CommonParams.SORT;
 * to leverage any of its functionality.
 */
 public class SimpleFacets {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 /** The main set of documents all facet counts should be relative to */
 protected DocSet docsOrig;
@@ -518,7 +518,7 @@ public class SimpleFacets {
 if (ft.isPointField() && mincount <= 0) { // default is mincount=0. See SOLR-10033 & SOLR-11174.
 String warningMessage
 = "Raising facet.mincount from " + mincount + " to 1, because field " + field + " is Points-based.";
-LOG.warn(warningMessage);
+log.warn(warningMessage);
 List<String> warnings = (List<String>)rb.rsp.getResponseHeader().get("warnings");
 if (null == warnings) {
 warnings = new ArrayList<>();

@@ -42,7 +42,7 @@ import org.slf4j.LoggerFactory;
 public class BinaryResponseWriter implements BinaryQueryResponseWriter {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 @Override
 public void write(OutputStream out, SolrQueryRequest req, SolrQueryResponse response) throws IOException {
@@ -103,7 +103,7 @@ public class BinaryResponseWriter implements BinaryQueryResponseWriter {
 try {
 o = DocsStreamer.getValue(sf, f);
 } catch (Exception e) {
-LOG.warn("Error reading a field : " + o, e);
+log.warn("Error reading a field : " + o, e);
 }
 }
 return o;
@@ -49,7 +49,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 public class JsonPreAnalyzedParser implements PreAnalyzedParser {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 public static final String VERSION = "1";
@@ -132,7 +132,7 @@ public class JsonPreAnalyzedParser implements PreAnalyzedParser {
 try {
 tokenStart = Integer.parseInt(String.valueOf(obj));
 } catch (NumberFormatException nfe) {
-LOG.warn("Invalid " + OFFSET_START_KEY + " attribute, skipped: '" + obj + "'");
+log.warn("Invalid " + OFFSET_START_KEY + " attribute, skipped: '" + obj + "'");
 hasOffsetStart = false;
 }
 }
@@ -145,7 +145,7 @@ public class JsonPreAnalyzedParser implements PreAnalyzedParser {
 try {
 tokenEnd = Integer.parseInt(String.valueOf(obj));
 } catch (NumberFormatException nfe) {
-LOG.warn("Invalid " + OFFSET_END_KEY + " attribute, skipped: '" + obj + "'");
+log.warn("Invalid " + OFFSET_END_KEY + " attribute, skipped: '" + obj + "'");
 hasOffsetEnd = false;
 }
 }
@@ -158,7 +158,7 @@ public class JsonPreAnalyzedParser implements PreAnalyzedParser {
 try {
 posIncr = Integer.parseInt(String.valueOf(obj));
 } catch (NumberFormatException nfe) {
-LOG.warn("Invalid " + POSINCR_KEY + " attribute, skipped: '" + obj + "'");
+log.warn("Invalid " + POSINCR_KEY + " attribute, skipped: '" + obj + "'");
 }
 }
 PositionIncrementAttribute patt = parent.addAttribute(PositionIncrementAttribute.class);
@@ -178,13 +178,13 @@ public class JsonPreAnalyzedParser implements PreAnalyzedParser {
 FlagsAttribute flags = parent.addAttribute(FlagsAttribute.class);
 flags.setFlags(f);
 } catch (NumberFormatException nfe) {
-LOG.warn("Invalid " + FLAGS_KEY + " attribute, skipped: '" + e.getValue() + "'");
+log.warn("Invalid " + FLAGS_KEY + " attribute, skipped: '" + e.getValue() + "'");
 }
 } else if (key.equals(TYPE_KEY)) {
 TypeAttribute tattr = parent.addAttribute(TypeAttribute.class);
 tattr.setType(String.valueOf(e.getValue()));
 } else {
-LOG.warn("Unknown attribute, skipped: " + e.getKey() + "=" + e.getValue());
+log.warn("Unknown attribute, skipped: " + e.getKey() + "=" + e.getValue());
 }
 }
 // handle offset attr

@@ -52,7 +52,7 @@ import static org.apache.solr.common.params.CommonParams.JSON;
 * optionally with an independent stored value of a field.
 */
 public class PreAnalyzedField extends TextField implements HasImplicitIndexAnalyzer {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 /** Init argument name. Value is a fully-qualified class name of the parser
 * that implements {@link PreAnalyzedParser}.
@@ -83,7 +83,7 @@ public class PreAnalyzedField extends TextField implements HasImplicitIndexAnalyzer {
 Constructor<?> c = implClazz.getConstructor(new Class<?>[0]);
 parser = (PreAnalyzedParser) c.newInstance(new Object[0]);
 } catch (Exception e) {
-LOG.warn("Can't use the configured PreAnalyzedParser class '" + implName +
+log.warn("Can't use the configured PreAnalyzedParser class '" + implName +
 "', using default " + DEFAULT_IMPL, e);
 parser = new JsonPreAnalyzedParser();
 }
@@ -124,7 +124,7 @@ public class PreAnalyzedField extends TextField implements HasImplicitIndexAnalyzer {
 try {
 f = fromString(field, String.valueOf(value));
 } catch (Exception e) {
-LOG.warn("Error parsing pre-analyzed field '" + field.getName() + "'", e);
+log.warn("Error parsing pre-analyzed field '" + field.getName() + "'", e);
 return null;
 }
 return f;
@@ -168,8 +168,8 @@ public class PreAnalyzedField extends TextField implements HasImplicitIndexAnalyzer {
 */
 public static org.apache.lucene.document.FieldType createFieldType(SchemaField field) {
 if (!field.indexed() && !field.stored()) {
-if (LOG.isTraceEnabled())
-LOG.trace("Ignoring unindexed/unstored field: " + field);
+if (log.isTraceEnabled())
+log.trace("Ignoring unindexed/unstored field: " + field);
 return null;
 }
 org.apache.lucene.document.FieldType newType = new org.apache.lucene.document.FieldType();
@@ -79,7 +79,7 @@ public class SurroundQParserPlugin extends QParserPlugin {
 try {
 this.maxBasicQueries = Integer.parseInt(mbqparam);
 } catch (Exception e) {
-LOG.warn("Couldn't parse maxBasicQueries value " + mbqparam +", using default of 1000");
+log.warn("Couldn't parse maxBasicQueries value " + mbqparam +", using default of 1000");
 this.maxBasicQueries = DEFMAXBASICQUERIES;
 }
 }

@@ -29,7 +29,7 @@ import org.slf4j.LoggerFactory;
 public class ExactSharedStatsCache extends ExactStatsCache {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 // local stats obtained from shard servers
 private final Map<String,Map<String,TermStats>> perShardTermStats = new ConcurrentHashMap<>();
@@ -40,7 +40,7 @@ public class ExactSharedStatsCache extends ExactStatsCache {
 @Override
 public StatsSource get(SolrQueryRequest req) {
-LOG.debug("total={}, cache {}", currentGlobalColStats, currentGlobalTermStats.size());
+log.debug("total={}, cache {}", currentGlobalColStats, currentGlobalTermStats.size());
 return new ExactStatsSource(currentGlobalTermStats, currentGlobalColStats);
 }
@@ -55,7 +55,7 @@ public class ExactSharedStatsCache extends ExactStatsCache {
 @Override
 protected void printStats(SolrQueryRequest req) {
-LOG.debug("perShardColStats={}, perShardTermStats={}", perShardColStats, perShardTermStats);
+log.debug("perShardColStats={}, perShardTermStats={}", perShardColStats, perShardTermStats);
 }
 @Override
@@ -55,7 +55,7 @@ import org.slf4j.LoggerFactory;
 * query terms (and collection statistics for term fields).
 */
 public class ExactStatsCache extends StatsCache {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 // experimenting with strategy that takes more RAM, but also doesn't share memory
 // across threads
@@ -74,7 +74,7 @@ public class ExactStatsCache extends StatsCache {
 if (currentGlobalTermStats == null) {
 currentGlobalTermStats = Collections.emptyMap();
 }
-LOG.debug("Returning StatsSource. Collection stats={}, Term stats size= {}", currentGlobalColStats, currentGlobalTermStats.size());
+log.debug("Returning StatsSource. Collection stats={}, Term stats size= {}", currentGlobalColStats, currentGlobalTermStats.size());
 return new ExactStatsSource(currentGlobalTermStats, currentGlobalColStats);
 }
@@ -95,7 +95,7 @@ public class ExactStatsCache extends StatsCache {
 public void mergeToGlobalStats(SolrQueryRequest req, List<ShardResponse> responses) {
 Set<Object> allTerms = new HashSet<>();
 for (ShardResponse r : responses) {
-LOG.debug("Merging to global stats, shard={}, response={}", r.getShard(), r.getSolrResponse().getResponse());
+log.debug("Merging to global stats, shard={}, response={}", r.getShard(), r.getSolrResponse().getResponse());
 String shard = r.getShard();
 SolrResponse res = r.getSolrResponse();
 NamedList<Object> nl = res.getResponse();
@@ -116,7 +116,7 @@ public class ExactStatsCache extends StatsCache {
 if (allTerms.size() > 0) {
 req.getContext().put(TERMS_KEY, Lists.newArrayList(allTerms));
 }
-if (LOG.isDebugEnabled()) printStats(req);
+if (log.isDebugEnabled()) printStats(req);
 }
 protected void addToPerShardColStats(SolrQueryRequest req, String shard, Map<String,CollectionStats> colStats) {
@@ -137,7 +137,7 @@ public class ExactStatsCache extends StatsCache {
 if (perShardColStats == null) {
 perShardColStats = Collections.emptyMap();
 }
-LOG.debug("perShardColStats={}, perShardTermStats={}", perShardColStats, perShardTermStats);
+log.debug("perShardColStats={}, perShardTermStats={}", perShardColStats, perShardTermStats);
 }
 protected void addToPerShardTermStats(SolrQueryRequest req, String shard, String termStatsString) {
@@ -182,19 +182,19 @@ public class ExactStatsCache extends StatsCache {
 if (statsMap.size() != 0) { //Don't add empty keys
 String termStatsString = StatsUtil.termStatsMapToString(statsMap);
 rb.rsp.add(TERM_STATS_KEY, termStatsString);
-if (LOG.isDebugEnabled()) {
-LOG.debug("termStats={}, terms={}, numDocs={}", termStatsString, terms, searcher.maxDoc());
+if (log.isDebugEnabled()) {
+log.debug("termStats={}, terms={}, numDocs={}", termStatsString, terms, searcher.maxDoc());
 }
 }
 if (colMap.size() != 0){
 String colStatsString = StatsUtil.colStatsMapToString(colMap);
 rb.rsp.add(COL_STATS_KEY, colStatsString);
-if (LOG.isDebugEnabled()) {
-LOG.debug("collectionStats={}, terms={}, numDocs={}", colStatsString, terms, searcher.maxDoc());
+if (log.isDebugEnabled()) {
+log.debug("collectionStats={}, terms={}, numDocs={}", colStatsString, terms, searcher.maxDoc());
 }
 }
 } catch (IOException e) {
-LOG.error("Error collecting local stats, query='" + q.toString() + "'", e);
+log.error("Error collecting local stats, query='" + q.toString() + "'", e);
 throw new SolrException(ErrorCode.SERVER_ERROR, "Error collecting local stats.", e);
 }
 }
@@ -248,7 +248,7 @@ public class ExactStatsCache extends StatsCache {
 g.add(termStats);
 }
 }
-LOG.debug("terms={}, termStats={}", terms, globalTermStats);
+log.debug("terms={}, termStats={}", terms, globalTermStats);
 // need global TermStats here...
 params.add(TERM_STATS_KEY, StatsUtil.termStatsMapToString(globalTermStats));
 }
@@ -283,7 +283,7 @@ public class ExactStatsCache extends StatsCache {
 }
 }
 }
-LOG.debug("Global collection stats={}", globalColStats);
+log.debug("Global collection stats={}", globalColStats);
 if (globalTermStats == null) return;
 Map<String,TermStats> termStats = StatsUtil.termStatsMapFromString(globalTermStats);
 if (termStats != null) {
@@ -329,7 +329,7 @@ public class ExactStatsCache extends StatsCache {
 // see returnLocalStats, if docFreq == 0, they are not added anyway
 // Not sure we need a warning here
 if (termStats == null) {
-LOG.debug("Missing global termStats info for term={}, using local stats", term);
+log.debug("Missing global termStats info for term={}, using local stats", term);
 return localSearcher.localTermStatistics(term, context);
 } else {
 return termStats.toTermStatistics();
@@ -341,7 +341,7 @@ public class ExactStatsCache extends StatsCache {
 throws IOException {
 CollectionStats colStats = colStatsCache.get(field);
 if (colStats == null) {
-LOG.debug("Missing global colStats info for field={}, using local", field);
+log.debug("Missing global colStats info for field={}, using local", field);
 return localSearcher.localCollectionStatistics(field);
 } else {
 return colStats.toCollectionStatistics();
@@ -49,7 +49,7 @@ import org.slf4j.LoggerFactory;
 * that is updated with the global statistics on every request.
 */
 public class LRUStatsCache extends ExactStatsCache {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 // local stats obtained from shard servers
 private final Map<String,SolrCache<String,TermStats>> perShardTermStats = new ConcurrentHashMap<>();
@@ -65,7 +65,7 @@ public class LRUStatsCache extends ExactStatsCache {
 @Override
 public StatsSource get(SolrQueryRequest req) {
-LOG.debug("## GET total={}, cache {}", currentGlobalColStats , currentGlobalTermStats.size());
+log.debug("## GET total={}, cache {}", currentGlobalColStats , currentGlobalTermStats.size());
 return new LRUStatsSource(currentGlobalTermStats, currentGlobalColStats);
 }
@@ -120,7 +120,7 @@ public class LRUStatsCache extends ExactStatsCache {
 @Override
 protected void printStats(SolrQueryRequest req) {
-LOG.debug("## MERGED: perShardColStats={}, perShardTermStats={}", perShardColStats, perShardTermStats);
+log.debug("## MERGED: perShardColStats={}, perShardTermStats={}", perShardColStats, perShardTermStats);
 }
 static class LRUStatsSource extends StatsSource {
@@ -136,7 +136,7 @@ public class LRUStatsCache extends ExactStatsCache {
 throws IOException {
 TermStats termStats = termStatsCache.get(term.toString());
 if (termStats == null) {
-LOG.debug("## Missing global termStats info: {}, using local", term);
+log.debug("## Missing global termStats info: {}, using local", term);
 return localSearcher.localTermStatistics(term, context);
 } else {
 return termStats.toTermStatistics();
@@ -148,7 +148,7 @@ public class LRUStatsCache extends ExactStatsCache {
 throws IOException {
 CollectionStats colStats = colStatsCache.get(field);
 if (colStats == null) {
-LOG.debug("## Missing global colStats info: {}, using local", field);
+log.debug("## Missing global colStats info: {}, using local", field);
 return localSearcher.localCollectionStatistics(field);
 } else {
 return colStats.toCollectionStatistics();

@@ -34,11 +34,11 @@ import org.slf4j.LoggerFactory;
 * uses local term statistics.
 */
 public class LocalStatsCache extends StatsCache {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 @Override
 public StatsSource get(SolrQueryRequest req) {
-LOG.debug("## GET {}", req);
+log.debug("## GET {}", req);
 return new LocalStatsSource();
 }
@@ -49,33 +49,33 @@ public class LocalStatsCache extends StatsCache {
 // by returning null we don't create additional round-trip request.
 @Override
 public ShardRequest retrieveStatsRequest(ResponseBuilder rb) {
-LOG.debug("## RDR {}", rb.req);
+log.debug("## RDR {}", rb.req);
 return null;
 }
 @Override
 public void mergeToGlobalStats(SolrQueryRequest req,
 List<ShardResponse> responses) {
-if (LOG.isDebugEnabled()) {
-LOG.debug("## MTGD {}", req);
+if (log.isDebugEnabled()) {
+log.debug("## MTGD {}", req);
 for (ShardResponse r : responses) {
-LOG.debug(" - {}", r);
+log.debug(" - {}", r);
 }
 }
 }
 @Override
 public void returnLocalStats(ResponseBuilder rb, SolrIndexSearcher searcher) {
-LOG.debug("## RLD {}", rb.req);
+log.debug("## RLD {}", rb.req);
 }
 @Override
 public void receiveGlobalStats(SolrQueryRequest req) {
-LOG.debug("## RGD {}", req);
+log.debug("## RGD {}", req);
 }
 @Override
 public void sendGlobalStats(ResponseBuilder rb, ShardRequest outgoing) {
-LOG.debug("## SGD {}", outgoing);
+log.debug("## SGD {}", outgoing);
 }
 }
@@ -33,7 +33,7 @@ import org.slf4j.LoggerFactory;
 */
 public class StatsUtil {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 /**
 * Make a String representation of {@link CollectionStats}
@@ -54,12 +54,12 @@ public class StatsUtil {
 private static CollectionStats colStatsFromString(String data) {
 if (data == null || data.trim().length() == 0) {
-LOG.warn("Invalid empty collection stats string");
+log.warn("Invalid empty collection stats string");
 return null;
 }
 String[] vals = data.split(",");
 if (vals.length != 5) {
-LOG.warn("Invalid collection stats string, num fields " + vals.length
+log.warn("Invalid collection stats string, num fields " + vals.length
 + " != 5, '" + data + "'");
 return null;
 }
@@ -72,7 +72,7 @@ public class StatsUtil {
 return new CollectionStats(field, maxDoc, docCount, sumTotalTermFreq,
 sumDocFreq);
 } catch (Exception e) {
-LOG.warn("Invalid collection stats string '" + data + "': "
+log.warn("Invalid collection stats string '" + data + "': "
 + e.toString());
 return null;
 }
@@ -88,12 +88,12 @@ public class StatsUtil {
 private static Term termFromString(String data) {
 if (data == null || data.trim().length() == 0) {
-LOG.warn("Invalid empty term value");
+log.warn("Invalid empty term value");
 return null;
 }
 int idx = data.indexOf(':');
 if (idx == -1) {
-LOG.warn("Invalid term data without ':': '" + data + "'");
+log.warn("Invalid term data without ':': '" + data + "'");
 return null;
 }
 String field = data.substring(0, idx);
@@ -104,7 +104,7 @@ public class StatsUtil {
 // byte[] bytes = Base64.base64ToByteArray(value);
 // return new Term(field, new BytesRef(bytes));
 } catch (Exception e) {
-LOG.warn("Invalid term value '" + value + "'");
+log.warn("Invalid term value '" + value + "'");
 return null;
 }
 }
@@ -123,12 +123,12 @@ public class StatsUtil {
 private static TermStats termStatsFromString(String data, Term t) {
 if (data == null || data.trim().length() == 0) {
-LOG.warn("Invalid empty term stats string");
+log.warn("Invalid empty term stats string");
 return null;
 }
 String[] vals = data.split(",");
 if (vals.length < 2) {
-LOG.warn("Invalid term stats string, num fields " + vals.length
+log.warn("Invalid term stats string, num fields " + vals.length
 + " < 2, '" + data + "'");
 return null;
 }
@@ -150,7 +150,7 @@ public class StatsUtil {
 termToUse = t;
 }
 if (termToUse == null) {
-LOG.warn("Missing term in termStats '" + data + "'");
+log.warn("Missing term in termStats '" + data + "'");
 return null;
 }
 try {
@@ -158,7 +158,7 @@ public class StatsUtil {
 long totalTermFreq = Long.parseLong(vals[idx]);
 return new TermStats(termToUse.toString(), docFreq, totalTermFreq);
 } catch (Exception e) {
-LOG.warn("Invalid termStats string '" + data + "'");
+log.warn("Invalid termStats string '" + data + "'");
 return null;
 }
 }
@@ -16,13 +16,15 @@
 */
 package org.apache.solr.servlet;
 
+import java.lang.invoke.MethodHandles;
+
 import org.slf4j.LoggerFactory;
 
 final class CheckLoggingConfiguration {
 
 static void check() {
 try {
-LoggerFactory.getLogger(CheckLoggingConfiguration.class);
+LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 } catch (NoClassDefFoundError e) {
 throw new NoClassDefFoundError("Failed to initialize Apache Solr: "
 +"Could not find necessary SLF4j logging jars. If using Jetty, the SLF4j logging jars need to go in "
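For reference, the declaration style these hunks converge on is the MethodHandles-based SLF4J lookup shown in the following minimal sketch; the class name is only an illustrative placeholder, not a file touched by this commit.

import java.lang.invoke.MethodHandles;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative sketch only: lookupClass() resolves to the declaring class,
// so this exact line can be copied between classes without editing the name.
public class ExampleComponent {
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  public void doWork() {
    log.info("doWork invoked"); // logger name resolves to ExampleComponent
  }
}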
@@ -59,7 +59,7 @@ import org.slf4j.LoggerFactory;
 * @see DirectSpellChecker
 */
 public class DirectSolrSpellChecker extends SolrSpellChecker {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 // configuration params shared with other spellcheckers
 public static final String COMPARATOR_CLASS = AbstractLuceneSpellChecker.COMPARATOR_CLASS;
@@ -96,7 +96,7 @@ public class DirectSolrSpellChecker extends SolrSpellChecker {
 SolrParams params = config.toSolrParams();
-LOG.info("init: " + config);
+log.info("init: " + config);
 String name = super.init(config, core);
 Comparator<SuggestWord> comp = SuggestWordQueue.DEFAULT_COMPARATOR;
@@ -173,7 +173,7 @@ public class DirectSolrSpellChecker extends SolrSpellChecker {
 @Override
 public SpellingResult getSuggestions(SpellingOptions options)
 throws IOException {
-LOG.debug("getSuggestions: " + options.tokens);
+log.debug("getSuggestions: " + options.tokens);
 SpellingResult result = new SpellingResult();
 float accuracy = (options.accuracy == Float.MIN_VALUE) ? checker.getAccuracy() : options.accuracy;

@@ -43,7 +43,7 @@ import org.slf4j.LoggerFactory;
 import static org.apache.solr.common.params.CommonParams.ID;
 public class SpellCheckCollator {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 private int maxCollations = 1;
 private int maxCollationTries = 0;
 private int maxCollationEvaluations = 10000;
@@ -73,7 +73,7 @@ public class SpellCheckCollator {
 verifyCandidateWithQuery = false;
 }
 if (queryComponent == null && verifyCandidateWithQuery) {
-LOG.info("Could not find an instance of QueryComponent. Disabling collation verification against the index.");
+log.info("Could not find an instance of QueryComponent. Disabling collation verification against the index.");
 maxTries = 1;
 verifyCandidateWithQuery = false;
 }
@@ -174,7 +174,7 @@ public class SpellCheckCollator {
 / (float)etce.getNumberScanned() );
 }
 } catch (Exception e) {
-LOG.warn("Exception trying to re-query to check if a spell check possibility would return any hits.", e);
+log.warn("Exception trying to re-query to check if a spell check possibility would return any hits.", e);
 } finally {
 checkResponse.req.close();
 }
@@ -193,8 +193,8 @@ public class SpellCheckCollator {
 collation.setMisspellingsAndCorrections(misspellingsAndCorrections);
 collations.add(collation);
 }
-if (LOG.isDebugEnabled()) {
-LOG.debug("Collation: " + collationQueryStr + (verifyCandidateWithQuery ? (" will return " + hits + " hits.") : ""));
+if (log.isDebugEnabled()) {
+log.debug("Collation: " + collationQueryStr + (verifyCandidateWithQuery ? (" will return " + hits + " hits.") : ""));
 }
 }
 return collations;
@@ -58,7 +58,7 @@ import static org.apache.solr.spelling.suggest.fst.AnalyzingInfixLookupFactory.C
 * {@link Dictionary}
 * */
 public class SolrSuggester implements Accountable {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 /** Name used when an unnamed suggester config is passed */
 public static final String DEFAULT_DICT_NAME = "default";
@@ -100,7 +100,7 @@ public class SolrSuggester implements Accountable {
 * Lucene suggester
 * */
 public String init(NamedList<?> config, SolrCore core) {
-LOG.info("init: " + config);
+log.info("init: " + config);
 // read the config
 name = config.get(NAME) != null ? (String) config.get(NAME)
@@ -112,7 +112,7 @@ public class SolrSuggester implements Accountable {
 if (lookupImpl == null) {
 lookupImpl = LookupFactory.DEFAULT_FILE_BASED_DICT;
-LOG.info("No " + LOOKUP_IMPL + " parameter was provided falling back to " + lookupImpl);
+log.info("No " + LOOKUP_IMPL + " parameter was provided falling back to " + lookupImpl);
 }
 contextFilterQueryAnalyzer = new TokenizerChain(new StandardTokenizerFactory(Collections.EMPTY_MAP), null);
@@ -128,7 +128,7 @@ public class SolrSuggester implements Accountable {
 try {
 ((Closeable) lookup).close();
 } catch (IOException e) {
-LOG.warn("Could not close the suggester lookup.", e);
+log.warn("Could not close the suggester lookup.", e);
 }
 }
@@ -147,13 +147,13 @@ public class SolrSuggester implements Accountable {
 if (!storeDir.exists()) {
 storeDir.mkdirs();
 } else if (getStoreFile().exists()) {
-if (LOG.isDebugEnabled()) {
-LOG.debug("attempt reload of the stored lookup from file " + getStoreFile());
+if (log.isDebugEnabled()) {
+log.debug("attempt reload of the stored lookup from file " + getStoreFile());
 }
 try {
 lookup.load(new FileInputStream(getStoreFile()));
 } catch (IOException e) {
-LOG.warn("Loading stored lookup data failed, possibly not cached yet");
+log.warn("Loading stored lookup data failed, possibly not cached yet");
 }
 }
 }
@@ -162,19 +162,19 @@ public class SolrSuggester implements Accountable {
 if (dictionaryImpl == null) {
 dictionaryImpl = (sourceLocation == null) ? DictionaryFactory.DEFAULT_INDEX_BASED_DICT :
 DictionaryFactory.DEFAULT_FILE_BASED_DICT;
-LOG.info("No " + DICTIONARY_IMPL + " parameter was provided falling back to " + dictionaryImpl);
+log.info("No " + DICTIONARY_IMPL + " parameter was provided falling back to " + dictionaryImpl);
 }
 dictionaryFactory = core.getResourceLoader().newInstance(dictionaryImpl, DictionaryFactory.class);
 dictionaryFactory.setParams(config);
-LOG.info("Dictionary loaded with params: " + config);
+log.info("Dictionary loaded with params: " + config);
 return name;
 }
 /** Build the underlying Lucene Suggester */
 public void build(SolrCore core, SolrIndexSearcher searcher) throws IOException {
-LOG.info("SolrSuggester.build(" + name + ")");
+log.info("SolrSuggester.build(" + name + ")");
 dictionary = dictionaryFactory.create(core, searcher);
 try {
@@ -188,16 +188,16 @@ public class SolrSuggester implements Accountable {
 if (storeDir != null) {
 File target = getStoreFile();
 if(!lookup.store(new FileOutputStream(target))) {
-LOG.error("Store Lookup build failed");
+log.error("Store Lookup build failed");
 } else {
-LOG.info("Stored suggest data to: " + target.getAbsolutePath());
+log.info("Stored suggest data to: " + target.getAbsolutePath());
 }
 }
 }
 /** Reloads the underlying Lucene Suggester */
 public void reload(SolrCore core, SolrIndexSearcher searcher) throws IOException {
-LOG.info("SolrSuggester.reload(" + name + ")");
+log.info("SolrSuggester.reload(" + name + ")");
 if (dictionary == null && storeDir != null) {
 File lookupFile = getStoreFile();
 if (lookupFile.exists()) {
@@ -211,7 +211,7 @@ public class SolrSuggester implements Accountable {
 IOUtils.closeWhileHandlingException(is);
 }
 } else {
-LOG.info("lookup file doesn't exist");
+log.info("lookup file doesn't exist");
 }
 }
 }
@@ -230,9 +230,9 @@ public class SolrSuggester implements Accountable {
 /** Returns suggestions based on the {@link SuggesterOptions} passed */
 public SuggesterResult getSuggestions(SuggesterOptions options) throws IOException {
-LOG.debug("getSuggestions: " + options.token);
+log.debug("getSuggestions: " + options.token);
 if (lookup == null) {
-LOG.info("Lookup is null - invoke suggest.build first");
+log.info("Lookup is null - invoke suggest.build first");
 return EMPTY_RESULT;
 }
@@ -247,7 +247,7 @@ public class SolrSuggester implements Accountable {
 if(suggestions == null){
 // Context filtering not supported/configured by lookup
 // Silently ignore filtering and serve a result by querying without context filtering
-LOG.debug("Context Filtering Query not supported by {}", lookup.getClass());
+log.debug("Context Filtering Query not supported by {}", lookup.getClass());
 suggestions = lookup.lookup(options.token, false, options.count);
 }
 }
@@ -54,7 +54,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 public class Suggester extends SolrSpellChecker {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 /** Location of the source data - either a path to a file, or null for the
 * current IndexReader.
@@ -86,7 +86,7 @@ public class Suggester extends SolrSpellChecker {
 @Override
 public String init(NamedList config, SolrCore core) {
-LOG.info("init: " + config);
+log.info("init: " + config);
 String name = super.init(config, core);
 threshold = config.get(THRESHOLD_TOKEN_FREQUENCY) == null ? 0.0f
 : (Float)config.get(THRESHOLD_TOKEN_FREQUENCY);
@@ -112,7 +112,7 @@ public class Suggester extends SolrSpellChecker {
 try {
 ((Closeable) lookup).close();
 } catch (IOException e) {
-LOG.warn("Could not close the suggester lookup.", e);
+log.warn("Could not close the suggester lookup.", e);
 }
 }
 }
@@ -134,7 +134,7 @@ public class Suggester extends SolrSpellChecker {
 try {
 lookup.load(new FileInputStream(new File(storeDir, factory.storeFileName())));
 } catch (IOException e) {
-LOG.warn("Loading stored lookup data failed", e);
+log.warn("Loading stored lookup data failed", e);
 }
 }
 }
@@ -144,7 +144,7 @@ public class Suggester extends SolrSpellChecker {
 @Override
 public void build(SolrCore core, SolrIndexSearcher searcher) throws IOException {
-LOG.info("build()");
+log.info("build()");
 if (sourceLocation == null) {
 reader = searcher.getIndexReader();
 dictionary = new HighFrequencyDictionary(reader, field, threshold);
@@ -154,7 +154,7 @@ public class Suggester extends SolrSpellChecker {
 core.getResourceLoader().openResource(sourceLocation), StandardCharsets.UTF_8));
 } catch (UnsupportedEncodingException e) {
 // should not happen
-LOG.error("should not happen", e);
+log.error("should not happen", e);
 }
 }
@@ -164,19 +164,19 @@ public class Suggester extends SolrSpellChecker {
 if(!lookup.store(new FileOutputStream(target))) {
 if (sourceLocation == null) {
 assert reader != null && field != null;
-LOG.error("Store Lookup build from index on field: " + field + " failed reader has: " + reader.maxDoc() + " docs");
+log.error("Store Lookup build from index on field: " + field + " failed reader has: " + reader.maxDoc() + " docs");
 } else {
-LOG.error("Store Lookup build from sourceloaction: " + sourceLocation + " failed");
+log.error("Store Lookup build from sourceloaction: " + sourceLocation + " failed");
 }
 } else {
-LOG.info("Stored suggest data to: " + target.getAbsolutePath());
+log.info("Stored suggest data to: " + target.getAbsolutePath());
 }
 }
 }
 @Override
 public void reload(SolrCore core, SolrIndexSearcher searcher) throws IOException {
-LOG.info("reload()");
+log.info("reload()");
 if (dictionary == null && storeDir != null) {
 // this may be a firstSearcher event, try loading it
 FileInputStream is = new FileInputStream(new File(storeDir, factory.storeFileName()));
@@ -187,7 +187,7 @@ public class Suggester extends SolrSpellChecker {
 } finally {
 IOUtils.closeWhileHandlingException(is);
 }
-LOG.debug("load failed, need to build Lookup again");
+log.debug("load failed, need to build Lookup again");
 }
 // loading was unsuccessful - build it again
 build(core, searcher);
@@ -197,9 +197,9 @@ public class Suggester extends SolrSpellChecker {
 @Override
 public SpellingResult getSuggestions(SpellingOptions options) throws IOException {
-LOG.debug("getSuggestions: " + options.tokens);
+log.debug("getSuggestions: " + options.tokens);
 if (lookup == null) {
-LOG.info("Lookup is null - invoke spellchecker.build first");
+log.info("Lookup is null - invoke spellchecker.build first");
 return EMPTY_RESULT;
 }
 SpellingResult res = new SpellingResult();

@@ -31,12 +31,12 @@ import org.slf4j.LoggerFactory;
 * <b>Note:</b> This Suggester is not very RAM efficient.
 */
 public class JaspellLookupFactory extends LookupFactory {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 private static final String FILENAME = "jaspell.dat";
 @Override
 public Lookup create(NamedList params, SolrCore core) {
-LOG.info("init: " + params);
+log.info("init: " + params);
 return new JaspellLookup();
 }
@@ -38,7 +38,7 @@ import org.slf4j.LoggerFactory;
 * @lucene.experimental
 */
 public class BlockDirectory extends FilterDirectory implements ShutdownAwareDirectory {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 public static final long BLOCK_SHIFT = Integer.getInteger("solr.hdfs.blockcache.blockshift", 13);
@@ -118,11 +118,11 @@ public class BlockDirectory extends FilterDirectory implements ShutdownAwareDirectory {
 }
 this.blockCacheReadEnabled = blockCacheReadEnabled;
 if (!blockCacheReadEnabled) {
-LOG.info("Block cache on read is disabled");
+log.info("Block cache on read is disabled");
 }
 this.blockCacheWriteEnabled = blockCacheWriteEnabled;
 if (!blockCacheWriteEnabled) {
-LOG.info("Block cache on write is disabled");
+log.info("Block cache on write is disabled");
 }
 }
@@ -238,7 +238,7 @@ public class BlockDirectory extends FilterDirectory implements ShutdownAwareDirectory {
 @Override
 public void closeOnShutdown() throws IOException {
-LOG.info("BlockDirectory closing on shutdown");
+log.info("BlockDirectory closing on shutdown");
 // we are shutting down, no need to clean up cache
 super.close();
 }

@@ -41,7 +41,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 public class HdfsDirectory extends BaseDirectory {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 public static final int DEFAULT_BUFFER_SIZE = 4096;
 private static final String LF_EXT = ".lf";
@@ -69,7 +69,7 @@ public class HdfsDirectory extends BaseDirectory {
 if (fileSystem instanceof DistributedFileSystem) {
 // Make sure dfs is not in safe mode
 while (((DistributedFileSystem) fileSystem).setSafeMode(SafeModeAction.SAFEMODE_GET, true)) {
-LOG.warn("The NameNode is in SafeMode - Solr will wait 5 seconds and try again.");
+log.warn("The NameNode is in SafeMode - Solr will wait 5 seconds and try again.");
 try {
 Thread.sleep(5000);
 } catch (InterruptedException e) {
@@ -94,7 +94,7 @@ public class HdfsDirectory extends BaseDirectory {
 @Override
 public void close() throws IOException {
-LOG.info("Closing hdfs directory {}", hdfsDirPath);
+log.info("Closing hdfs directory {}", hdfsDirPath);
 fileSystem.close();
 isOpen = false;
 }
@@ -143,7 +143,7 @@ public class HdfsDirectory extends BaseDirectory {
 @Override
 public void deleteFile(String name) throws IOException {
 Path path = new Path(hdfsDirPath, name);
-LOG.debug("Deleting {}", path);
+log.debug("Deleting {}", path);
 getFileSystem().delete(path, false);
 }
@@ -197,7 +197,7 @@ public class HdfsDirectory extends BaseDirectory {
 }
 public static class HdfsIndexInput extends CustomBufferedIndexInput {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 private final Path path;
 private final FSDataInputStream inputStream;
@@ -208,7 +208,7 @@ public class HdfsDirectory extends BaseDirectory {
 int bufferSize) throws IOException {
 super(name, bufferSize);
 this.path = path;
-LOG.debug("Opening normal index input on {}", path);
+log.debug("Opening normal index input on {}", path);
 FileStatus fileStatus = fileSystem.getFileStatus(path);
 length = fileStatus.getLen();
 inputStream = fileSystem.open(path, bufferSize);
@@ -227,7 +227,7 @@ public class HdfsDirectory extends BaseDirectory {
 @Override
 protected void closeInternal() throws IOException {
-LOG.debug("Closing normal index input on {}", path);
+log.debug("Closing normal index input on {}", path);
 if (!clone) {
 inputStream.close();
 }
@@ -248,7 +248,7 @@ public class HdfsDirectory extends BaseDirectory {
 @Override
 public void sync(Collection<String> names) throws IOException {
-LOG.debug("Sync called on {}", Arrays.toString(names.toArray()));
+log.debug("Sync called on {}", Arrays.toString(names.toArray()));
 }
 @Override
@@ -57,7 +57,7 @@ import org.slf4j.LoggerFactory;
 * Metrics specific utility functions.
 */
 public class MetricUtils {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 public static final String METRIC_NAME = "metric";
 public static final String VALUE = "value";
@@ -274,7 +274,7 @@ public class MetricUtils {
 convertGauge(n, gauge, propertyFilter, simple, compact, separator, consumer);
 } catch (InternalError ie) {
 if (n.startsWith("memory.") && ie.getMessage().contains("Memory Pool not found")) {
-LOG.warn("Error converting gauge '" + n + "', possible JDK bug: SOLR-10362", ie);
+log.warn("Error converting gauge '" + n + "', possible JDK bug: SOLR-10362", ie);
 consumer.accept(n, null);
 } else {
 throw ie;
@@ -577,7 +577,7 @@ public class MetricUtils {
 try {
 beanInfo = Introspector.getBeanInfo(intf, intf.getSuperclass(), Introspector.IGNORE_ALL_BEANINFO);
 } catch (IntrospectionException e) {
-LOG.warn("Unable to fetch properties of MXBean " + obj.getClass().getName());
+log.warn("Unable to fetch properties of MXBean " + obj.getClass().getName());
 return;
 }
 for (final PropertyDescriptor desc : beanInfo.getPropertyDescriptors()) {

@@ -57,7 +57,7 @@ import static org.apache.solr.common.cloud.Replica.State.DOWN;
 public class DeleteReplicaTest extends SolrCloudTestCase {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 @BeforeClass
 public static void setupCluster() throws Exception {
@@ -253,7 +253,7 @@ public class DeleteReplicaTest extends SolrCloudTestCase {
 if (times.incrementAndGet() > 1) {
 return false;
 }
-LOG.info("Running delete core {}",cd);
+log.info("Running delete core {}",cd);
 try {
 ZkNodeProps m = new ZkNodeProps(
@@ -371,7 +371,7 @@ public class DeleteReplicaTest extends SolrCloudTestCase {
 try {
 cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", String.valueOf(doc++)));
 } catch (Exception e) {
-LOG.error("Failed on adding document to {}", collectionName, e);
+log.error("Failed on adding document to {}", collectionName, e);
 }
 }
 });
@@ -389,7 +389,7 @@ public class DeleteReplicaTest extends SolrCloudTestCase {
 try {
 cluster.getSolrClient().waitForState(collectionName, 20, TimeUnit.SECONDS, (liveNodes, collectionState) -> collectionState.getReplicas().size() == 1);
 } catch (TimeoutException e) {
-LOG.info("Timeout wait for state {}", getCollectionState(collectionName));
+log.info("Timeout wait for state {}", getCollectionState(collectionName));
 throw e;
 }
@@ -54,7 +54,7 @@ import org.slf4j.LoggerFactory;
 @Deprecated
 public class LIROnShardRestartTest extends SolrCloudTestCase {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 @BeforeClass
 public static void setupCluster() throws Exception {
@@ -166,7 +166,7 @@ public class LIROnShardRestartTest extends SolrCloudTestCase {
 } catch (Throwable th) {
 String electionPath = "/collections/allReplicasInLIR/leader_elect/shard1/election/";
 List<String> children = zkClient().getChildren(electionPath, null, true);
-LOG.info("Election queue {}", children);
+log.info("Election queue {}", children);
 throw th;
 }

@@ -60,7 +60,7 @@ import org.slf4j.LoggerFactory;
 public class LIRRollingUpdatesTest extends SolrCloudTestCase {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 private static Map<URI, SocketProxy> proxies;
 private static Map<URI, JettySolrRunner> jettys;
@@ -79,7 +79,7 @@ public class LIRRollingUpdatesTest extends SolrCloudTestCase {
 cluster.stopJettySolrRunner(jetty);//TODO: Can we avoid this restart
 cluster.startJettySolrRunner(jetty);
 proxy.open(jetty.getBaseUrl().toURI());
-LOG.info("Adding proxy for URL: " + jetty.getBaseUrl() + ". Proxy: " + proxy.getUrl());
+log.info("Adding proxy for URL: " + jetty.getBaseUrl() + ". Proxy: " + proxy.getUrl());
 proxies.put(proxy.getUrl(), proxy);
 jettys.put(proxy.getUrl(), jetty);
 }

@@ -45,7 +45,7 @@ import org.slf4j.LoggerFactory;
 public class LeaderVoteWaitTimeoutTest extends SolrCloudTestCase {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 private static final int NODE_COUNT = 4;
 private static Map<JettySolrRunner, SocketProxy> proxies;
@@ -70,7 +70,7 @@ public class LeaderVoteWaitTimeoutTest extends SolrCloudTestCase {
 cluster.stopJettySolrRunner(jetty);//TODO: Can we avoid this restart
 cluster.startJettySolrRunner(jetty);
 proxy.open(jetty.getBaseUrl().toURI());
-LOG.info("Adding proxy for URL: " + jetty.getBaseUrl() + ". Proxy: " + proxy.getUrl());
+log.info("Adding proxy for URL: " + jetty.getBaseUrl() + ". Proxy: " + proxy.getUrl());
 proxies.put(jetty, proxy);
 jettys.put(proxy.getUrl(), jetty);
 }
@@ -202,7 +202,7 @@ public class LeaderVoteWaitTimeoutTest extends SolrCloudTestCase {
 } catch (Exception e) {
 List<String> children = zkClient().getChildren("/collections/"+collectionName+"/leader_elect/shard1/election",
 null, true);
-LOG.info("{} election nodes:{}", collectionName, children);
+log.info("{} election nodes:{}", collectionName, children);
 throw e;
 }
 cluster.getJettySolrRunner(0).start();

@@ -47,7 +47,7 @@ import org.slf4j.LoggerFactory;
 public class TestCloudConsistency extends SolrCloudTestCase {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 private static Map<JettySolrRunner, SocketProxy> proxies;
 private static Map<URI, JettySolrRunner> jettys;
@@ -70,7 +70,7 @@ public class TestCloudConsistency extends SolrCloudTestCase {
 cluster.stopJettySolrRunner(jetty);//TODO: Can we avoid this restart
 cluster.startJettySolrRunner(jetty);
 proxy.open(jetty.getBaseUrl().toURI());
-LOG.info("Adding proxy for URL: " + jetty.getBaseUrl() + ". Proxy: " + proxy.getUrl());
+log.info("Adding proxy for URL: " + jetty.getBaseUrl() + ". Proxy: " + proxy.getUrl());
 proxies.put(jetty, proxy);
 jettys.put(proxy.getUrl(), jetty);
 }
@@ -69,7 +69,7 @@ import com.carrotsearch.randomizedtesting.annotations.Repeat;
 @Slow
 public class TestPullReplica extends SolrCloudTestCase {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 private String collectionName = null;
 private final static int REPLICATION_TIMEOUT_SECS = 10;
@@ -85,7 +85,7 @@ public class TestPullReplica extends SolrCloudTestCase {
 .addConfig("conf", configset("cloud-minimal"))
 .configure();
 Boolean useLegacyCloud = rarely();
-LOG.info("Using legacyCloud?: {}", useLegacyCloud);
+log.info("Using legacyCloud?: {}", useLegacyCloud);
 CollectionAdminRequest.ClusterProp clusterPropRequest = CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, String.valueOf(useLegacyCloud));
 CollectionAdminResponse response = clusterPropRequest.process(cluster.getSolrClient());
 assertEquals(0, response.getStatus());
@@ -107,14 +107,14 @@ public class TestPullReplica extends SolrCloudTestCase {
 public void tearDown() throws Exception {
 for (JettySolrRunner jetty:cluster.getJettySolrRunners()) {
 if (!jetty.isRunning()) {
-LOG.warn("Jetty {} not running, probably some bad test. Starting it", jetty.getLocalPort());
+log.warn("Jetty {} not running, probably some bad test. Starting it", jetty.getLocalPort());
 ChaosMonkey.start(jetty);
 }
 }
 if (cluster.getSolrClient().getZkStateReader().getClusterState().getCollectionOrNull(collectionName) != null) {
-LOG.info("tearDown deleting collection");
+log.info("tearDown deleting collection");
 CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
-LOG.info("Collection deleted");
+log.info("Collection deleted");
 waitForDeletion(collectionName);
 }
 super.tearDown();
@@ -321,18 +321,18 @@ public class TestPullReplica extends SolrCloudTestCase {
 List<Replica.State> statesSeen = new ArrayList<>(3);
 cluster.getSolrClient().registerCollectionStateWatcher(collectionName, (liveNodes, collectionState) -> {
 Replica r = collectionState.getSlice("shard1").getReplica("core_node2");
-LOG.info("CollectionStateWatcher state change: {}", r);
+log.info("CollectionStateWatcher state change: {}", r);
 if (r == null) {
 return false;
 }
 statesSeen.add(r.getState());
-LOG.info("CollectionStateWatcher saw state: {}", r.getState());
+log.info("CollectionStateWatcher saw state: {}", r.getState());
 return r.getState() == Replica.State.ACTIVE;
 });
 CollectionAdminRequest.addReplicaToShard(collectionName, "shard1", Replica.Type.PULL).process(cluster.getSolrClient());
 waitForState("Replica not added", collectionName, activeReplicaCount(1, 0, 1));
 zkClient().printLayoutToStdOut();
-LOG.info("Saw states: " + Arrays.toString(statesSeen.toArray()));
+log.info("Saw states: " + Arrays.toString(statesSeen.toArray()));
 assertEquals("Expecting DOWN->RECOVERING->ACTIVE but saw: " + Arrays.toString(statesSeen.toArray()), 3, statesSeen.size());
 assertEquals("Expecting DOWN->RECOVERING->ACTIVE but saw: " + Arrays.toString(statesSeen.toArray()), Replica.State.DOWN, statesSeen.get(0));
 assertEquals("Expecting DOWN->RECOVERING->ACTIVE but saw: " + Arrays.toString(statesSeen.toArray()), Replica.State.RECOVERING, statesSeen.get(0));
@@ -557,7 +557,7 @@ public class TestPullReplica extends SolrCloudTestCase {
 private void waitForDeletion(String collection) throws InterruptedException, KeeperException {
 TimeOut t = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
 while (cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection(collection)) {
-LOG.info("Collection not yet deleted");
+log.info("Collection not yet deleted");
 try {
 Thread.sleep(100);
 if (t.hasTimedOut()) {

@@ -58,7 +58,7 @@ public class TestPullReplicaErrorHandling extends SolrCloudTestCase {
 private final static int REPLICATION_TIMEOUT_SECS = 10;
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 private static Map<URI, SocketProxy> proxies;
 private static Map<URI, JettySolrRunner> jettys;
@@ -83,7 +83,7 @@ public class TestPullReplicaErrorHandling extends SolrCloudTestCase {
 cluster.stopJettySolrRunner(jetty);//TODO: Can we avoid this restart
 cluster.startJettySolrRunner(jetty);
 proxy.open(jetty.getBaseUrl().toURI());
-LOG.info("Adding proxy for URL: " + jetty.getBaseUrl() + ". Proxy: " + proxy.getUrl());
+log.info("Adding proxy for URL: " + jetty.getBaseUrl() + ". Proxy: " + proxy.getUrl());
 proxies.put(proxy.getUrl(), proxy);
 jettys.put(proxy.getUrl(), jetty);
 }
@@ -124,9 +124,9 @@ public class TestPullReplicaErrorHandling extends SolrCloudTestCase {
 @Override
 public void tearDown() throws Exception {
 if (cluster.getSolrClient().getZkStateReader().getClusterState().getCollectionOrNull(collectionName) != null) {
-LOG.info("tearDown deleting collection");
+log.info("tearDown deleting collection");
 CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
-LOG.info("Collection deleted");
+log.info("Collection deleted");
 waitForDeletion(collectionName);
 }
 collectionName = null;
@@ -198,7 +198,7 @@ public void testCantConnectToPullReplica() throws Exception {
 }
 assertNumDocs(10, cluster.getSolrClient());
 } finally {
-LOG.info("Opening leader node");
+log.info("Opening leader node");
 proxy.reopen();
 }
 // Back to normal
@@ -304,7 +304,7 @@ public void testCantConnectToPullReplica() throws Exception {
 private void waitForDeletion(String collection) throws InterruptedException, KeeperException {
 TimeOut t = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
 while (cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection(collection)) {
-LOG.info("Collection not yet deleted");
+log.info("Collection not yet deleted");
 try {
 Thread.sleep(100);
 if (t.hasTimedOut()) {
@ -77,7 +77,7 @@ import org.slf4j.LoggerFactory;
|
|||
@Slow
|
||||
public class TestTlogReplica extends SolrCloudTestCase {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
private String collectionName = null;
|
||||
private final static int REPLICATION_TIMEOUT_SECS = 10;
|
||||
|
@ -93,7 +93,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
|
|||
.addConfig("conf", configset("cloud-minimal-inplace-updates"))
|
||||
.configure();
|
||||
Boolean useLegacyCloud = rarely();
|
||||
LOG.info("Using legacyCloud?: {}", useLegacyCloud);
|
||||
log.info("Using legacyCloud?: {}", useLegacyCloud);
|
||||
CollectionAdminRequest.ClusterProp clusterPropRequest = CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, String.valueOf(useLegacyCloud));
|
||||
CollectionAdminResponse response = clusterPropRequest.process(cluster.getSolrClient());
|
||||
assertEquals(0, response.getStatus());
|
||||
|
@ -115,12 +115,12 @@ public class TestTlogReplica extends SolrCloudTestCase {
|
|||
public void tearDown() throws Exception {
|
||||
for (JettySolrRunner jetty:cluster.getJettySolrRunners()) {
|
||||
if (!jetty.isRunning()) {
|
||||
LOG.warn("Jetty {} not running, probably some bad test. Starting it", jetty.getLocalPort());
|
||||
log.warn("Jetty {} not running, probably some bad test. Starting it", jetty.getLocalPort());
|
||||
ChaosMonkey.start(jetty);
|
||||
}
|
||||
}
|
||||
if (cluster.getSolrClient().getZkStateReader().getClusterState().getCollectionOrNull(collectionName) != null) {
|
||||
LOG.info("tearDown deleting collection");
|
||||
log.info("tearDown deleting collection");
|
||||
CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
|
||||
waitForDeletion(collectionName);
|
||||
}
|
||||
|
@ -561,7 +561,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
|
|||
if ((Integer)((NamedList<Object>)response.get("responseHeader")).get(UpdateRequest.REPFACT) >= 2) {
|
||||
break;
|
||||
}
|
||||
LOG.info("Min RF not achieved yet. retrying");
|
||||
log.info("Min RF not achieved yet. retrying");
|
||||
}
|
||||
checkRTG(3,7, cluster.getJettySolrRunners());
|
||||
DirectUpdateHandler2.commitOnClose = false;
|
||||
|
@ -603,7 +603,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
|
|||
if ((Integer)((NamedList<Object>)response.get("responseHeader")).get(UpdateRequest.REPFACT) >= 2) {
|
||||
break;
|
||||
}
|
||||
LOG.info("Min RF not achieved yet. retrying");
|
||||
log.info("Min RF not achieved yet. retrying");
|
||||
}
|
||||
new UpdateRequest()
|
||||
.add(sdoc("id", "9"))
|
||||
|
|
|
@ -57,7 +57,7 @@ import org.slf4j.LoggerFactory;
|
|||
* Implementation based on {@link org.apache.solr.cloud.ZkDistributedQueue}
|
||||
*/
|
||||
public class GenericDistributedQueue implements DistributedQueue {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
static final String PREFIX = "qn-";
|
||||
|
||||
|
@ -252,7 +252,7 @@ public class GenericDistributedQueue implements DistributedQueue {
|
|||
try {
|
||||
stateManager.removeData(ops.get(j).getPath(), -1);
|
||||
} catch (NoSuchElementException e2) {
|
||||
LOG.debug("Can not remove node which is not exist : " + ops.get(j).getPath());
|
||||
log.debug("Can not remove node which is not exist : " + ops.get(j).getPath());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -419,7 +419,7 @@ public class GenericDistributedQueue implements DistributedQueue {
|
|||
for (String childName : childNames) {
|
||||
// Check format
|
||||
if (!childName.regionMatches(0, PREFIX, 0, PREFIX.length())) {
|
||||
LOG.debug("Found child node with improper name: " + childName);
|
||||
log.debug("Found child node with improper name: " + childName);
|
||||
continue;
|
||||
}
|
||||
orderedChildren.add(childName);
|
||||
|
|
|
@ -105,7 +105,7 @@ import static org.apache.solr.cloud.api.collections.OverseerCollectionMessageHan
|
|||
* Simulated {@link SolrCloudManager}.
|
||||
*/
|
||||
public class SimCloudManager implements SolrCloudManager {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
private final SimDistribStateManager stateManager;
|
||||
private final SimClusterStateProvider clusterStateProvider;
|
||||
|
@ -395,7 +395,7 @@ public class SimCloudManager implements SolrCloudManager {
|
|||
String nodeId = (String)values.get(ImplicitSnitch.NODE);
|
||||
nodeStateProvider.simSetNodeValues(nodeId, values);
|
||||
clusterStateProvider.simAddNode(nodeId);
|
||||
LOG.trace("-- added node " + nodeId);
|
||||
log.trace("-- added node " + nodeId);
|
||||
// initialize history handler if this is the first node
|
||||
if (metricsHistoryHandler == null && liveNodesSet.size() == 1) {
|
||||
metricsHandler = new MetricsHandler(metricManager);
|
||||
|
@ -428,7 +428,7 @@ public class SimCloudManager implements SolrCloudManager {
|
|||
metricsHandler = null;
|
||||
}
|
||||
}
|
||||
LOG.trace("-- removed node " + nodeId);
|
||||
log.trace("-- removed node " + nodeId);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -517,7 +517,7 @@ public class SimCloudManager implements SolrCloudManager {
|
|||
* @param killNodeId optional nodeId to kill. If null then don't kill any node, just restart the thread
|
||||
*/
|
||||
public void simRestartOverseer(String killNodeId) throws Exception {
|
||||
LOG.info("=== Restarting OverseerTriggerThread and clearing object cache...");
|
||||
log.info("=== Restarting OverseerTriggerThread and clearing object cache...");
|
||||
triggerThread.interrupt();
|
||||
IOUtils.closeQuietly(triggerThread);
|
||||
if (killNodeId != null) {
|
||||
|
@ -648,7 +648,7 @@ public class SimCloudManager implements SolrCloudManager {
|
|||
// pay the penalty for remote request, at least 5 ms
|
||||
timeSource.sleep(5);
|
||||
|
||||
LOG.trace("--- got SolrRequest: " + req.getMethod() + " " + req.getPath() +
|
||||
log.trace("--- got SolrRequest: " + req.getMethod() + " " + req.getPath() +
|
||||
(req.getParams() != null ? " " + req.getParams().toQueryString() : ""));
|
||||
if (req.getPath() != null) {
|
||||
if (req.getPath().startsWith("/admin/autoscaling") ||
|
||||
|
@ -674,7 +674,7 @@ public class SimCloudManager implements SolrCloudManager {
|
|||
ByteArrayOutputStream baos = new ByteArrayOutputStream();
|
||||
cw.write(baos);
|
||||
String payload = baos.toString("UTF-8");
|
||||
LOG.trace("-- payload: {}", payload);
|
||||
log.trace("-- payload: {}", payload);
|
||||
queryRequest.setContentStreams(Collections.singletonList(new ContentStreamBase.StringStream(payload)));
|
||||
}
|
||||
queryRequest.getContext().put("httpMethod", req.getMethod().toString());
|
||||
|
@ -698,12 +698,12 @@ public class SimCloudManager implements SolrCloudManager {
|
|||
}
|
||||
}
|
||||
if (queryResponse.getException() != null) {
|
||||
LOG.debug("-- exception handling request", queryResponse.getException());
|
||||
log.debug("-- exception handling request", queryResponse.getException());
|
||||
throw new IOException(queryResponse.getException());
|
||||
}
|
||||
SolrResponse rsp = new SolrResponseBase();
|
||||
rsp.setResponse(queryResponse.getValues());
|
||||
LOG.trace("-- response: {}", rsp);
|
||||
log.trace("-- response: {}", rsp);
|
||||
return rsp;
|
||||
}
|
||||
}
|
||||
|
@ -736,7 +736,7 @@ public class SimCloudManager implements SolrCloudManager {
|
|||
if (action == null) {
|
||||
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown action: " + a);
|
||||
}
|
||||
LOG.trace("Invoking Collection Action :{} with params {}", action.toLower(), req.getParams().toQueryString());
|
||||
log.trace("Invoking Collection Action :{} with params {}", action.toLower(), req.getParams().toQueryString());
|
||||
NamedList results = new NamedList();
|
||||
rsp.setResponse(results);
|
||||
incrementCount(action.name());
|
||||
|
|
|
@ -113,7 +113,7 @@ import static org.apache.solr.common.params.CommonParams.NAME;
|
|||
* </ul>
|
||||
*/
|
||||
public class SimClusterStateProvider implements ClusterStateProvider {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
public static final long DEFAULT_DOC_SIZE_BYTES = 500;
|
||||
|
||||
|
@ -333,7 +333,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
|
|||
}
|
||||
// pick first
|
||||
overseerLeader = liveNodes.iterator().next();
|
||||
LOG.debug("--- new Overseer leader: " + overseerLeader);
|
||||
log.debug("--- new Overseer leader: " + overseerLeader);
|
||||
// record it in ZK
|
||||
Map<String, Object> id = new HashMap<>();
|
||||
id.put("id", cloudManager.getTimeSource().getTimeNs() +
|
||||
|
@ -341,7 +341,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
|
|||
try {
|
||||
cloudManager.getDistribStateManager().makePath(path, Utils.toJSON(id), CreateMode.EPHEMERAL, false);
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Exception saving overseer leader id", e);
|
||||
log.warn("Exception saving overseer leader id", e);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -513,7 +513,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
|
|||
"", true, "INDEX.sizeInBytes");
|
||||
// at this point nuke our cached DocCollection state
|
||||
collectionsStatesRef.set(null);
|
||||
LOG.trace("-- simAddReplica {}", replicaInfo);
|
||||
log.trace("-- simAddReplica {}", replicaInfo);
|
||||
if (runLeaderElection) {
|
||||
simRunLeaderElection(Collections.singleton(replicaInfo.getCollection()), true);
|
||||
}
|
||||
|
@ -552,7 +552,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
|
|||
}
|
||||
cloudManager.getSimNodeStateProvider().simSetNodeValue(nodeId, ImplicitSnitch.DISK, disk + 1);
|
||||
}
|
||||
LOG.trace("-- simRemoveReplica {}", ri);
|
||||
log.trace("-- simRemoveReplica {}", ri);
|
||||
simRunLeaderElection(Collections.singleton(ri.getCollection()), true);
|
||||
return;
|
||||
}
|
||||
|
@ -612,14 +612,14 @@ public class SimClusterStateProvider implements ClusterStateProvider {
|
|||
}
|
||||
dc.getSlices().forEach(s -> {
|
||||
if (s.getLeader() != null) {
|
||||
LOG.debug("-- already has leader {} / {}", dc.getName(), s.getName());
|
||||
log.debug("-- already has leader {} / {}", dc.getName(), s.getName());
|
||||
return;
|
||||
}
|
||||
if (s.getReplicas().isEmpty()) {
|
||||
LOG.debug("-- no replicas in {} / {}", dc.getName(), s.getName());
|
||||
log.debug("-- no replicas in {} / {}", dc.getName(), s.getName());
|
||||
return;
|
||||
}
|
||||
LOG.debug("-- submit leader election for {} / {}", dc.getName(), s.getName());
|
||||
log.debug("-- submit leader election for {} / {}", dc.getName(), s.getName());
|
||||
cloudManager.submit(() -> {
|
||||
simRunLeaderElection(dc.getName(), s, saveClusterState);
|
||||
return true;
|
||||
|
@ -632,9 +632,9 @@ public class SimClusterStateProvider implements ClusterStateProvider {
|
|||
AtomicBoolean stateChanged = new AtomicBoolean(Boolean.FALSE);
|
||||
Replica leader = s.getLeader();
|
||||
if (leader == null || !liveNodes.contains(leader.getNodeName())) {
|
||||
LOG.debug("Running leader election for {} / {}", collection, s.getName());
|
||||
log.debug("Running leader election for {} / {}", collection, s.getName());
|
||||
if (s.getReplicas().isEmpty()) { // no replicas - punt
|
||||
LOG.debug("-- no replicas in {} / {}", collection, s.getName());
|
||||
log.debug("-- no replicas in {} / {}", collection, s.getName());
|
||||
return;
|
||||
}
|
||||
ActionThrottle lt = getThrottle(collection, s.getName());
|
||||
|
@ -651,14 +651,14 @@ public class SimClusterStateProvider implements ClusterStateProvider {
|
|||
synchronized (ri) {
|
||||
if (r.isActive(liveNodes.get())) {
|
||||
if (ri.getVariables().get(ZkStateReader.LEADER_PROP) != null) {
|
||||
LOG.trace("-- found existing leader {} / {}: {}, {}", collection, s.getName(), ri, r);
|
||||
log.trace("-- found existing leader {} / {}: {}, {}", collection, s.getName(), ri, r);
|
||||
alreadyHasLeader.set(true);
|
||||
return;
|
||||
} else {
|
||||
active.add(ri);
|
||||
}
|
||||
} else { // if it's on a node that is not live mark it down
|
||||
LOG.trace("-- replica not active on live nodes: {}, {}", liveNodes.get(), r);
|
||||
log.trace("-- replica not active on live nodes: {}, {}", liveNodes.get(), r);
|
||||
if (!liveNodes.contains(r.getNodeName())) {
|
||||
ri.getVariables().put(ZkStateReader.STATE_PROP, Replica.State.DOWN.toString());
|
||||
ri.getVariables().remove(ZkStateReader.LEADER_PROP);
|
||||
|
@ -668,12 +668,12 @@ public class SimClusterStateProvider implements ClusterStateProvider {
|
|||
}
|
||||
});
|
||||
if (alreadyHasLeader.get()) {
|
||||
LOG.debug("-- already has leader {} / {}: {}", collection, s.getName(), s);
|
||||
log.debug("-- already has leader {} / {}: {}", collection, s.getName(), s);
|
||||
return;
|
||||
}
|
||||
if (active.isEmpty()) {
|
||||
LOG.warn("Can't find any active replicas for {} / {}: {}", collection, s.getName(), s);
|
||||
LOG.debug("-- liveNodes: {}", liveNodes.get());
|
||||
log.warn("Can't find any active replicas for {} / {}: {}", collection, s.getName(), s);
|
||||
log.debug("-- liveNodes: {}", liveNodes.get());
|
||||
return;
|
||||
}
|
||||
// pick first active one
|
||||
|
@ -685,7 +685,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
|
|||
}
|
||||
}
|
||||
if (ri == null) {
|
||||
LOG.warn("-- can't find any suitable replica type for {} / {}: {}", collection, s.getName(), s);
|
||||
log.warn("-- can't find any suitable replica type for {} / {}: {}", collection, s.getName(), s);
|
||||
return;
|
||||
}
|
||||
// now mark the leader election throttle
|
||||
|
@ -695,10 +695,10 @@ public class SimClusterStateProvider implements ClusterStateProvider {
|
|||
ri.getVariables().put(ZkStateReader.LEADER_PROP, "true");
|
||||
}
|
||||
stateChanged.set(true);
|
||||
LOG.debug("-- elected new leader for " + collection + " / " + s.getName() + ": " + ri.getName());
|
||||
log.debug("-- elected new leader for " + collection + " / " + s.getName() + ": " + ri.getName());
|
||||
}
|
||||
} else {
|
||||
LOG.debug("-- already has leader for {} / {}", collection, s.getName());
|
||||
log.debug("-- already has leader for {} / {}", collection, s.getName());
|
||||
}
|
||||
if (stateChanged.get() || saveState) {
|
||||
collectionsStatesRef.set(null);
|
||||
|
@ -751,7 +751,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
|
|||
|
||||
ZkWriteCommand cmd = new ClusterStateMutator(cloudManager).createCollection(clusterState, props);
|
||||
if (cmd.noop) {
|
||||
LOG.warn("Collection {} already exists. exit", collectionName);
|
||||
log.warn("Collection {} already exists. exit", collectionName);
|
||||
results.add("success", "no-op");
|
||||
return;
|
||||
}
|
||||
|
@ -906,7 +906,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
|
|||
saveClusterState.set(true);
|
||||
results.add("success", "");
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Exception", e);
|
||||
log.warn("Exception", e);
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
|
@ -973,7 +973,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
|
|||
String newSolrCoreName = Assign.buildSolrCoreName(stateManager, coll, slice.getName(), replica.getType());
|
||||
String coreNodeName = Assign.assignCoreNodeName(stateManager, coll);
|
||||
ReplicaInfo newReplica = new ReplicaInfo(coreNodeName, newSolrCoreName, collection, slice.getName(), replica.getType(), targetNode, null);
|
||||
LOG.debug("-- new replica: " + newReplica);
|
||||
log.debug("-- new replica: " + newReplica);
|
||||
// xxx should run leader election here already?
|
||||
simAddReplica(targetNode, newReplica, false);
|
||||
// this will trigger leader election
|
||||
|
@ -1276,14 +1276,14 @@ public class SimClusterStateProvider implements ClusterStateProvider {
|
|||
// NOTE: we don't use getProperty because it uses PROPERTY_PROP_PREFIX
|
||||
Replica leader = s.getLeader();
|
||||
if (leader == null) {
|
||||
LOG.debug("-- no leader in " + s);
|
||||
log.debug("-- no leader in " + s);
|
||||
continue;
|
||||
}
|
||||
cloudManager.getMetricManager().registry(createRegistryName(collection, s.getName(), leader)).counter("UPDATE./update.requests").inc();
|
||||
ReplicaInfo ri = getReplicaInfo(leader);
|
||||
Number numDocs = (Number)ri.getVariable("SEARCHER.searcher.numDocs");
|
||||
if (numDocs == null || numDocs.intValue() <= 0) {
|
||||
LOG.debug("-- attempting to delete nonexistent doc " + id + " from " + s.getLeader());
|
||||
log.debug("-- attempting to delete nonexistent doc " + id + " from " + s.getLeader());
|
||||
continue;
|
||||
}
|
||||
modified = true;
|
||||
|
@ -1314,7 +1314,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
|
|||
for (Slice s : coll.getSlices()) {
|
||||
Replica leader = s.getLeader();
|
||||
if (leader == null) {
|
||||
LOG.debug("-- no leader in " + s);
|
||||
log.debug("-- no leader in " + s);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -1348,7 +1348,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
|
|||
Slice s = router.getTargetSlice(id, null, null, req.getParams(), coll);
|
||||
Replica leader = s.getLeader();
|
||||
if (leader == null) {
|
||||
LOG.debug("-- no leader in " + s);
|
||||
log.debug("-- no leader in " + s);
|
||||
continue;
|
||||
}
|
||||
cloudManager.getMetricManager().registry(createRegistryName(collection, s.getName(), leader)).counter("UPDATE./update.requests").inc();
|
||||
|
|
|
@ -68,7 +68,7 @@ import org.slf4j.LoggerFactory;
|
|||
* invoked.
|
||||
*/
|
||||
public class SimDistribStateManager implements DistribStateManager {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
public static final class Node {
|
||||
ReentrantLock dataLock = new ReentrantLock();
|
||||
|
|
|
@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory;
|
|||
* exposed anywhere.
|
||||
*/
|
||||
public class SimDistributedQueueFactory implements DistributedQueueFactory {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
Map<String, SimDistributedQueue> queues = new ConcurrentHashMap<>();
|
||||
|
||||
|
@ -190,7 +190,7 @@ public class SimDistributedQueueFactory implements DistributedQueueFactory {
|
|||
try {
|
||||
queue.offer(new Pair(String.format(Locale.ROOT, "qn-%010d", seq), data));
|
||||
seq++;
|
||||
LOG.trace("=== offer " + System.nanoTime());
|
||||
log.trace("=== offer " + System.nanoTime());
|
||||
changed.signalAll();
|
||||
} finally {
|
||||
updateLock.unlock();
|
||||
|
|
|
@ -47,7 +47,7 @@ import org.slf4j.LoggerFactory;
|
|||
* to setup core-level metrics use {@link SimClusterStateProvider#simSetCollectionValue(String, String, Object, boolean, boolean)}.
|
||||
*/
|
||||
public class SimNodeStateProvider implements NodeStateProvider {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
private final Map<String, Map<String, Object>> nodeValues = new ConcurrentHashMap<>();
|
||||
private final SimClusterStateProvider clusterStateProvider;
|
||||
|
@ -164,7 +164,7 @@ public class SimNodeStateProvider implements NodeStateProvider {
|
|||
* @param node node id
|
||||
*/
|
||||
public void simRemoveNodeValues(String node) throws InterruptedException {
|
||||
LOG.debug("--removing value for " + node);
|
||||
log.debug("--removing value for " + node);
|
||||
lock.lockInterruptibly();
|
||||
try {
|
||||
Map<String, Object> values = nodeValues.remove(node);
|
||||
|
@ -187,7 +187,7 @@ public class SimNodeStateProvider implements NodeStateProvider {
|
|||
try {
|
||||
AtomicBoolean updateRoles = new AtomicBoolean(false);
|
||||
myNodes.forEach(n -> {
|
||||
LOG.debug("- removing dead node values: " + n);
|
||||
log.debug("- removing dead node values: " + n);
|
||||
Map<String, Object> vals = nodeValues.remove(n);
|
||||
if (vals.containsKey("nodeRole")) {
|
||||
updateRoles.set(true);
|
||||
|
@ -253,7 +253,7 @@ public class SimNodeStateProvider implements NodeStateProvider {
|
|||
for (String tag : tags) {
|
||||
String[] parts = tag.split(":");
|
||||
if (parts.length < 3 || !parts[0].equals("metrics")) {
|
||||
LOG.warn("Invalid metrics: tag: " + tag);
|
||||
log.warn("Invalid metrics: tag: " + tag);
|
||||
continue;
|
||||
}
|
||||
if (!parts[1].startsWith("solr.core.")) {
|
||||
|
@ -263,7 +263,7 @@ public class SimNodeStateProvider implements NodeStateProvider {
|
|||
Matcher m = REGISTRY_PATTERN.matcher(parts[1]);
|
||||
|
||||
if (!m.matches()) {
|
||||
LOG.warn("Invalid registry name: " + parts[1]);
|
||||
log.warn("Invalid registry name: " + parts[1]);
|
||||
continue;
|
||||
}
|
||||
String collection = m.group(1);
|
||||
|
@ -291,7 +291,7 @@ public class SimNodeStateProvider implements NodeStateProvider {
|
|||
|
||||
@Override
|
||||
public Map<String, Object> getNodeValues(String node, Collection<String> tags) {
|
||||
LOG.trace("-- requested values for " + node + ": " + tags);
|
||||
log.trace("-- requested values for " + node + ": " + tags);
|
||||
if (!liveNodesSet.contains(node)) {
|
||||
throw new RuntimeException("non-live node " + node);
|
||||
}
|
||||
|
|
|
@ -54,7 +54,7 @@ import org.slf4j.LoggerFactory;
|
|||
* This test compares the cluster state of a real cluster and a simulated one.
|
||||
*/
|
||||
public class TestClusterStateProvider extends SolrCloudTestCase {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
private static int NODE_COUNT = 3;
|
||||
private static boolean simulated;
|
||||
|
@ -72,7 +72,7 @@ public class TestClusterStateProvider extends SolrCloudTestCase {
|
|||
@BeforeClass
|
||||
public static void setupCluster() throws Exception {
|
||||
simulated = random().nextBoolean();
|
||||
LOG.info("####### Using simulated components? " + simulated);
|
||||
log.info("####### Using simulated components? " + simulated);
|
||||
|
||||
configureCluster(NODE_COUNT)
|
||||
.addConfig("conf", configset("cloud-minimal"))
|
||||
|
|
|
@ -47,7 +47,7 @@ import org.slf4j.LoggerFactory;
|
|||
* This test compares a ZK-based {@link DistribStateManager} to the simulated one.
|
||||
*/
|
||||
public class TestDistribStateManager extends SolrTestCaseJ4 {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
private DistribStateManager stateManager;
|
||||
private ZkTestServer zkTestServer;
|
||||
|
@ -80,7 +80,7 @@ public class TestDistribStateManager extends SolrTestCaseJ4 {
|
|||
solrZkClient = new SolrZkClient(zkTestServer.getZkHost(), 30000);
|
||||
stateManager = new ZkDistribStateManager(solrZkClient);
|
||||
}
|
||||
LOG.info("Using " + stateManager.getClass().getName());
|
||||
log.info("Using " + stateManager.getClass().getName());
|
||||
}
|
||||
|
||||
private DistribStateManager createDistribStateManager() {
|
||||
|
|
|
@ -42,7 +42,7 @@ import org.junit.Test;
|
|||
|
||||
public class CurrencyRangeFacetCloudTest extends SolrCloudTestCase {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
private static final String COLLECTION = MethodHandles.lookup().lookupClass().getName();
|
||||
private static final String CONF = COLLECTION + "_configSet";
|
||||
|
|
|
@ -53,7 +53,7 @@ import org.junit.BeforeClass;
|
|||
*/
|
||||
public class RangeFacetCloudTest extends SolrCloudTestCase {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
private static final String COLLECTION = MethodHandles.lookup().lookupClass().getName();
|
||||
private static final String CONF = COLLECTION + "_configSet";
|
||||
|
|
|
@ -43,7 +43,7 @@ import static java.util.stream.Collectors.toList;
|
|||
* lazily.
|
||||
*/
|
||||
public class AutoScalingConfig implements MapWriter {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
private final Map<String, Object> jsonMap;
|
||||
private final boolean empty;
|
||||
|
@ -81,7 +81,7 @@ public class AutoScalingConfig implements MapWriter {
|
|||
TriggerEventProcessorStage stage = TriggerEventProcessorStage.valueOf(String.valueOf(stageName).toUpperCase(Locale.ROOT));
|
||||
stages.add(stage);
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Invalid stage name '" + name + "' in listener config, skipping: " + properties);
|
||||
log.warn("Invalid stage name '" + name + "' in listener config, skipping: " + properties);
|
||||
}
|
||||
}
|
||||
listenerClass = (String)this.properties.get(AutoScalingParams.CLASS);
|
||||
|
|
|
@ -69,7 +69,7 @@ import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.WITH_
|
|||
*
|
||||
*/
|
||||
public class Policy implements MapWriter {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
public static final String POLICY = "policy";
|
||||
public static final String EACH = "#EACH";
|
||||
|
@ -281,7 +281,7 @@ public class Policy implements MapWriter {
|
|||
return p.compare(r1, r2, false);
|
||||
});
|
||||
} catch (Exception e) {
|
||||
LOG.error("Exception! prefs = {}, recent r1 = {}, r2 = {}, matrix = {}",
|
||||
log.error("Exception! prefs = {}, recent r1 = {}, r2 = {}, matrix = {}",
|
||||
clusterPreferences,
|
||||
lastComparison[0],
|
||||
lastComparison[1],
|
||||
|
@ -498,9 +498,9 @@ public class Policy implements MapWriter {
|
|||
this.nodeStateProvider = cloudManager.getNodeStateProvider();
|
||||
try {
|
||||
state = cloudManager.getClusterStateProvider().getClusterState();
|
||||
LOG.trace("-- session created with cluster state: {}", state);
|
||||
log.trace("-- session created with cluster state: {}", state);
|
||||
} catch (Exception e) {
|
||||
LOG.trace("-- session created, can't obtain cluster state", e);
|
||||
log.trace("-- session created, can't obtain cluster state", e);
|
||||
}
|
||||
this.znodeVersion = state != null ? state.getZNodeVersion() : -1;
|
||||
this.nodes = new ArrayList<>(cloudManager.getClusterStateProvider().getLiveNodes());
|
||||
|
|
|
@ -73,7 +73,7 @@ import org.slf4j.LoggerFactory;
|
|||
*/
|
||||
public class HttpClientUtil {
|
||||
|
||||
private static final Logger logger = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
public static final int DEFAULT_CONNECT_TIMEOUT = 60000;
|
||||
public static final int DEFAULT_SO_TIMEOUT = 600000;
|
||||
|
@ -147,7 +147,7 @@ public class HttpClientUtil {
|
|||
// Configure the HttpClientBuilder if user has specified the factory type.
|
||||
String factoryClassName = System.getProperty(SYS_PROP_HTTP_CLIENT_BUILDER_FACTORY);
|
||||
if (factoryClassName != null) {
|
||||
logger.debug ("Using " + factoryClassName);
|
||||
log.debug ("Using " + factoryClassName);
|
||||
try {
|
||||
HttpClientBuilderFactory factory = (HttpClientBuilderFactory)Class.forName(factoryClassName).newInstance();
|
||||
httpClientBuilder = factory.getHttpClientBuilder(Optional.of(SolrHttpClientBuilder.create()));
|
||||
|
@ -176,7 +176,7 @@ public class HttpClientUtil {
|
|||
try {
|
||||
interceptor.process(request, context);
|
||||
} catch (Exception e) {
|
||||
logger.error("", e);
|
||||
log.error("", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
@ -234,7 +234,7 @@ public class HttpClientUtil {
|
|||
} else {
|
||||
sslConnectionSocketFactory = new SSLConnectionSocketFactory(SSLContexts.createSystemDefault(),
|
||||
NoopHostnameVerifier.INSTANCE);
|
||||
logger.debug(HttpClientUtil.SYS_PROP_CHECK_PEER_NAME + "is false, hostname checks disabled.");
|
||||
log.debug(HttpClientUtil.SYS_PROP_CHECK_PEER_NAME + "is false, hostname checks disabled.");
|
||||
}
|
||||
builder.register("https", sslConnectionSocketFactory);
|
||||
|
||||
|
@ -268,8 +268,8 @@ public class HttpClientUtil {
|
|||
|
||||
public static CloseableHttpClient createClient(final SolrParams params, PoolingHttpClientConnectionManager cm, boolean sharedConnectionManager, HttpRequestExecutor httpRequestExecutor) {
|
||||
final ModifiableSolrParams config = new ModifiableSolrParams(params);
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("Creating new http client, config:" + config);
|
||||
if (log.isDebugEnabled()) {
|
||||
log.debug("Creating new http client, config:" + config);
|
||||
}
|
||||
|
||||
cm.setMaxTotal(params.getInt(HttpClientUtil.PROP_MAX_CONNECTIONS, 10000));
|
||||
|
|
|
@ -50,7 +50,7 @@ import org.slf4j.LoggerFactory;
|
|||
public class Krb5HttpClientBuilder implements HttpClientBuilderFactory {
|
||||
|
||||
public static final String LOGIN_CONFIG_PROP = "java.security.auth.login.config";
|
||||
private static final Logger logger = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
private static Configuration jaasConfig = new SolrJaasConfiguration();
|
||||
|
||||
|
@ -85,7 +85,7 @@ public class Krb5HttpClientBuilder implements HttpClientBuilderFactory {
|
|||
String configValue = System.getProperty(LOGIN_CONFIG_PROP);
|
||||
|
||||
if (configValue != null) {
|
||||
logger.info("Setting up SPNego auth with config: " + configValue);
|
||||
log.info("Setting up SPNego auth with config: " + configValue);
|
||||
final String useSubjectCredsProp = "javax.security.auth.useSubjectCredsOnly";
|
||||
String useSubjectCredsVal = System.getProperty(useSubjectCredsProp);
|
||||
|
||||
|
@ -97,7 +97,7 @@ public class Krb5HttpClientBuilder implements HttpClientBuilderFactory {
|
|||
else if (!useSubjectCredsVal.toLowerCase(Locale.ROOT).equals("false")) {
|
||||
// Don't overwrite the prop value if it's already been written to something else,
|
||||
// but log because it is likely the Credentials won't be loaded correctly.
|
||||
logger.warn("System Property: " + useSubjectCredsProp + " set to: " + useSubjectCredsVal
|
||||
log.warn("System Property: " + useSubjectCredsProp + " set to: " + useSubjectCredsVal
|
||||
+ " not false. SPNego authentication may not be successful.");
|
||||
}
|
||||
|
||||
|
@ -139,7 +139,7 @@ public class Krb5HttpClientBuilder implements HttpClientBuilderFactory {
|
|||
HttpClientUtil.addRequestInterceptor(bufferedEntityInterceptor);
|
||||
}
|
||||
} else {
|
||||
logger.warn("{} is configured without specifying system property '{}'",
|
||||
log.warn("{} is configured without specifying system property '{}'",
|
||||
getClass().getName(), LOGIN_CONFIG_PROP);
|
||||
}
|
||||
|
||||
|
@ -176,11 +176,11 @@ public class Krb5HttpClientBuilder implements HttpClientBuilderFactory {
|
|||
public AppConfigurationEntry[] getAppConfigurationEntry(String appName) {
|
||||
if (baseConfig == null) return null;
|
||||
|
||||
logger.debug("Login prop: "+System.getProperty(LOGIN_CONFIG_PROP));
|
||||
log.debug("Login prop: "+System.getProperty(LOGIN_CONFIG_PROP));
|
||||
|
||||
String clientAppName = System.getProperty("solr.kerberos.jaas.appname", "Client");
|
||||
if (initiateAppNames.contains(appName)) {
|
||||
logger.debug("Using AppConfigurationEntry for appName '"+clientAppName+"' instead of: " + appName);
|
||||
log.debug("Using AppConfigurationEntry for appName '"+clientAppName+"' instead of: " + appName);
|
||||
return baseConfig.getAppConfigurationEntry(clientAppName);
|
||||
}
|
||||
return baseConfig.getAppConfigurationEntry(appName);
|
||||
|
|
|
@ -43,7 +43,7 @@ import org.slf4j.LoggerFactory;
|
|||
*/
|
||||
public class CommitStream extends TupleStream implements Expressible {
|
||||
private static final long serialVersionUID = 1L;
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
// Part of expression / passed in
|
||||
private String collection;
|
||||
|
@ -252,7 +252,7 @@ public class CommitStream extends TupleStream implements Expressible {
|
|||
try {
|
||||
clientCache.getCloudSolrClient(zkHost).commit(collection, waitFlush, waitSearcher, softCommit);
|
||||
} catch (SolrServerException | IOException e) {
|
||||
LOG.warn(String.format(Locale.ROOT, "Unable to commit documents to collection '%s' due to unexpected error.", collection), e);
|
||||
log.warn(String.format(Locale.ROOT, "Unable to commit documents to collection '%s' due to unexpected error.", collection), e);
|
||||
String className = e.getClass().getName();
|
||||
String message = e.getMessage();
|
||||
throw new IOException(String.format(Locale.ROOT,"Unexpected error when committing documents to collection %s- %s:%s", collection, className, message));
|
||||
|
|
|
@ -60,7 +60,7 @@ public class DaemonStream extends TupleStream implements Expressible {
|
|||
private Map<String, DaemonStream> daemons;
|
||||
private boolean terminate;
|
||||
private boolean closed = false;
|
||||
private static final Logger logger = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
public DaemonStream(StreamExpression expression, StreamFactory factory) throws IOException{
|
||||
|
||||
|
@ -329,14 +329,14 @@ public class DaemonStream extends TupleStream implements Expressible {
|
|||
}
|
||||
} catch (IOException e) {
|
||||
exception = e;
|
||||
logger.error("Error in DaemonStream:" + id, e);
|
||||
log.error("Error in DaemonStream:" + id, e);
|
||||
++errors;
|
||||
if (errors > 100) {
|
||||
logger.error("Too many consectutive errors. Stopping DaemonStream:" + id);
|
||||
log.error("Too many consectutive errors. Stopping DaemonStream:" + id);
|
||||
break OUTER;
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
logger.error("Fatal Error in DaemonStream:" + id, t);
|
||||
log.error("Fatal Error in DaemonStream:" + id, t);
|
||||
//For anything other then IOException break out of the loop and shutdown the thread.
|
||||
break OUTER;
|
||||
} finally {
|
||||
|
@ -345,7 +345,7 @@ public class DaemonStream extends TupleStream implements Expressible {
|
|||
} catch (IOException e1) {
|
||||
if (exception == null) {
|
||||
exception = e1;
|
||||
logger.error("Error in DaemonStream:" + id, e1);
|
||||
log.error("Error in DaemonStream:" + id, e1);
|
||||
break OUTER;
|
||||
}
|
||||
}
|
||||
|
@ -357,7 +357,7 @@ public class DaemonStream extends TupleStream implements Expressible {
|
|||
try {
|
||||
Thread.sleep(sleepMillis);
|
||||
} catch (InterruptedException e) {
|
||||
logger.error("Error in DaemonStream:" + id, e);
|
||||
log.error("Error in DaemonStream:" + id, e);
|
||||
break OUTER;
|
||||
}
|
||||
}
|
||||
|
@ -370,7 +370,7 @@ public class DaemonStream extends TupleStream implements Expressible {
|
|||
try {
|
||||
queue.put(tuple);
|
||||
} catch (InterruptedException e) {
|
||||
logger.error("Error in DaemonStream:"+id, e);
|
||||
log.error("Error in DaemonStream:"+id, e);
|
||||
}
|
||||
}
|
||||
setStopTime(new Date().getTime());
|
||||
|
|
|
@ -55,7 +55,7 @@ import static org.apache.solr.common.params.CommonParams.ID;
|
|||
|
||||
public class ExecutorStream extends TupleStream implements Expressible {
|
||||
|
||||
private static final Logger logger = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
private TupleStream stream;
|
||||
|
||||
|
@ -148,7 +148,7 @@ public class ExecutorStream extends TupleStream implements Expressible {
|
|||
try {
|
||||
executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.SECONDS);
|
||||
} catch(InterruptedException e) {
|
||||
logger.error("Interrupted while waiting for termination", e);
|
||||
log.error("Interrupted while waiting for termination", e);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -214,12 +214,12 @@ public class ExecutorStream extends TupleStream implements Expressible {
|
|||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("Executor Error: id="+id+" expr_s="+expr, e);
|
||||
log.error("Executor Error: id="+id+" expr_s="+expr, e);
|
||||
} finally {
|
||||
try {
|
||||
stream.close();
|
||||
} catch (Exception e1) {
|
||||
logger.error("Executor Error", e1);
|
||||
log.error("Executor Error", e1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -50,7 +50,7 @@ import static org.apache.solr.common.params.CommonParams.VERSION_FIELD;
|
|||
* @since 6.0.0
|
||||
*/
|
||||
public class UpdateStream extends TupleStream implements Expressible {
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
public static String BATCH_INDEXED_FIELD_NAME = "batchIndexed"; // field name in summary tuple for #docs updated in batch
|
||||
private String collection;
|
||||
|
@ -281,7 +281,7 @@ public class UpdateStream extends TupleStream implements Expressible {
|
|||
}
|
||||
}
|
||||
}
|
||||
LOG.debug("Tuple [{}] was converted into SolrInputDocument [{}].", tuple, doc);
|
||||
log.debug("Tuple [{}] was converted into SolrInputDocument [{}].", tuple, doc);
|
||||
|
||||
return doc;
|
||||
}
|
||||
|
@ -300,7 +300,7 @@ public class UpdateStream extends TupleStream implements Expressible {
|
|||
try {
|
||||
cloudSolrClient.add(collection, documentBatch);
|
||||
} catch (SolrServerException | IOException e) {
|
||||
LOG.warn("Unable to add documents to collection due to unexpected error.", e);
|
||||
log.warn("Unable to add documents to collection due to unexpected error.", e);
|
||||
String className = e.getClass().getName();
|
||||
String message = e.getMessage();
|
||||
throw new IOException(String.format(Locale.ROOT,"Unexpected error when adding documents to collection %s- %s:%s", collection, className, message));
|
||||
|
|
|
@ -36,7 +36,7 @@ import java.util.regex.Pattern;
|
|||
*/
|
||||
public class ZkConfigManager {
|
||||
|
||||
private static final Logger logger = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
/** ZkNode where named configs are stored */
|
||||
public static final String CONFIGS_ZKNODE = "/configs";
|
||||
|
@ -139,7 +139,7 @@ public class ZkConfigManager {
|
|||
List<String> children = zkClient.getChildren(fromZkPath + "/" + file, null, true);
|
||||
if (children.size() == 0) {
|
||||
final String toZkFilePath = toZkPath + "/" + file;
|
||||
logger.info("Copying zk node {} to {}",
|
||||
log.info("Copying zk node {} to {}",
|
||||
fromZkPath + "/" + file, toZkFilePath);
|
||||
byte[] data = zkClient.getData(fromZkPath + "/" + file, null, null, true);
|
||||
zkClient.makePath(toZkFilePath, data, true);
|
||||
|
|
|
@ -72,7 +72,7 @@ import static org.apache.solr.common.util.Utils.fromJSON;
|
|||
|
||||
public class ZkStateReader implements Closeable {
|
||||
public static final int STATE_UPDATE_DELAY = Integer.getInteger("solr.OverseerStateUpdateDelay", 2000); // delay between cloud state updates
|
||||
private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
public static final String BASE_URL_PROP = "base_url";
|
||||
public static final String NODE_NAME_PROP = "node_name";
|
||||
|
@ -243,7 +243,7 @@ public class ZkStateReader implements Closeable {
|
|||
String configName = null;
|
||||
|
||||
String path = COLLECTIONS_ZKNODE + "/" + collection;
|
||||
LOG.debug("Loading collection config from: [{}]", path);
|
||||
log.debug("Loading collection config from: [{}]", path);
|
||||
|
||||
try {
|
||||
byte[] data = zkClient.getData(path, null, null, true);
|
||||
|
@ -256,10 +256,10 @@ public class ZkStateReader implements Closeable {
|
|||
if (configName != null) {
|
||||
String configPath = CONFIGS_ZKNODE + "/" + configName;
|
||||
if (!zkClient.exists(configPath, true)) {
|
||||
LOG.error("Specified config=[{}] does not exist in ZooKeeper at location=[{}]", configName, configPath);
|
||||
log.error("Specified config=[{}] does not exist in ZooKeeper at location=[{}]", configName, configPath);
|
||||
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "Specified config does not exist in ZooKeeper: " + configName);
|
||||
} else {
|
||||
LOG.debug("path=[{}] [{}]=[{}] specified config exists in ZooKeeper", configPath, CONFIGNAME_PROP, configName);
|
||||
log.debug("path=[{}] [{}]=[{}] specified config exists in ZooKeeper", configPath, CONFIGNAME_PROP, configName);
|
||||
}
|
||||
} else {
|
||||
throw new ZooKeeperException(ErrorCode.INVALID_STATE, "No config data found at path: " + path);
|
||||
|
@ -300,12 +300,12 @@ public class ZkStateReader implements Closeable {
|
|||
try {
|
||||
ZkStateReader.this.createClusterStateWatchersAndUpdate();
|
||||
} catch (KeeperException e) {
|
||||
LOG.error("A ZK error has occurred", e);
|
||||
log.error("A ZK error has occurred", e);
|
||||
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
|
||||
} catch (InterruptedException e) {
|
||||
// Restore the interrupted status
|
||||
Thread.currentThread().interrupt();
|
||||
LOG.error("Interrupted", e);
|
||||
log.error("Interrupted", e);
|
||||
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "Interrupted", e);
|
||||
}
|
||||
}
|
||||
|
@ -359,7 +359,7 @@ public class ZkStateReader implements Closeable {
|
|||
|
||||
synchronized (getUpdateLock()) {
|
||||
if (clusterState == null) {
|
||||
LOG.warn("ClusterState watchers have not been initialized");
|
||||
log.warn("ClusterState watchers have not been initialized");
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -367,20 +367,20 @@ public class ZkStateReader implements Closeable {
|
|||
if (ref == null || legacyCollectionStates.containsKey(collection)) {
|
||||
// We either don't know anything about this collection (maybe it's new?) or it's legacy.
|
||||
// First update the legacy cluster state.
|
||||
LOG.debug("Checking legacy cluster state for collection {}", collection);
|
||||
log.debug("Checking legacy cluster state for collection {}", collection);
|
||||
refreshLegacyClusterState(null);
|
||||
if (!legacyCollectionStates.containsKey(collection)) {
|
||||
// No dice, see if a new collection just got created.
|
||||
LazyCollectionRef tryLazyCollection = new LazyCollectionRef(collection);
|
||||
if (tryLazyCollection.get() != null) {
|
||||
// What do you know, it exists!
|
||||
LOG.debug("Adding lazily-loaded reference for collection {}", collection);
|
||||
log.debug("Adding lazily-loaded reference for collection {}", collection);
|
||||
lazyCollectionStates.putIfAbsent(collection, tryLazyCollection);
|
||||
constructState(Collections.singleton(collection));
|
||||
}
|
||||
}
|
||||
} else if (ref.isLazilyLoaded()) {
|
||||
LOG.debug("Refreshing lazily-loaded state for collection {}", collection);
|
||||
log.debug("Refreshing lazily-loaded state for collection {}", collection);
|
||||
if (ref.get() != null) {
|
||||
return;
|
||||
}
|
||||
|
@ -388,13 +388,13 @@ public class ZkStateReader implements Closeable {
|
|||
refreshLegacyClusterState(null);
|
||||
} else if (watchedCollectionStates.containsKey(collection)) {
|
||||
// Exists as a watched collection, force a refresh.
|
||||
LOG.debug("Forcing refresh of watched collection state for {}", collection);
|
||||
log.debug("Forcing refresh of watched collection state for {}", collection);
|
||||
DocCollection newState = fetchCollectionState(collection, null);
|
||||
if (updateWatchedCollection(collection, newState)) {
|
||||
constructState(Collections.singleton(collection));
|
||||
}
|
||||
} else {
|
||||
LOG.error("Collection {} is not lazy or watched!", collection);
|
||||
log.error("Collection {} is not lazy or watched!", collection);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -409,7 +409,7 @@ public class ZkStateReader implements Closeable {
|
|||
DocCollection collection = clusterState.getCollectionOrNull(coll);
|
||||
if (collection == null) return null;
|
||||
if (collection.getZNodeVersion() < version) {
|
||||
LOG.debug("Server older than client {}<{}", collection.getZNodeVersion(), version);
|
||||
log.debug("Server older than client {}<{}", collection.getZNodeVersion(), version);
|
||||
DocCollection nu = getCollectionLive(this, coll);
|
||||
if (nu == null) return -1 ;
|
||||
if (nu.getZNodeVersion() > collection.getZNodeVersion()) {
|
||||
|
@ -426,7 +426,7 @@ public class ZkStateReader implements Closeable {
|
|||
return null;
|
||||
}
|
||||
|
||||
LOG.debug("Wrong version from client [{}]!=[{}]", version, collection.getZNodeVersion());
|
||||
log.debug("Wrong version from client [{}]!=[{}]", version, collection.getZNodeVersion());
|
||||
|
||||
return collection.getZNodeVersion();
|
||||
}
|
||||
|
@ -435,7 +435,7 @@ public class ZkStateReader implements Closeable {
|
|||
InterruptedException {
|
||||
// We need to fetch the current cluster state and the set of live nodes
|
||||
|
||||
LOG.debug("Updating cluster state from ZooKeeper... ");
|
||||
log.debug("Updating cluster state from ZooKeeper... ");
|
||||
|
||||
// Sanity check ZK structure.
|
||||
if (!zkClient.exists(CLUSTER_STATE, true)) {
|
||||
|
@ -480,7 +480,7 @@ public class ZkStateReader implements Closeable {
|
|||
}
|
||||
try {
|
||||
synchronized (ZkStateReader.this.getUpdateLock()) {
|
||||
LOG.debug("Updating [{}] ... ", SOLR_SECURITY_CONF_PATH);
|
||||
log.debug("Updating [{}] ... ", SOLR_SECURITY_CONF_PATH);
|
||||
|
||||
// remake watch
|
||||
final Watcher thisWatch = this;
|
||||
|
@ -489,18 +489,18 @@ public class ZkStateReader implements Closeable {
|
|||
try {
|
||||
callback.call(new Pair<>(data, stat));
|
||||
} catch (Exception e) {
|
||||
LOG.error("Error running collections node listener", e);
|
||||
log.error("Error running collections node listener", e);
|
||||
}
|
||||
}
|
||||
} catch (KeeperException.ConnectionLossException | KeeperException.SessionExpiredException e) {
|
||||
LOG.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
|
||||
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
|
||||
} catch (KeeperException e) {
|
||||
LOG.error("A ZK error has occurred", e);
|
||||
log.error("A ZK error has occurred", e);
|
||||
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "", e);
|
||||
} catch (InterruptedException e) {
|
||||
// Restore the interrupted status
|
||||
Thread.currentThread().interrupt();
|
||||
LOG.warn("Interrupted", e);
|
||||
log.warn("Interrupted", e);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -534,15 +534,15 @@ public class ZkStateReader implements Closeable {
|
|||
|
||||
this.clusterState = new ClusterState(liveNodes, result, legacyClusterStateVersion);
|
||||
|
||||
LOG.debug("clusterStateSet: legacy [{}] interesting [{}] watched [{}] lazy [{}] total [{}]",
|
||||
log.debug("clusterStateSet: legacy [{}] interesting [{}] watched [{}] lazy [{}] total [{}]",
|
||||
legacyCollectionStates.keySet().size(),
|
||||
collectionWatches.keySet().size(),
|
||||
watchedCollectionStates.keySet().size(),
|
||||
lazyCollectionStates.keySet().size(),
|
||||
clusterState.getCollectionStates().size());
|
||||
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace("clusterStateSet: legacy [{}] interesting [{}] watched [{}] lazy [{}] total [{}]",
|
||||
if (log.isTraceEnabled()) {
|
||||
log.trace("clusterStateSet: legacy [{}] interesting [{}] watched [{}] lazy [{}] total [{}]",
|
||||
legacyCollectionStates.keySet(),
|
||||
collectionWatches.keySet(),
|
||||
watchedCollectionStates.keySet(),
|
||||
|
@ -631,7 +631,7 @@ public class ZkStateReader implements Closeable {
|
|||
try {
|
||||
children = zkClient.getChildren(COLLECTIONS_ZKNODE, watcher, true);
|
||||
} catch (KeeperException.NoNodeException e) {
|
||||
LOG.warn("Error fetching collection names: [{}]", e.getMessage());
|
||||
log.warn("Error fetching collection names: [{}]", e.getMessage());
|
||||
// fall through
|
||||
}
|
||||
if (children == null || children.isEmpty()) {
|
||||
|
@ -783,10 +783,10 @@ public class ZkStateReader implements Closeable {
|
|||
}
|
||||
}
|
||||
if (oldLiveNodes.size() != newLiveNodes.size()) {
|
||||
LOG.info("Updated live nodes from ZooKeeper... ({}) -> ({})", oldLiveNodes.size(), newLiveNodes.size());
|
||||
log.info("Updated live nodes from ZooKeeper... ({}) -> ({})", oldLiveNodes.size(), newLiveNodes.size());
|
||||
}
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Updated live nodes from ZooKeeper... {} -> {}", oldLiveNodes, newLiveNodes);
|
||||
if (log.isDebugEnabled()) {
|
||||
log.debug("Updated live nodes from ZooKeeper... {} -> {}", oldLiveNodes, newLiveNodes);
|
||||
}
|
||||
if (!oldLiveNodes.equals(newLiveNodes)) { // fire listeners
|
||||
liveNodesListeners.forEach(listener ->
|
||||
|
@ -1002,11 +1002,11 @@ public class ZkStateReader implements Closeable {
|
|||
try {
|
||||
byte[] data = zkClient.getData(ZkStateReader.CLUSTER_PROPS, clusterPropertiesWatcher, new Stat(), true);
|
||||
this.clusterProperties = (Map<String, Object>) Utils.fromJSON(data);
|
||||
LOG.debug("Loaded cluster properties: {}", this.clusterProperties);
|
||||
log.debug("Loaded cluster properties: {}", this.clusterProperties);
|
||||
return;
|
||||
} catch (KeeperException.NoNodeException e) {
|
||||
this.clusterProperties = Collections.emptyMap();
|
||||
LOG.debug("Loaded empty cluster properties");
|
||||
log.debug("Loaded empty cluster properties");
|
||||
// set an exists watch, and if the node has been created since the last call,
|
||||
// read the data again
|
||||
if (zkClient.exists(ZkStateReader.CLUSTER_PROPS, clusterPropertiesWatcher, true) == null)
|
||||
|
@ -1014,7 +1014,7 @@ public class ZkStateReader implements Closeable {
|
|||
}
|
||||
}
|
||||
} catch (KeeperException | InterruptedException e) {
|
||||
LOG.error("Error reading cluster properties from zookeeper", SolrZkClient.checkInterrupted(e));
|
||||
log.error("Error reading cluster properties from zookeeper", SolrZkClient.checkInterrupted(e));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1117,12 +1117,12 @@ public class ZkStateReader implements Closeable {
|
|||
|
||||
if (!collectionWatches.containsKey(coll)) {
|
||||
// This collection is no longer interesting, stop watching.
|
||||
LOG.debug("Uninteresting collection {}", coll);
|
||||
log.debug("Uninteresting collection {}", coll);
|
||||
return;
|
||||
}
|
||||
|
||||
Set<String> liveNodes = ZkStateReader.this.liveNodes;
|
||||
LOG.info("A cluster state change: [{}] for collection [{}] has occurred - updating... (live nodes size: [{}])",
|
||||
log.info("A cluster state change: [{}] for collection [{}] has occurred - updating... (live nodes size: [{}])",
|
||||
event, coll, liveNodes.size());
|
||||
|
||||
refreshAndWatch();
|
||||
|
@ -1143,13 +1143,13 @@ public class ZkStateReader implements Closeable {
|
|||
}
|
||||
|
||||
} catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) {
|
||||
LOG.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
|
||||
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
|
||||
} catch (KeeperException e) {
|
||||
LOG.error("Unwatched collection: [{}]", coll, e);
|
||||
log.error("Unwatched collection: [{}]", coll, e);
|
||||
throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
LOG.error("Unwatched collection: [{}]", coll, e);
|
||||
log.error("Unwatched collection: [{}]", coll, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1164,7 +1164,7 @@ public class ZkStateReader implements Closeable {
|
|||
return;
|
||||
}
|
||||
int liveNodesSize = ZkStateReader.this.clusterState == null ? 0 : ZkStateReader.this.clusterState.getLiveNodes().size();
|
||||
LOG.debug("A cluster state change: [{}], has occurred - updating... (live nodes size: [{}])", event, liveNodesSize);
|
||||
log.debug("A cluster state change: [{}], has occurred - updating... (live nodes size: [{}])", event, liveNodesSize);
|
||||
refreshAndWatch();
|
||||
}
|
||||
|
||||
|
@ -1176,14 +1176,14 @@ public class ZkStateReader implements Closeable {
|
|||
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
|
||||
"Cannot connect to cluster at " + zkClient.getZkServerAddress() + ": cluster not found/not ready");
|
||||
} catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) {
|
||||
LOG.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
|
||||
log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
|
||||
} catch (KeeperException e) {
|
||||
LOG.error("A ZK error has occurred", e);
|
||||
log.error("A ZK error has occurred", e);
|
||||
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
|
||||
} catch (InterruptedException e) {
|
||||
// Restore the interrupted status
|
||||
Thread.currentThread().interrupt();
|
||||
LOG.warn("Interrupted", e);
|
||||
log.warn("Interrupted", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -1205,11 +1205,11 @@ public class ZkStateReader implements Closeable {

 if (!collectionPropsWatches.containsKey(coll)) {
 // No one can be notified of the change, we can ignore it and "unset" the watch
-LOG.debug("Ignoring property change for collection {}", coll);
+log.debug("Ignoring property change for collection {}", coll);
 return;
 }

-LOG.info("A collection property change: [{}] for collection [{}] has occurred - updating...",
+log.info("A collection property change: [{}] for collection [{}] has occurred - updating...",
 event, coll);

 refreshAndWatch(true);

@@ -1236,13 +1236,13 @@ public class ZkStateReader implements Closeable {
 }
 }
 } catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) {
-LOG.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
+log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
 } catch (KeeperException e) {
-LOG.error("Lost collection property watcher for {} due to ZK error", coll, e);
+log.error("Lost collection property watcher for {} due to ZK error", coll, e);
 throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
 } catch (InterruptedException e) {
 Thread.currentThread().interrupt();
-LOG.error("Lost collection property watcher for {} due to the thread being interrupted", coll, e);
+log.error("Lost collection property watcher for {} due to the thread being interrupted", coll, e);
 }
 }
 }

@@ -1256,7 +1256,7 @@ public class ZkStateReader implements Closeable {
 if (EventType.None.equals(event.getType())) {
 return;
 }
-LOG.debug("A collections change: [{}], has occurred - updating...", event);
+log.debug("A collections change: [{}], has occurred - updating...", event);
 refreshAndWatch();
 synchronized (getUpdateLock()) {
 constructState(Collections.emptySet());

@@ -1268,14 +1268,14 @@ public class ZkStateReader implements Closeable {
 try {
 refreshCollectionList(this);
 } catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) {
-LOG.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
+log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
 } catch (KeeperException e) {
-LOG.error("A ZK error has occurred", e);
+log.error("A ZK error has occurred", e);
 throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
 } catch (InterruptedException e) {
 // Restore the interrupted status
 Thread.currentThread().interrupt();
-LOG.warn("Interrupted", e);
+log.warn("Interrupted", e);
 }
 }
 }

@@ -1289,7 +1289,7 @@ public class ZkStateReader implements Closeable {
 if (EventType.None.equals(event.getType())) {
 return;
 }
-LOG.debug("A live node change: [{}], has occurred - updating... (live nodes size: [{}])", event, liveNodes.size());
+log.debug("A live node change: [{}], has occurred - updating... (live nodes size: [{}])", event, liveNodes.size());
 refreshAndWatch();
 }

@@ -1297,14 +1297,14 @@ public class ZkStateReader implements Closeable {
 try {
 refreshLiveNodes(this);
 } catch (KeeperException.SessionExpiredException | KeeperException.ConnectionLossException e) {
-LOG.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
+log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
 } catch (KeeperException e) {
-LOG.error("A ZK error has occurred", e);
+log.error("A ZK error has occurred", e);
 throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
 } catch (InterruptedException e) {
 // Restore the interrupted status
 Thread.currentThread().interrupt();
-LOG.warn("Interrupted", e);
+log.warn("Interrupted", e);
 }
 }
 }

@@ -1520,7 +1520,7 @@ public class ZkStateReader implements Closeable {
 private boolean updateWatchedCollection(String coll, DocCollection newState) {

 if (newState == null) {
-LOG.debug("Removing cached collection state for [{}]", coll);
+log.debug("Removing cached collection state for [{}]", coll);
 watchedCollectionStates.remove(coll);
 return true;
 }

@@ -1534,7 +1534,7 @@ public class ZkStateReader implements Closeable {
 DocCollection oldState = watchedCollectionStates.get(coll);
 if (oldState == null) {
 if (watchedCollectionStates.putIfAbsent(coll, newState) == null) {
-LOG.debug("Add data for [{}] ver [{}]", coll, newState.getZNodeVersion());
+log.debug("Add data for [{}] ver [{}]", coll, newState.getZNodeVersion());
 updated = true;
 break;
 }

@@ -1546,7 +1546,7 @@ public class ZkStateReader implements Closeable {
 break;
 }
 if (watchedCollectionStates.replace(coll, oldState, newState)) {
-LOG.debug("Updating data for [{}] from [{}] to [{}]", coll, oldState.getZNodeVersion(), newState.getZNodeVersion());
+log.debug("Updating data for [{}] from [{}] to [{}]", coll, oldState.getZNodeVersion(), newState.getZNodeVersion());
 updated = true;
 break;
 }

@@ -1556,7 +1556,7 @@ public class ZkStateReader implements Closeable {
 // Resolve race with unregisterCore.
 if (!collectionWatches.containsKey(coll)) {
 watchedCollectionStates.remove(coll);
-LOG.debug("Removing uninteresting collection [{}]", coll);
+log.debug("Removing uninteresting collection [{}]", coll);
 }

 return updated;

@@ -1611,7 +1611,7 @@ public class ZkStateReader implements Closeable {
 }
 catch (RejectedExecutionException e) {
 if (closed == false) {
-LOG.error("Couldn't run collection notifications for {}", collection, e);
+log.error("Couldn't run collection notifications for {}", collection, e);
 }
 }
 }

@@ -1643,7 +1643,7 @@ public class ZkStateReader implements Closeable {
 removeCollectionStateWatcher(collection, watcher);
 }
 } catch (Exception exception) {
-LOG.warn("Error on calling watcher", exception);
+log.warn("Error on calling watcher", exception);
 }
 }
 }

@@ -1684,7 +1684,7 @@ public class ZkStateReader implements Closeable {
 */
 public class AliasesManager implements Watcher { // the holder is a Zk watcher
 // note: as of this writing, this class if very generic. Is it useful to use for other ZK managed things?
-private final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

 private volatile Aliases aliases = Aliases.EMPTY;


@@ -1719,7 +1719,7 @@ public class ZkStateReader implements Closeable {
 Aliases modAliases = op.apply(curAliases);
 final byte[] modAliasesJson = modAliases.toJSON();
 if (curAliases == modAliases) {
-LOG.debug("Current aliases has the desired modification; no further ZK interaction needed.");
+log.debug("Current aliases has the desired modification; no further ZK interaction needed.");
 return;
 }

@@ -1729,8 +1729,8 @@ public class ZkStateReader implements Closeable {
 setIfNewer(Aliases.fromJSON(modAliasesJson, stat.getVersion()));
 return;
 } catch (KeeperException.BadVersionException e) {
-LOG.debug(e.toString(), e);
-LOG.warn("Couldn't save aliases due to race with another modification; will update and retry until timeout");
+log.debug(e.toString(), e);
+log.warn("Couldn't save aliases due to race with another modification; will update and retry until timeout");
 // considered a backoff here, but we really do want to compete strongly since the normal case is
 // that we will do one update and succeed. This is left as a hot loop for limited tries intentionally.
 // More failures than that here probably indicate a bug or a very strange high write frequency usage for

@@ -1758,7 +1758,7 @@ public class ZkStateReader implements Closeable {
 * @return true if an update was performed
 */
 public boolean update() throws KeeperException, InterruptedException {
-LOG.debug("Checking ZK for most up to date Aliases {}", ALIASES);
+log.debug("Checking ZK for most up to date Aliases {}", ALIASES);
 // Call sync() first to ensure the subsequent read (getData) is up to date.
 zkClient.getSolrZooKeeper().sync(ALIASES, null, null);
 Stat stat = new Stat();

@@ -1774,7 +1774,7 @@ public class ZkStateReader implements Closeable {
 return;
 }
 try {
-LOG.debug("Aliases: updating");
+log.debug("Aliases: updating");

 // re-register the watch
 Stat stat = new Stat();

@@ -1783,14 +1783,14 @@ public class ZkStateReader implements Closeable {
 setIfNewer(Aliases.fromJSON(data, stat.getVersion()));
 } catch (KeeperException.ConnectionLossException | KeeperException.SessionExpiredException e) {
 // note: aliases.json is required to be present
-LOG.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
+log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK: [{}]", e.getMessage());
 } catch (KeeperException e) {
-LOG.error("A ZK error has occurred", e);
+log.error("A ZK error has occurred", e);
 throw new ZooKeeperException(ErrorCode.SERVER_ERROR, "A ZK error has occurred", e);
 } catch (InterruptedException e) {
 // Restore the interrupted status
 Thread.currentThread().interrupt();
-LOG.warn("Interrupted", e);
+log.warn("Interrupted", e);
 }
 }

@@ -1804,12 +1804,12 @@ public class ZkStateReader implements Closeable {
 synchronized (this) {
 int cmp = Integer.compare(aliases.getZNodeVersion(), newAliases.getZNodeVersion());
 if (cmp < 0) {
-LOG.debug("Aliases: cmp={}, new definition is: {}", cmp, newAliases);
+log.debug("Aliases: cmp={}, new definition is: {}", cmp, newAliases);
 aliases = newAliases;
 this.notifyAll();
 return true;
 } else {
-LOG.debug("Aliases: cmp={}, not overwriting ZK version.", cmp);
+log.debug("Aliases: cmp={}, not overwriting ZK version.", cmp);
 assert cmp != 0 || Arrays.equals(aliases.toJSON(), newAliases.toJSON()) : aliases + " != " + newAliases;
 return false;
 }

@@ -1823,7 +1823,7 @@ public class ZkStateReader implements Closeable {
 collectionPropsNotifications.submit(new PropsNotification(collection, properties));
 } catch (RejectedExecutionException e) {
 if (!closed) {
-LOG.error("Couldn't run collection properties notifications for {}", collection, e);
+log.error("Couldn't run collection properties notifications for {}", collection, e);
 }
 }
 }

@@ -23,7 +23,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 public class IOUtils {
-private static final Logger LOG = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

 public static void closeQuietly(Closeable closeable) {
 try {

@@ -31,7 +31,7 @@ public class IOUtils {
 closeable.close();
 }
 } catch (Exception e) {
-LOG.error("Error while closing", e);
+log.error("Error while closing", e);
 }
 }
 }
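Taken together, the hunks in this commit converge on a single declaration shape and a single field name for SLF4J loggers. A minimal sketch of the resulting pattern, with Example and doWork as placeholder names:

import java.lang.invoke.MethodHandles;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Example {
  // private static final, named "log", and resolved via MethodHandles.lookup().lookupClass()
  // rather than a hard-coded class literal, so the line can be copied between classes unchanged.
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  void doWork(String collection) {
    // Parameterized messages defer string building until the level is actually enabled.
    log.debug("Processing collection [{}]", collection);
  }
}

The AliasesManager hunk above keeps the same shape but drops static, since it is an inner class.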