mirror of https://github.com/apache/lucene.git
SOLR-6677: Some more logging moves from INFO to DEBUG
This commit is contained in:
parent 4c7a8c4b81
commit 255bf895b3
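
The changes below move routine operational messages from INFO to DEBUG while keeping SLF4J's parameterized placeholders, so messages are only formatted when the level is actually enabled. As a rough, illustrative sketch of the pattern (the class, method, and expensiveJson() helper here are made up for the example and are not part of the patch):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingLevelExample {
  private static final Logger log = LoggerFactory.getLogger(LoggingLevelExample.class);

  void deleteReplica(String replica, String shardId, int count) {
    // Before the patch this kind of bookkeeping was logged at INFO on every call;
    // at DEBUG it is invisible under the default INFO threshold.
    log.debug("Deleting replica {} for shard {} based on count {}", replica, shardId, count);

    // Placeholders defer formatting, but arguments are still evaluated eagerly.
    // When an argument is expensive to build, the usual idiom is an explicit guard
    // (the patch itself does not add such guards):
    if (log.isDebugEnabled()) {
      log.debug("deleteReplica() : {}", expensiveJson());
    }
  }

  private String expensiveJson() {
    return "{}"; // hypothetical stand-in for something like Utils.toJSONString(message)
  }
}
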
@@ -71,7 +71,7 @@ public class DeleteReplicaCmd implements Cmd {
   @SuppressWarnings("unchecked")
   void deleteReplica(ClusterState clusterState, ZkNodeProps message, NamedList results, Runnable onComplete)
           throws KeeperException, InterruptedException {
-    log.info("deleteReplica() : {}", Utils.toJSONString(message));
+    log.debug("deleteReplica() : {}", Utils.toJSONString(message));
     boolean parallel = message.getBool("parallel", false);
 
     //If a count is specified the strategy needs be different
@@ -142,7 +142,7 @@ public class DeleteReplicaCmd implements Cmd {
       Set<String> replicas = shardToReplicasMapping.get(shardSlice);
       //callDeleteReplica on all replicas
       for (String replica: replicas) {
-        log.info("Deleting replica {} for shard {} based on count {}", replica, shardId, count);
+        log.debug("Deleting replica {} for shard {} based on count {}", replica, shardId, count);
         deleteCore(shardSlice, collectionName, replica, message, shard, results, onComplete, parallel);
       }
       results.add("shard_id", shardId);
@@ -81,14 +81,14 @@ public abstract class ElectionContext implements Closeable {
   public void cancelElection() throws InterruptedException, KeeperException {
     if (leaderSeqPath != null) {
       try {
-        log.info("Canceling election {}", leaderSeqPath);
+        log.debug("Canceling election {}", leaderSeqPath);
         zkClient.delete(leaderSeqPath, -1, true);
       } catch (NoNodeException e) {
         // fine
-        log.info("cancelElection did not find election node to remove {}", leaderSeqPath);
+        log.debug("cancelElection did not find election node to remove {}", leaderSeqPath);
       }
     } else {
-      log.info("cancelElection skipped as this context has not been initialized");
+      log.debug("cancelElection skipped as this context has not been initialized");
     }
   }
 
@@ -147,14 +147,14 @@ class ShardLeaderElectionContextBase extends ElectionContext {
         // We do this by using a multi and ensuring the parent znode of the leader registration node
         // matches the version we expect - there is a setData call that increments the parent's znode
         // version whenever a leader registers.
-        log.info("Removing leader registration node on cancel: {} {}", leaderPath, leaderZkNodeParentVersion);
+        log.debug("Removing leader registration node on cancel: {} {}", leaderPath, leaderZkNodeParentVersion);
         List<Op> ops = new ArrayList<>(2);
         ops.add(Op.check(new Path(leaderPath).getParent().toString(), leaderZkNodeParentVersion));
         ops.add(Op.delete(leaderPath, -1));
         zkClient.multi(ops, true);
       } catch (KeeperException.NoNodeException nne) {
         // no problem
-        log.info("No leader registration node found to remove: {}", leaderPath);
+        log.debug("No leader registration node found to remove: {}", leaderPath);
       } catch (KeeperException.BadVersionException bve) {
         log.info("Cannot remove leader registration node because the current registered node is not ours: {}", leaderPath);
         // no problem
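
For context on the block above: cancelElection() relies on a ZooKeeper multi transaction, where a version check on the parent znode guards the delete of the leader registration node, so a node that has since been replaced cannot remove a newer leader's registration. A minimal, standalone sketch of that pattern against the plain ZooKeeper client (paths, method name, and version value are illustrative, not taken from Solr):

import java.util.Arrays;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.Op;
import org.apache.zookeeper.ZooKeeper;

public class ConditionalLeaderDelete {
  // Deletes leaderPath only while the parent znode still has the version recorded
  // at registration time; otherwise the check op fails and nothing is deleted.
  static void deleteIfStillOurs(ZooKeeper zk, String parentPath, String leaderPath,
                                int expectedParentVersion)
      throws KeeperException, InterruptedException {
    try {
      zk.multi(Arrays.asList(
          Op.check(parentPath, expectedParentVersion), // fails if someone re-registered since
          Op.delete(leaderPath, -1)));                 // -1 matches any version of the leader node
    } catch (KeeperException.BadVersionException e) {
      // a newer leader registered after us; leave its node alone
    } catch (KeeperException.NoNodeException e) {
      // already gone; nothing to do
    }
  }
}
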
@@ -181,7 +181,7 @@ class ShardLeaderElectionContextBase extends ElectionContext {
     try {
       RetryUtil.retryOnThrowable(NodeExistsException.class, 60000, 5000, () -> {
         synchronized (lock) {
-          log.info("Creating leader registration node {} after winning as {}", leaderPath, leaderSeqPath);
+          log.debug("Creating leader registration node {} after winning as {}", leaderPath, leaderSeqPath);
           List<Op> ops = new ArrayList<>(2);
 
           // We use a multi operation to get the parent nodes version, which will
@@ -306,7 +306,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
 
     int leaderVoteWait = cc.getZkController().getLeaderVoteWait();
 
-    log.info("Running the leader process for shard={} and weAreReplacement={} and leaderVoteWait={}", shardId, weAreReplacement, leaderVoteWait);
+    log.debug("Running the leader process for shard={} and weAreReplacement={} and leaderVoteWait={}", shardId, weAreReplacement, leaderVoteWait);
     // clear the leader in clusterstate
     ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.LEADER.toLower(),
         ZkStateReader.SHARD_ID_PROP, shardId, ZkStateReader.COLLECTION_PROP, collection);
@@ -471,7 +471,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
           : clusterState.getReplica(collection, leaderProps.getStr(ZkStateReader.CORE_NODE_NAME_PROP));
       if (rep != null && rep.getState() != Replica.State.ACTIVE
           && rep.getState() != Replica.State.RECOVERING) {
-        log.info("We have become the leader after core registration but are not in an ACTIVE state - publishing ACTIVE");
+        log.debug("We have become the leader after core registration but are not in an ACTIVE state - publishing ACTIVE");
         zkController.publish(core.getCoreDescriptor(), Replica.State.ACTIVE);
       }
     }
@@ -643,7 +643,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
       }
 
       if (found >= slices.getReplicasMap().size()) {
-        log.info("All replicas are ready to participate in election.");
+        log.debug("All replicas are ready to participate in election.");
         return true;
       }
 
@@ -660,7 +660,7 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
       throws InterruptedException, KeeperException, IOException {
     // remove our ephemeral and re join the election
     if (cc.isShutDown()) {
-      log.info("Not rejoining election because CoreContainer is closed");
+      log.debug("Not rejoining election because CoreContainer is closed");
       return;
     }
 
@@ -674,10 +674,10 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
   }
 
   private boolean shouldIBeLeader(ZkNodeProps leaderProps, SolrCore core, boolean weAreReplacement) {
-    log.info("Checking if I should try and be the leader.");
+    log.debug("Checking if I should try and be the leader.");
 
     if (isClosed) {
-      log.info("Bailing on leader process because we have been closed");
+      log.debug("Bailing on leader process because we have been closed");
       return false;
     }
 
@@ -688,10 +688,10 @@ final class ShardLeaderElectionContext extends ShardLeaderElectionContextBase {
     }
 
     if (core.getCoreDescriptor().getCloudDescriptor().getLastPublished() == Replica.State.ACTIVE) {
-      log.info("My last published State was Active, it's okay to be the leader.");
+      log.debug("My last published State was Active, it's okay to be the leader.");
       return true;
     }
-    log.info("My last published State was "
+    log.debug("My last published State was "
        + core.getCoreDescriptor().getCloudDescriptor().getLastPublished()
        + ", I won't be the leader.");
     // TODO: and if no one is a good candidate?
@@ -227,7 +227,7 @@ public class Overseer implements Closeable {
               while (workQueue.poll() != null);
 
             } catch (KeeperException.BadVersionException bve) {
-              log.warn("Bad version writing to ZK using compare-and-set, will force refresh cluster state", bve);
+              log.warn("Bad version writing to ZK using compare-and-set, will force refresh cluster state: {}", bve.getMessage());
               refreshClusterState = true;
             } catch (KeeperException e) {
               if (e.code() == KeeperException.Code.SESSIONEXPIRED) {
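
In the Overseer hunk above the level stays at WARN, but the exception object is replaced by bve.getMessage() in a placeholder. With SLF4J, a Throwable passed as the last argument is logged with its full stack trace, which is noisy for an expected and handled condition. A small illustrative comparison (not code from the patch):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class WarnWithoutStackTrace {
  private static final Logger log = LoggerFactory.getLogger(WarnWithoutStackTrace.class);

  void onBadVersion(Exception bve) {
    // Throwable as the final argument: the message plus a full stack trace.
    log.warn("Bad version writing to ZK using compare-and-set, will force refresh cluster state", bve);

    // Message only: a single WARN line for a recoverable condition.
    log.warn("Bad version writing to ZK using compare-and-set, will force refresh cluster state: {}", bve.getMessage());
  }
}
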
@@ -134,7 +134,7 @@ public class OverseerTaskQueue extends DistributedQueue {
         return;
       }
       // If latchEventType is not null, only fire if the type matches
-      LOG.info("{} fired on path {} state {} latchEventType {}", event.getType(), event.getPath(), event.getState(), latchEventType);
+      LOG.debug("{} fired on path {} state {} latchEventType {}", event.getType(), event.getPath(), event.getState(), latchEventType);
       if (latchEventType == null || event.getType() == latchEventType) {
         synchronized (lock) {
           this.event = event;
@@ -174,7 +174,7 @@ public class RecoveryStrategy extends Thread implements Closeable {
         + " from "
         + leaderUrl
         + " gen:"
-        + core.getDeletionPolicy().getLatestCommit() != null ? "null" : core.getDeletionPolicy().getLatestCommit().getGeneration()
+        + (core.getDeletionPolicy().getLatestCommit() != null ? "null" : core.getDeletionPolicy().getLatestCommit().getGeneration())
         + " data:" + core.getDataDir()
         + " index:" + core.getIndexDir()
         + " newIndex:" + core.getNewIndexDir()
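
The RecoveryStrategy hunk above is a precedence fix rather than a level change: in Java, string concatenation with + binds tighter than the ternary operator, so without parentheses the condition tested is the already-concatenated string compared against null, not the latest commit. A self-contained illustration with made-up values:

public class TernaryPrecedence {
  public static void main(String[] args) {
    Object commit = null;

    // '+' binds tighter than '!=' and '?:', so this parses as
    //   (("gen:" + commit) != null) ? "A" : "B"
    // and always takes the "A" branch, because "gen:null" is never null.
    String broken = "gen:" + commit != null ? "A" : "B";

    // Parentheses make the ternary apply to 'commit' itself.
    String fixed = "gen:" + (commit != null ? "A" : "B");

    System.out.println(broken); // A
    System.out.println(fixed);  // gen:B
  }
}
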
@@ -2515,7 +2515,7 @@ public class ZkController {
    * @param nodeName to operate on
    */
   public void publishNodeAsDown(String nodeName) {
-    log.info("Publish node={} as DOWN", nodeName);
+    log.debug("Publish node={} as DOWN", nodeName);
     ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.DOWNNODE.toLower(),
         ZkStateReader.NODE_NAME_PROP, nodeName);
     try {
@@ -46,7 +46,7 @@ public class NodeMutator {
     List<ZkWriteCommand> zkWriteCommands = new ArrayList<ZkWriteCommand>();
     String nodeName = message.getStr(ZkStateReader.NODE_NAME_PROP);
 
-    log.info("DownNode state invoked for node: " + nodeName);
+    log.debug("DownNode state invoked for node: " + nodeName);
 
     Map<String, DocCollection> collections = clusterState.getCollectionsMap();
     for (Map.Entry<String, DocCollection> entry : collections.entrySet()) {
@@ -62,7 +62,7 @@ public class NodeMutator {
         Map<String,Object> props = replica.shallowCopy();
         String rNodeName = replica.getNodeName();
         if (rNodeName.equals(nodeName)) {
-          log.info("Update replica state for " + replica + " to " + Replica.State.DOWN.toString());
+          log.debug("Update replica state for " + replica + " to " + Replica.State.DOWN.toString());
           props.put(ZkStateReader.STATE_PROP, Replica.State.DOWN.toString());
         }
 
@@ -156,7 +156,7 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
   @Override
   public void close() throws IOException {
     synchronized (this) {
-      log.info("Closing " + this.getClass().getSimpleName() + " - " + byDirectoryCache.size() + " directories currently being tracked");
+      log.debug("Closing {} - {} directories currently being tracked", this.getClass().getSimpleName(), byDirectoryCache.size());
       this.closed = true;
       Collection<CacheValue> values = byDirectoryCache.values();
       for (CacheValue val : values) {
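
Several CachingDirectoryFactory changes, like the one above, also switch from string concatenation to SLF4J placeholders. Concatenation builds the message before the logger can check the level; placeholders defer formatting until the logger knows DEBUG is enabled (the arguments are still evaluated either way). A short illustrative sketch:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PlaceholderLogging {
  private static final Logger log = LoggerFactory.getLogger(PlaceholderLogging.class);

  void report(String name, int tracked) {
    // Concatenation: the full message string is built even when DEBUG is disabled.
    log.debug("Closing " + name + " - " + tracked + " directories currently being tracked");

    // Placeholders: formatting happens only if DEBUG is enabled.
    log.debug("Closing {} - {} directories currently being tracked", name, tracked);
  }
}
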
@@ -201,7 +201,7 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
     }
 
     for (CacheValue val : removeEntries) {
-      log.info("Removing directory after core close: " + val.path);
+      log.debug("Removing directory after core close: " + val.path);
       try {
         removeDirectory(val);
       } catch (Exception e) {
@@ -224,7 +224,7 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
   // be sure this is called with the this sync lock
   // returns true if we closed the cacheValue, false if it will be closed later
   private boolean closeCacheValue(CacheValue cacheValue) {
-    log.info("looking to close " + cacheValue.path + " " + cacheValue.closeEntries.toString());
+    log.debug("looking to close {} {}", cacheValue.path, cacheValue.closeEntries.toString());
     List<CloseListener> listeners = closeListeners.remove(cacheValue.directory);
     if (listeners != null) {
       for (CloseListener listener : listeners) {
@@ -268,11 +268,11 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
 
     for (CacheValue val : cacheValue.removeEntries) {
       if (!val.deleteAfterCoreClose) {
-        log.info("Removing directory before core close: " + val.path);
+        log.debug("Removing directory before core close: " + val.path);
         try {
          removeDirectory(val);
        } catch (Exception e) {
-          SolrException.log(log, "Error removing directory", e);
+          SolrException.log(log, "Error removing directory " + val.path + " before core close", e);
        }
      } else {
        removeEntries.add(val);
@@ -292,13 +292,13 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
   }
 
   private void close(CacheValue val) {
-    log.info("Closing directory, CoreContainer#isShutdown={}", coreContainer != null ? coreContainer.isShutDown() : "null");
+    log.debug("Closing directory, CoreContainer#isShutdown={}", coreContainer != null ? coreContainer.isShutDown() : "null");
     try {
       if (coreContainer != null && coreContainer.isShutDown() && val.directory instanceof ShutdownAwareDirectory) {
-        log.info("Closing directory on shutdown: " + val.path);
+        log.debug("Closing directory on shutdown: " + val.path);
         ((ShutdownAwareDirectory) val.directory).closeOnShutdown();
       } else {
-        log.info("Closing directory: " + val.path);
+        log.debug("Closing directory: " + val.path);
        val.directory.close();
      }
      assert ObjectReleaseTracker.release(val.directory);
@@ -350,7 +350,7 @@ public abstract class CachingDirectoryFactory extends DirectoryFactory {
       CacheValue newCacheValue = new CacheValue(fullPath, directory);
       byDirectoryCache.put(directory, newCacheValue);
       byPathCache.put(fullPath, newCacheValue);
-      log.debug("return new directory for " + fullPath);
+      log.debug("return new directory for {}", fullPath);
       success = true;
     } finally {
       if (!success) {
@@ -2068,7 +2068,7 @@ public final class SolrCore implements SolrInfoMBean, Closeable {
 
 
   public void closeSearcher() {
-    log.info(logid+"Closing main searcher on request.");
+    log.debug(logid+"Closing main searcher on request.");
     synchronized (searcherLock) {
       if (realtimeSearcher != null) {
         realtimeSearcher.decref();
@@ -77,14 +77,10 @@ public class SolrDeletionPolicy extends IndexDeletionPolicy implements NamedList
    */
   @Override
   public void onInit(List<? extends IndexCommit> commits) throws IOException {
-    // SOLR-4547: log basic data at INFO, add filenames at DEBUG.
     if (commits.isEmpty()) {
       return;
     }
-    log.info("SolrDeletionPolicy.onInit: commits: {}",
-        new CommitsLoggingInfo(commits));
-    log.debug("SolrDeletionPolicy.onInit: commits: {}",
-        new CommitsLoggingDebug(commits));
+    log.debug("SolrDeletionPolicy.onInit: commits: {}", new CommitsLoggingDebug(commits));
     updateCommits(commits);
   }
 
@@ -93,11 +89,7 @@ public class SolrDeletionPolicy extends IndexDeletionPolicy implements NamedList
    */
   @Override
   public void onCommit(List<? extends IndexCommit> commits) throws IOException {
-    // SOLR-4547: log basic data at INFO, add filenames at DEBUG.
-    log.info("SolrDeletionPolicy.onCommit: commits: {}",
-        new CommitsLoggingInfo(commits));
-    log.debug("SolrDeletionPolicy.onCommit: commits: {}",
-        new CommitsLoggingDebug(commits));
+    log.debug("SolrDeletionPolicy.onCommit: commits: {}", new CommitsLoggingDebug(commits));
     updateCommits(commits);
   }
 
@@ -158,10 +150,7 @@ public class SolrDeletionPolicy extends IndexDeletionPolicy implements NamedList
     synchronized (this) {
       long maxCommitAgeTimeStamp = -1L;
       IndexCommit newest = commits.get(commits.size() - 1);
-      // SOLR-4547: Removed the filenames from this log entry because this
-      // method is only called from methods that have just logged them
-      // at DEBUG.
-      log.info("newest commit generation = " + newest.getGeneration());
+      log.debug("newest commit generation = " + newest.getGeneration());
       int singleSegKept = (newest.getSegmentCount() == 1) ? 1 : 0;
       int totalKept = 1;
 
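
The SolrDeletionPolicy hunks drop the separate INFO summary and keep only the DEBUG line whose argument includes the commit filenames. The argument is a wrapper object whose toString() builds the listing, and SLF4J only calls toString() on placeholder arguments when the level is enabled, so the expensive listing is skipped unless DEBUG is on. A generic sketch of that idiom (CommitListing here is illustrative, not the CommitsLoggingDebug class):

import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LazyToStringLogging {
  private static final Logger log = LoggerFactory.getLogger(LazyToStringLogging.class);

  // Builds the (potentially large) listing only if toString() is actually called.
  static final class CommitListing {
    private final List<String> files;
    CommitListing(List<String> files) { this.files = files; }
    @Override public String toString() { return String.join(",", files); }
  }

  void onCommit(List<String> files) {
    // toString() runs only when DEBUG is enabled for this logger.
    log.debug("onCommit: commits: {}", new CommitListing(files));
  }
}
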
@@ -82,12 +82,12 @@ public final class DefaultSolrCoreState extends SolrCoreState implements Recover
 
   private void closeIndexWriter(IndexWriterCloser closer) {
     try {
-      log.info("SolrCoreState ref count has reached 0 - closing IndexWriter");
+      log.debug("SolrCoreState ref count has reached 0 - closing IndexWriter");
       if (closer != null) {
-        log.info("closing IndexWriter with IndexWriterCloser");
+        log.debug("closing IndexWriter with IndexWriterCloser");
         closer.closeWriter(indexWriter);
       } else if (indexWriter != null) {
-        log.info("closing IndexWriter...");
+        log.debug("closing IndexWriter...");
         indexWriter.close();
       }
       indexWriter = null;
@@ -184,14 +184,14 @@ public final class DefaultSolrCoreState extends SolrCoreState implements Recover
     if (iw != null) {
       if (!rollback) {
         try {
-          log.info("Closing old IndexWriter... core=" + coreName);
+          log.debug("Closing old IndexWriter... core=" + coreName);
           iw.close();
         } catch (Exception e) {
           SolrException.log(log, "Error closing old IndexWriter. core=" + coreName, e);
         }
       } else {
         try {
-          log.info("Rollback old IndexWriter... core=" + coreName);
+          log.debug("Rollback old IndexWriter... core=" + coreName);
           iw.rollback();
         } catch (Exception e) {
           SolrException.log(log, "Error rolling back old IndexWriter. core=" + coreName, e);
@@ -362,12 +362,15 @@ public final class DefaultSolrCoreState extends SolrCoreState implements Recover
   /** called from recoveryStrat on a successful recovery */
   @Override
   public void recovered() {
+    recoveryStrat = null;
     recoveringAfterStartup = false; // once we have successfully recovered, we no longer need to act as if we are recovering after startup
   }
 
   /** called from recoveryStrat on a failed recovery */
   @Override
-  public void failed() {}
+  public void failed() {
+    recoveryStrat = null;
+  }
 
   @Override
   public synchronized void close(IndexWriterCloser closer) {
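
The DefaultSolrCoreState hunk above is also not a logging change: both recovery callbacks now clear the recoveryStrat field, presumably so the core state no longer holds on to a recovery strategy that has finished, whether it succeeded or failed. A reduced sketch of the callback shape, with a hypothetical field type standing in for RecoveryStrategy:

public class CoreStateSketch {
  // Hypothetical stand-in for the running RecoveryStrategy.
  private volatile Thread recoveryStrat;

  // Called by the recovery thread on success.
  public void recovered() {
    recoveryStrat = null; // drop the finished strategy
    // the real method also resets recoveringAfterStartup here
  }

  // Called by the recovery thread on failure.
  public void failed() {
    recoveryStrat = null; // previously a no-op body; now also drops the reference
  }
}
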
@@ -729,7 +729,7 @@ public class DirectUpdateHandler2 extends UpdateHandler implements SolrCoreState
 
   @Override
   public void close() throws IOException {
-    log.info("closing " + this);
+    log.debug("closing " + this);
 
     commitTracker.close();
     softCommitTracker.close();
@@ -70,7 +70,7 @@ public abstract class SolrCoreState {
 
     if (close) {
       try {
-        log.info("Closing SolrCoreState");
+        log.debug("Closing SolrCoreState");
         close(closer);
       } catch (Exception e) {
         log.error("Error closing SolrCoreState", e);