SOLR-13339: Prevent recovery, fetching index being kicked off after SolrCores already closed

Cao Manh Dat 2019-04-11 09:21:14 +01:00
parent 02c4503f8c
commit a67a941e19
3 changed files with 43 additions and 48 deletions
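Before the per-file diffs, here is a minimal sketch of the guard this commit puts in place. The names (Node, requestRecovery) are hypothetical stand-ins, not Solr's classes: a preClose() step flips a closed flag and drains background work first, so recovery or index-fetch triggers that arrive later are ignored rather than kicked off against already-closed cores.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Minimal sketch (hypothetical names, not Solr classes): mark the node closed and
// drain background work in preClose(), so late recovery triggers are not started.
public class PreCloseSketch {

  static class Node implements AutoCloseable {
    private volatile boolean closed = false;
    private final ExecutorService background = Executors.newCachedThreadPool();

    // Simulates a recovery/index-fetch trigger arriving from elsewhere.
    void requestRecovery(String core) {
      if (closed) {
        // The guard: once preClose() has run, late triggers are ignored instead of
        // starting work against an already-closed core.
        System.out.println("Ignoring recovery for " + core + ": node is closing");
        return;
      }
      background.submit(() -> System.out.println("Recovering " + core));
    }

    // Step 1 of shutdown: refuse new work, then wait for in-flight tasks.
    void preClose() throws InterruptedException {
      closed = true;
      background.shutdown();
      background.awaitTermination(10, TimeUnit.SECONDS);
    }

    @Override
    public void close() throws Exception {
      if (!closed) {
        preClose();
      }
      // ... release remaining resources (clients, caches, ...) here ...
    }
  }

  public static void main(String[] args) throws Exception {
    Node node = new Node();
    node.requestRecovery("core1"); // accepted
    node.preClose();
    node.requestRecovery("core2"); // ignored: arrives after preClose()
    node.close();
  }
}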


@@ -172,6 +172,8 @@ Bug Fixes
 * SOLR-13388: Fix FileExchangeRateProvider to be a public class, as it appears in schema.xml (Uwe Schindler)
+* SOLR-13339: Prevent recovery, fetching index being kicked off after SolrCores already closed (Cao Manh Dat)
 Improvements
 ----------------------


@@ -563,45 +563,59 @@ public class ZkController implements Closeable {
     }
   }
+  public void preClose() {
+    this.isClosed = true;
+    try {
+      this.removeEphemeralLiveNode();
+    } catch (AlreadyClosedException | SessionExpiredException | KeeperException.ConnectionLossException e) {
+    } catch (Exception e) {
+      log.warn("Error removing live node. Continuing to close CoreContainer", e);
+    }
+    try {
+      if (getZkClient().getConnectionManager().isConnected()) {
+        log.info("Publish this node as DOWN...");
+        publishNodeAsDown(getNodeName());
+      }
+    } catch (Exception e) {
+      log.warn("Error publishing nodes as down. Continuing to close CoreContainer", e);
+    }
+    ExecutorService customThreadPool = ExecutorUtil.newMDCAwareCachedThreadPool(new SolrjNamedThreadFactory("preCloseThreadPool"));
+    try {
+      synchronized (collectionToTerms) {
+        customThreadPool.submit(() -> collectionToTerms.values().parallelStream().forEach(ZkCollectionTerms::close));
+      }
+      customThreadPool.submit(() -> replicateFromLeaders.values().parallelStream().forEach(ReplicateFromLeader::stopReplication));
+    } finally {
+      ExecutorUtil.shutdownAndAwaitTermination(customThreadPool);
+    }
+  }
   /**
    * Closes the underlying ZooKeeper client.
    */
   public void close() {
-    this.isClosed = true;
+    if (!this.isClosed)
+      preClose();
     ExecutorService customThreadPool = ExecutorUtil.newMDCAwareCachedThreadPool(new SolrjNamedThreadFactory("closeThreadPool"));
-    customThreadPool.submit(() -> Collections.singleton(overseerElector.getContext()).parallelStream().forEach(c -> {
-      IOUtils.closeQuietly(c);
-    }));
+    customThreadPool.submit(() -> Collections.singleton(overseerElector.getContext()).parallelStream().forEach(IOUtils::closeQuietly));
-    customThreadPool.submit(() -> Collections.singleton(overseer).parallelStream().forEach(c -> {
-      IOUtils.closeQuietly(c);
-    }));
+    customThreadPool.submit(() -> Collections.singleton(overseer).parallelStream().forEach(IOUtils::closeQuietly));
-    synchronized (collectionToTerms) {
-      customThreadPool.submit(() -> collectionToTerms.values().parallelStream().forEach(c -> {
-        c.close();
-      }));
-    }
     try {
-      customThreadPool.submit(() -> replicateFromLeaders.values().parallelStream().forEach(c -> {
-        c.stopReplication();
-      }));
-      customThreadPool.submit(() -> electionContexts.values().parallelStream().forEach(c -> {
-        IOUtils.closeQuietly(c);
-      }));
+      customThreadPool.submit(() -> electionContexts.values().parallelStream().forEach(IOUtils::closeQuietly));
     } finally {
-      customThreadPool.submit(() -> Collections.singleton(cloudSolrClient).parallelStream().forEach(c -> {
-        IOUtils.closeQuietly(c);
-      }));
+      customThreadPool.submit(() -> Collections.singleton(cloudSolrClient).parallelStream().forEach(IOUtils::closeQuietly));
-      customThreadPool.submit(() -> Collections.singleton(cloudManager).parallelStream().forEach(c -> {
-        IOUtils.closeQuietly(c);
-      }));
+      customThreadPool.submit(() -> Collections.singleton(cloudManager).parallelStream().forEach(IOUtils::closeQuietly));
       try {
         try {
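
The preClose() added above routes its "stop background work" calls through a short-lived pool and then blocks on that pool's termination via ExecutorUtil.shutdownAndAwaitTermination. Below is a rough equivalent of that pattern using only java.util.concurrent; closeAll and the resource list are hypothetical stand-ins, not Solr API.

import java.io.Closeable;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Sketch of the "submit close tasks, then shut the pool down and wait" pattern;
// plain java.util.concurrent stands in for Solr's ExecutorUtil helpers.
public class AwaitedShutdownSketch {

  static void closeAll(List<? extends Closeable> resources) {
    ExecutorService pool = Executors.newCachedThreadPool();
    try {
      for (Closeable c : resources) {
        // Each resource is closed on the pool; a failure is contained per task.
        pool.submit(() -> {
          try {
            c.close();
          } catch (Exception e) {
            System.err.println("Error closing " + c + ": " + e);
          }
        });
      }
    } finally {
      // Same intent as ExecutorUtil.shutdownAndAwaitTermination(pool):
      // refuse new tasks, then block until the submitted closes complete.
      pool.shutdown();
      try {
        pool.awaitTermination(60, TimeUnit.SECONDS);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    }
  }

  public static void main(String[] args) {
    closeAll(List.<Closeable>of(
        () -> System.out.println("closed replication handle"),
        () -> System.out.println("closed terms tracker")));
  }
}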


@@ -114,8 +114,6 @@ import org.apache.solr.util.OrderedExecutor;
 import org.apache.solr.util.RefCounted;
 import org.apache.solr.util.stats.MetricUtils;
 import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.ConnectionLossException;
-import org.apache.zookeeper.KeeperException.SessionExpiredException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -887,26 +885,7 @@ public class CoreContainer {
     try {
       if (isZooKeeperAware()) {
         cancelCoreRecoveries();
-        if (isZooKeeperAware()) {
-          cancelCoreRecoveries();
-          try {
-            zkSys.zkController.removeEphemeralLiveNode();
-          } catch (AlreadyClosedException | SessionExpiredException | ConnectionLossException e) {
-          } catch (Exception e) {
-            log.warn("Error removing live node. Continuing to close CoreContainer", e);
-          }
-        }
-        try {
-          if (zkSys.zkController.getZkClient().getConnectionManager().isConnected()) {
-            log.info("Publish this node as DOWN...");
-            zkSys.zkController.publishNodeAsDown(zkSys.zkController.getNodeName());
-          }
-        } catch (Exception e) {
-          log.warn("Error publishing nodes as down. Continuing to close CoreContainer", e);
-        }
+        zkSys.zkController.preClose();
       }
       ExecutorUtil.shutdownAndAwaitTermination(coreContainerWorkExecutor);
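
The CoreContainer change above replaces the inlined live-node removal and DOWN publishing with a single call to zkController.preClose(). The sketch below illustrates the resulting shutdown ordering at the container level; Container and Controller are hypothetical stand-ins for CoreContainer and ZkController, and the steps after preClose() are an assumption, since the rest of the hunk above is truncated.

// Sketch of the container-level ordering after the refactor (Container/Controller are
// hypothetical stand-ins for CoreContainer/ZkController; later steps are assumed).
public class ShutdownOrderingSketch {

  interface Controller {
    void preClose();
    void close();
  }

  static class Container {
    private final Controller controller;

    Container(Controller controller) {
      this.controller = controller;
    }

    void cancelCoreRecoveries() {
      // interrupt in-flight recovery work
    }

    void shutdown() {
      cancelCoreRecoveries();  // 1. stop in-flight recovery
      controller.preClose();   // 2. mark closed, leave the cluster, stop replication
      // 3. ... shut down the container's own executors and close cores ...
      controller.close();      // 4. finally release cluster-coordination resources
    }
  }

  public static void main(String[] args) {
    Container container = new Container(new Controller() {
      @Override public void preClose() { System.out.println("controller preClose"); }
      @Override public void close() { System.out.println("controller close"); }
    });
    container.shutdown();
  }
}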