mirror of https://github.com/apache/lucene.git
SOLR-7854: Remove unused ZkStateReader.updateClusterState(false) method
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1693681 13f79535-47bb-0310-9956-ffa450edef68
parent 7b412fdc63
commit 2a8bef841a
@@ -387,6 +387,8 @@ Other Changes
 * SOLR-7823: TestMiniSolrCloudCluster.testCollectionCreateSearchDelete async collection-creation (sometimes)
   (Christine Poerschke)

+* SOLR-7854: Remove unused ZkStateReader.updateClusterState(false) method. (Scott Blum via shalin)
+
 ================== 5.2.1 ==================

 Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release
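The change is mechanical at every call site: the boolean flag is gone, and callers that previously forced a synchronous refresh with updateClusterState(true) now call the no-argument form. A minimal sketch of the migrated pattern, assuming a connected reader (the helper class and method names are illustrative, not part of this commit):

import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.ZkStateReader;

public class ClusterStateRefreshSketch {
  // Hypothetical helper showing the call-site migration.
  static ClusterState refresh(ZkStateReader reader) throws Exception {
    // Before SOLR-7854: reader.updateClusterState(true);  // 'false' scheduled a delayed update and was never used
    reader.updateClusterState();  // after SOLR-7854: always an immediate refresh from ZooKeeper
    return reader.getClusterState();
  }
}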
@@ -646,7 +646,7 @@ public class MorphlineGoLiveMiniMRTest extends AbstractFullDistribZkTestBase {
       }

       Thread.sleep(200);
-      cloudClient.getZkStateReader().updateClusterState(true);
+      cloudClient.getZkStateReader().updateClusterState();
     }

     if (TEST_NIGHTLY) {
@@ -158,7 +158,7 @@ public class LeaderInitiatedRecoveryThread extends Thread {
       // see if the replica's node is still live, if not, no need to keep doing this loop
       ZkStateReader zkStateReader = zkController.getZkStateReader();
       try {
-        zkStateReader.updateClusterState(true);
+        zkStateReader.updateClusterState();
       } catch (Exception exc) {
         log.warn("Error when updating cluster state: "+exc);
       }
@@ -151,7 +151,7 @@ public class Overseer implements Closeable {

         if (refreshClusterState) {
           try {
-            reader.updateClusterState(true);
+            reader.updateClusterState();
             clusterState = reader.getClusterState();
             refreshClusterState = false;

@@ -571,7 +571,7 @@ public class OverseerCollectionProcessor implements Runnable, Closeable {
     NamedList results = new NamedList();
     try {
       // force update the cluster state
-      zkStateReader.updateClusterState(true);
+      zkStateReader.updateClusterState();
       CollectionParams.CollectionAction action = CollectionParams.CollectionAction.get(operation);
       if (action == null) {
         throw new SolrException(ErrorCode.BAD_REQUEST, "Unknown operation:" + operation);
@@ -1807,7 +1807,7 @@ public class OverseerCollectionProcessor implements Runnable, Closeable {
         return;
       }
       Thread.sleep(1000);
-      zkStateReader.updateClusterState(true);
+      zkStateReader.updateClusterState();
     }
     throw new SolrException(ErrorCode.SERVER_ERROR,
         "Could not find new slice " + sliceName + " in collection " + collectionName
@@ -935,7 +935,7 @@ public final class ZkController {
       }

       // make sure we have an update cluster state right away
-      zkStateReader.updateClusterState(true);
+      zkStateReader.updateClusterState();
       return shardId;
     } finally {
       MDCLoggingContext.clear();
@@ -357,7 +357,7 @@ public class CdcrRequestHandler extends RequestHandlerBase implements SolrCoreAw
       throws IOException, SolrServerException {
     ZkController zkController = core.getCoreDescriptor().getCoreContainer().getZkController();
     try {
-      zkController.getZkStateReader().updateClusterState(true);
+      zkController.getZkStateReader().updateClusterState();
     } catch (Exception e) {
       log.warn("Error when updating cluster state", e);
     }
@@ -59,7 +59,7 @@ public class ClusterStatus {
   @SuppressWarnings("unchecked")
   public void getClusterStatus(NamedList results)
       throws KeeperException, InterruptedException {
-    zkStateReader.updateClusterState(true);
+    zkStateReader.updateClusterState();


     // read aliases
@@ -947,7 +947,7 @@ public class CoreAdminHandler extends RequestHandlerBase {
               waitForState + "; forcing ClusterState update from ZooKeeper");

           // force a cluster state update
-          coreContainer.getZkController().getZkStateReader().updateClusterState(true);
+          coreContainer.getZkController().getZkStateReader().updateClusterState();
         }

         if (maxTries == 0) {
@@ -74,7 +74,7 @@ class RebalanceLeaders {
       throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
           String.format(Locale.ROOT, "The " + COLLECTION_PROP + " is required for the REASSIGNLEADERS command."));
     }
-    coreContainer.getZkController().getZkStateReader().updateClusterState(true);
+    coreContainer.getZkController().getZkStateReader().updateClusterState();
     ClusterState clusterState = coreContainer.getZkController().getClusterState();
     DocCollection dc = clusterState.getCollection(collectionName);
     if (dc == null) {
@@ -598,7 +598,7 @@ public class BaseCdcrDistributedZkTest extends AbstractDistribZkTestBase {
     try {
       cloudClient.connect();
       ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-      zkStateReader.updateClusterState(true);
+      zkStateReader.updateClusterState();
       ClusterState clusterState = zkStateReader.getClusterState();
       DocCollection coll = clusterState.getCollection(collection);

@@ -517,7 +517,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {

     Thread.sleep(5000);
     ChaosMonkey.start(cloudJettys.get(0).jetty);
-    cloudClient.getZkStateReader().updateClusterState(true);
+    cloudClient.getZkStateReader().updateClusterState();
     try {
       cloudClient.getZkStateReader().getLeaderRetry("multiunload2", "shard1", 30000);
     } catch (SolrException e) {
@@ -800,7 +800,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {

     // we added a role of none on these creates - check for it
     ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
-    zkStateReader.updateClusterState(true);
+    zkStateReader.updateClusterState();
     Map<String,Slice> slices = zkStateReader.getClusterState().getSlicesMap(oneInstanceCollection2);
     assertNotNull(slices);
     String roles = slices.get("slice1").getReplicasMap().values().iterator().next().getStr(ZkStateReader.ROLES_PROP);
@@ -212,7 +212,7 @@ public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase

      // TODO: assert we didnt kill everyone

-      zkStateReader.updateClusterState(true);
+      zkStateReader.updateClusterState();
      assertTrue(zkStateReader.getClusterState().getLiveNodes().size() > 0);


@@ -202,7 +202,7 @@ public class ChaosMonkeyShardSplitTest extends ShardSplitTest {
     for (int i = 0; i < 30; i++) {
       Thread.sleep(3000);
       ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-      zkStateReader.updateClusterState(true);
+      zkStateReader.updateClusterState();
       ClusterState clusterState = zkStateReader.getClusterState();
       DocCollection collection1 = clusterState.getCollection("collection1");
       Slice slice = collection1.getSlice("shard1");
@@ -103,7 +103,7 @@ public class CollectionReloadTest extends AbstractFullDistribZkTestBase {
     timeout = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeoutSecs, TimeUnit.SECONDS);
     while (System.nanoTime() < timeout) {
       // state of leader should be active after session loss recovery - see SOLR-7338
-      cloudClient.getZkStateReader().updateClusterState(true);
+      cloudClient.getZkStateReader().updateClusterState();
       ClusterState cs = cloudClient.getZkStateReader().getClusterState();
       Slice slice = cs.getSlice(testCollectionName, shardId);
       replicaState = slice.getReplica(leader.getName()).getStr(ZkStateReader.STATE_PROP);
@@ -98,7 +98,7 @@ public class CollectionTooManyReplicasTest extends AbstractFullDistribZkTestBase
     assertEquals(0, response.getStatus());

     ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
-    zkStateReader.updateClusterState(true);
+    zkStateReader.updateClusterState();
     Slice slice = zkStateReader.getClusterState().getSlicesMap(collectionName).get("shard1");

     Replica rep = null;
@@ -195,7 +195,7 @@ public class CollectionTooManyReplicasTest extends AbstractFullDistribZkTestBase
     // And finally, insure that there are all the replcias we expect. We should have shards 1, 2 and 4 and each
     // should have exactly two replicas
     ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
-    zkStateReader.updateClusterState(true);
+    zkStateReader.updateClusterState();
     Map<String, Slice> slices = zkStateReader.getClusterState().getSlicesMap(collectionName);
     assertEquals("There should be exaclty four slices", slices.size(), 4);
     assertNotNull("shardstart should exist", slices.get("shardstart"));
@@ -276,7 +276,7 @@ public class CollectionTooManyReplicasTest extends AbstractFullDistribZkTestBase

   private List<String> getAllNodeNames(String collectionName) throws KeeperException, InterruptedException {
     ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
-    zkStateReader.updateClusterState(true);
+    zkStateReader.updateClusterState();
     Slice slice = zkStateReader.getClusterState().getSlicesMap(collectionName).get("shard1");

     List<String> nodes = new ArrayList<>();
@@ -288,7 +288,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
       }

       Thread.sleep(200);
-      cloudClient.getZkStateReader().updateClusterState(true);
+      cloudClient.getZkStateReader().updateClusterState();
     }

     assertFalse("Still found collection that should be gone", cloudClient.getZkStateReader().getClusterState().hasCollection("halfdeletedcollection2"));
@@ -461,7 +461,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa

   private void testNoCollectionSpecified() throws Exception {

-    cloudClient.getZkStateReader().updateClusterState(true);
+    cloudClient.getZkStateReader().updateClusterState();
     assertFalse(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection"));
     assertFalse(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection2"));

@@ -485,13 +485,13 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
     makeRequest(getBaseUrl((HttpSolrClient) clients.get(1)), createCmd);

     // in both cases, the collection should have default to the core name
-    cloudClient.getZkStateReader().updateClusterState(true);
+    cloudClient.getZkStateReader().updateClusterState();
     assertTrue(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection"));
     assertTrue(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection2"));
   }

   private void testNoConfigSetExist() throws Exception {
-    cloudClient.getZkStateReader().updateClusterState(true);
+    cloudClient.getZkStateReader().updateClusterState();
     assertFalse(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection3"));

     // try and create a SolrCore with no collection name
@@ -512,7 +512,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
     assertTrue(gotExp);
     TimeUnit.MILLISECONDS.sleep(200);
     // in both cases, the collection should have default to the core name
-    cloudClient.getZkStateReader().updateClusterState(true);
+    cloudClient.getZkStateReader().updateClusterState();

     Collection<Slice> slices = cloudClient.getZkStateReader().getClusterState().getActiveSlices("corewithnocollection3");
     assertNull(slices);
@@ -405,7 +405,7 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
     int attempts = 0;
     while (true) {
       if (attempts > 30) fail("Not enough active replicas in the shard 'x'");
-      zkStateReader.updateClusterState(true);
+      zkStateReader.updateClusterState();
       attempts++;
       replicaCount = zkStateReader.getClusterState().getSlice(collectionName, "x").getReplicas().size();
       if (replicaCount >= 1) break;
@@ -90,7 +90,7 @@ public class DeleteShardTest extends AbstractFullDistribZkTestBase {
     ClusterState clusterState = zkStateReader.getClusterState();
     int counter = 10;
     while (counter-- > 0) {
-      zkStateReader.updateClusterState(true);
+      zkStateReader.updateClusterState();
       clusterState = zkStateReader.getClusterState();
       if (clusterState.getSlice("collection1", shard) == null) {
         break;
@@ -136,7 +136,7 @@ public class DeleteShardTest extends AbstractFullDistribZkTestBase {
     boolean transition = false;

     for (int counter = 10; counter > 0; counter--) {
-      zkStateReader.updateClusterState(true);
+      zkStateReader.updateClusterState();
       ClusterState clusterState = zkStateReader.getClusterState();
       State sliceState = clusterState.getSlice("collection1", slice).getState();
       if (sliceState == state) {
@@ -523,7 +523,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
     long startMs = System.currentTimeMillis();

     ZkStateReader zkr = cloudClient.getZkStateReader();
-    zkr.updateClusterState(true); // force the state to be fresh
+    zkr.updateClusterState(); // force the state to be fresh

     ClusterState cs = zkr.getClusterState();
     Collection<Slice> slices = cs.getActiveSlices(testCollectionName);
@@ -533,7 +533,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
     while (waitMs < maxWaitMs && !allReplicasUp) {
       // refresh state every 2 secs
       if (waitMs % 2000 == 0)
-        cloudClient.getZkStateReader().updateClusterState(true);
+        cloudClient.getZkStateReader().updateClusterState();

       cs = cloudClient.getZkStateReader().getClusterState();
       assertNotNull(cs);
@@ -155,7 +155,7 @@ public class LeaderFailoverAfterPartitionTest extends HttpPartitionTest {

     long timeout = System.nanoTime() + TimeUnit.NANOSECONDS.convert(60, TimeUnit.SECONDS);
     while (System.nanoTime() < timeout) {
-      cloudClient.getZkStateReader().updateClusterState(true);
+      cloudClient.getZkStateReader().updateClusterState();

       List<Replica> activeReps = getActiveOrRecoveringReplicas(testCollectionName, "shard1");
       if (activeReps.size() >= 2) break;
@@ -75,7 +75,7 @@ public class LeaderInitiatedRecoveryOnCommitTest extends BasicDistributedZkTest

     Thread.sleep(sleepMsBeforeHealPartition);

-    cloudClient.getZkStateReader().updateClusterState(true); // get the latest state
+    cloudClient.getZkStateReader().updateClusterState(); // get the latest state
     leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
     assertSame("Leader was not active", Replica.State.ACTIVE, leader.getState());

@@ -123,7 +123,7 @@ public class LeaderInitiatedRecoveryOnCommitTest extends BasicDistributedZkTest
     sendCommitWithRetry(replica);
     Thread.sleep(sleepMsBeforeHealPartition);

-    cloudClient.getZkStateReader().updateClusterState(true); // get the latest state
+    cloudClient.getZkStateReader().updateClusterState(); // get the latest state
     leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1");
     assertSame("Leader was not active", Replica.State.ACTIVE, leader.getState());

@@ -65,7 +65,7 @@ public class MigrateRouteKeyTest extends BasicDistributedZkTest {
     ClusterState state;Slice slice;
     boolean ruleRemoved = false;
     while (System.currentTimeMillis() - finishTime < 60000) {
-      getCommonCloudSolrClient().getZkStateReader().updateClusterState(true);
+      getCommonCloudSolrClient().getZkStateReader().updateClusterState();
       state = getCommonCloudSolrClient().getZkStateReader().getClusterState();
       slice = state.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD2);
       Map<String,RoutingRule> routingRules = slice.getRoutingRules();
@@ -179,7 +179,7 @@ public class MigrateRouteKeyTest extends BasicDistributedZkTest {
     log.info("Response from target collection: " + response);
     assertEquals("DocCount on target collection does not match", splitKeyCount[0], response.getResults().getNumFound());

-    getCommonCloudSolrClient().getZkStateReader().updateClusterState(true);
+    getCommonCloudSolrClient().getZkStateReader().updateClusterState();
     ClusterState state = getCommonCloudSolrClient().getZkStateReader().getClusterState();
     Slice slice = state.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD2);
     assertNotNull("Routing rule map is null", slice.getRoutingRules());
@@ -253,7 +253,7 @@ public class OverseerCollectionProcessorTest extends SolrTestCaseJ4 {
       }
     }).anyTimes();

-    zkStateReaderMock.updateClusterState(anyBoolean());
+    zkStateReaderMock.updateClusterState();

     clusterStateMock.getCollections();
     expectLastCall().andAnswer(new IAnswer<Object>() {
@@ -422,7 +422,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       int cloudStateSliceCount = 0;
       for (int i = 0; i < 40; i++) {
         cloudStateSliceCount = 0;
-        reader.updateClusterState(true);
+        reader.updateClusterState();
         ClusterState state = reader.getClusterState();
         Map<String,Slice> slices = state.getSlicesMap("collection1");
         if (slices != null) {
@@ -507,7 +507,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
   private void waitForCollections(ZkStateReader stateReader, String... collections) throws InterruptedException, KeeperException {
     int maxIterations = 100;
     while (0 < maxIterations--) {
-      stateReader.updateClusterState(true);
+      stateReader.updateClusterState();
       final ClusterState state = stateReader.getClusterState();
       Set<String> availableCollections = state.getCollections();
       int availableCount = 0;
@@ -604,7 +604,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
   private void verifyShardLeader(ZkStateReader reader, String collection, String shard, String expectedCore) throws InterruptedException, KeeperException {
     int maxIterations = 200;
     while(maxIterations-->0) {
-      reader.updateClusterState(true); // poll state
+      reader.updateClusterState(); // poll state
       ZkNodeProps props = reader.getClusterState().getLeader(collection, shard);
       if(props!=null) {
         if(expectedCore.equals(props.getStr(ZkStateReader.CORE_NAME_PROP))) {
@@ -895,7 +895,7 @@ public class OverseerTest extends SolrTestCaseJ4 {

       while (version == getClusterStateVersion(controllerClient));

-      reader.updateClusterState(true);
+      reader.updateClusterState();
       ClusterState state = reader.getClusterState();

       int numFound = 0;
@@ -1036,7 +1036,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       assertTrue(overseers.size() > 0);

       while (true) {
-        reader.updateClusterState(true);
+        reader.updateClusterState();
         ClusterState state = reader.getClusterState();
         if (state.hasCollection("perf_sentinel")) {
           break;
@@ -57,7 +57,7 @@ public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBas
     ClusterState clusterState = null;
     Replica replica = null;
     for (int idx = 0; idx < 300; ++idx) {
-      client.getZkStateReader().updateClusterState(true);
+      client.getZkStateReader().updateClusterState();
       clusterState = client.getZkStateReader().getClusterState();
       replica = clusterState.getReplica(collectionName, replicaName);
       if (replica == null) {
@@ -83,7 +83,7 @@ public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBas
     ClusterState clusterState = null;

     for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds.
-      client.getZkStateReader().updateClusterState(true);
+      client.getZkStateReader().updateClusterState();
       clusterState = client.getZkStateReader().getClusterState();
       replica = clusterState.getReplica(collectionName, replicaName);
       if (replica == null) {
@@ -117,7 +117,7 @@ public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBas

     DocCollection col = null;
     for (int idx = 0; idx < 300; ++idx) {
-      client.getZkStateReader().updateClusterState(true);
+      client.getZkStateReader().updateClusterState();
       ClusterState clusterState = client.getZkStateReader().getClusterState();

       col = clusterState.getCollection(collectionName);
@@ -385,7 +385,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
     int i = 0;
     for (i = 0; i < 10; i++) {
       ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-      zkStateReader.updateClusterState(true);
+      zkStateReader.updateClusterState();
       clusterState = zkStateReader.getClusterState();
       slice1_0 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_0");
       slice1_1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_1");
@@ -219,7 +219,7 @@ public class SyncSliceTest extends AbstractFullDistribZkTestBase {
     for (int i = 0; i < 60; i++) {
       Thread.sleep(3000);
       ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-      zkStateReader.updateClusterState(true);
+      zkStateReader.updateClusterState();
       ClusterState clusterState = zkStateReader.getClusterState();
       DocCollection collection1 = clusterState.getCollection("collection1");
       Slice slice = collection1.getSlice("shard1");
@@ -596,7 +596,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
   private Map<String, String> getProps(CloudSolrClient client, String collectionName, String replicaName, String... props)
       throws KeeperException, InterruptedException {

-    client.getZkStateReader().updateClusterState(true);
+    client.getZkStateReader().updateClusterState();
     ClusterState clusterState = client.getZkStateReader().getClusterState();
     Replica replica = clusterState.getReplica(collectionName, replicaName);
     if (replica == null) {
@@ -152,7 +152,7 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase {
       assertEquals(1, rsp.getResults().getNumFound());

       // remove a server not hosting any replicas
-      zkStateReader.updateClusterState(true);
+      zkStateReader.updateClusterState();
       ClusterState clusterState = zkStateReader.getClusterState();
       HashMap<String, JettySolrRunner> jettyMap = new HashMap<String, JettySolrRunner>();
       for (JettySolrRunner jetty : miniCluster.getJettySolrRunners()) {
@@ -76,7 +76,7 @@ public class TestRandomRequestDistribution extends AbstractFullDistribZkTestBase
     waitForRecoveriesToFinish("a1x2", true);
     waitForRecoveriesToFinish("b1x1", true);

-    cloudClient.getZkStateReader().updateClusterState(true);
+    cloudClient.getZkStateReader().updateClusterState();

     ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
     DocCollection b1x1 = clusterState.getCollection("b1x1");
@@ -302,7 +302,7 @@ public class TestRebalanceLeaders extends AbstractFullDistribZkTestBase {
     long start = System.currentTimeMillis();
     while (System.currentTimeMillis() - start < timeoutMs) {
       goAgain = false;
-      cloudClient.getZkStateReader().updateClusterState(true);
+      cloudClient.getZkStateReader().updateClusterState();
       Map<String, Slice> slices = cloudClient.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();

       for (Map.Entry<String, Replica> ent : expected.entrySet()) {
@@ -194,7 +194,7 @@ public class TestReplicaProperties extends ReplicaPropertiesBase {
     String lastFailMsg = "";
     for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds.
       lastFailMsg = "";
-      client.getZkStateReader().updateClusterState(true);
+      client.getZkStateReader().updateClusterState();
       ClusterState clusterState = client.getZkStateReader().getClusterState();
       for (Slice slice : clusterState.getSlices(collectionName)) {
         Boolean foundLeader = false;
@@ -163,7 +163,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
     }
     ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();

-    zkStateReader.updateClusterState(true);
+    zkStateReader.updateClusterState();

     int slices = zkStateReader.getClusterState().getCollection("unloadcollection").getSlices().size();
     assertEquals(1, slices);
@@ -179,7 +179,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
       createCmd.setDataDir(getDataDir(core2dataDir));
       adminClient.request(createCmd);
     }
-    zkStateReader.updateClusterState(true);
+    zkStateReader.updateClusterState();
     slices = zkStateReader.getClusterState().getCollection("unloadcollection").getSlices().size();
     assertEquals(1, slices);

@@ -355,7 +355,7 @@ public class ZkControllerTest extends SolrTestCaseJ4 {
       byte[] bytes = Utils.toJSON(state);
       zkController.getZkClient().makePath(ZkStateReader.getCollectionPath("testPublishAndWaitForDownStates"), bytes, CreateMode.PERSISTENT, true);

-      zkController.getZkStateReader().updateClusterState(true);
+      zkController.getZkStateReader().updateClusterState();
       assertTrue(zkController.getZkStateReader().getClusterState().hasCollection("testPublishAndWaitForDownStates"));
       assertNotNull(zkController.getZkStateReader().getClusterState().getCollection("testPublishAndWaitForDownStates"));

@@ -153,7 +153,7 @@ public class StressHdfsTest extends BasicDistributedZkTest {

     waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false);
     cloudClient.setDefaultCollection(DELETE_DATA_DIR_COLLECTION);
-    cloudClient.getZkStateReader().updateClusterState(true);
+    cloudClient.getZkStateReader().updateClusterState();

     for (int i = 1; i < nShards + 1; i++) {
       cloudClient.getZkStateReader().getLeaderRetry(DELETE_DATA_DIR_COLLECTION, "shard" + i, 30000);
@@ -210,7 +210,7 @@ public class StressHdfsTest extends BasicDistributedZkTest {
       }

       Thread.sleep(200);
-      cloudClient.getZkStateReader().updateClusterState(true);
+      cloudClient.getZkStateReader().updateClusterState();
     }

     // check that all dirs are gone
@@ -94,7 +94,6 @@ public class ZkStateReader implements Closeable {

   protected volatile ClusterState clusterState;

-  private static final long SOLRCLOUD_UPDATE_DELAY = Long.parseLong(System.getProperty("solrcloud.update.delay", "5000"));
   private static final int GET_LEADER_RETRY_INTERVAL_MS = 50;
   private static final int GET_LEADER_RETRY_DEFAULT_TIMEOUT = 4000;

@@ -173,9 +172,6 @@ public class ZkStateReader implements Closeable {
         return td;
       }
     }
-  private ScheduledExecutorService updateCloudExecutor = Executors.newScheduledThreadPool(1, new ZKTF());
-
-  private boolean clusterStateUpdateScheduled;

   private final SolrZkClient zkClient;

@@ -225,13 +221,13 @@ public class ZkStateReader implements Closeable {
   }

   // load and publish a new CollectionInfo
-  public void updateClusterState(boolean immediate) throws KeeperException, InterruptedException {
-    updateClusterState(immediate, false);
+  public void updateClusterState() throws KeeperException, InterruptedException {
+    updateClusterState(false);
   }

   // load and publish a new CollectionInfo
   public void updateLiveNodes() throws KeeperException, InterruptedException {
-    updateClusterState(true, true);
+    updateClusterState(true);
   }

   public Aliases getAliases() {
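Reading the old and new one-argument calls side by side hides a meaning change: in the removed public API the boolean meant immediate, while in the surviving private worker it means onlyLiveNodes. A condensed sketch of the two public entry points after this commit, taken from the hunk above with editorial comments added:

public void updateClusterState() throws KeeperException, InterruptedException {
  updateClusterState(false);   // onlyLiveNodes == false: rebuild the full cluster state
}

public void updateLiveNodes() throws KeeperException, InterruptedException {
  updateClusterState(true);    // onlyLiveNodes == true: refresh just the live-nodes set
}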
@@ -485,90 +481,20 @@ public class ZkStateReader implements Closeable {
   }

   // load and publish a new CollectionInfo
-  private synchronized void updateClusterState(boolean immediate,
-      final boolean onlyLiveNodes) throws KeeperException,
-      InterruptedException {
+  private void updateClusterState(boolean onlyLiveNodes) throws KeeperException, InterruptedException {
     // build immutable CloudInfo
-
-    if (immediate) {
-      ClusterState clusterState;
-      synchronized (getUpdateLock()) {
-        List<String> liveNodes = zkClient.getChildren(LIVE_NODES_ZKNODE, null, true);
-        Set<String> liveNodesSet = new HashSet<>(liveNodes);
-
-        if (!onlyLiveNodes) {
-          log.debug("Updating cloud state from ZooKeeper... ");
-          clusterState = constructState(liveNodesSet, null);
-        } else {
-          log.debug("Updating live nodes from ZooKeeper... ({})", liveNodesSet.size());
-          clusterState = this.clusterState;
-          clusterState.setLiveNodes(liveNodesSet);
-        }
-        this.clusterState = clusterState;
-      }
-    } else {
-      if (clusterStateUpdateScheduled) {
-        log.debug("Cloud state update for ZooKeeper already scheduled");
-        return;
-      }
-      log.debug("Scheduling cloud state update from ZooKeeper...");
-      clusterStateUpdateScheduled = true;
-      updateCloudExecutor.schedule(new Runnable() {
-
-        @Override
-        public void run() {
-          log.debug("Updating cluster state from ZooKeeper...");
-          synchronized (getUpdateLock()) {
-            clusterStateUpdateScheduled = false;
-            ClusterState clusterState;
-            try {
-              List<String> liveNodes = zkClient.getChildren(LIVE_NODES_ZKNODE,
-                  null, true);
-              Set<String> liveNodesSet = new HashSet<>(liveNodes);
-
-              if (!onlyLiveNodes) {
-                log.debug("Updating cloud state from ZooKeeper... ");
-                clusterState = constructState(liveNodesSet,null);
-              } else {
-                log.debug("Updating live nodes from ZooKeeper... ");
-                clusterState = ZkStateReader.this.clusterState;
-                clusterState.setLiveNodes(liveNodesSet);
-              }
-
-              ZkStateReader.this.clusterState = clusterState;
-
-            } catch (KeeperException e) {
-              if (e.code() == KeeperException.Code.SESSIONEXPIRED
-                  || e.code() == KeeperException.Code.CONNECTIONLOSS) {
-                log.warn("ZooKeeper watch triggered, but Solr cannot talk to ZK");
-                return;
-              }
-              log.error("", e);
-              throw new ZooKeeperException(
-                  SolrException.ErrorCode.SERVER_ERROR, "", e);
-            } catch (InterruptedException e) {
-              // Restore the interrupted status
-              Thread.currentThread().interrupt();
-              log.error("", e);
-              throw new ZooKeeperException(
-                  SolrException.ErrorCode.SERVER_ERROR, "", e);
-            }
-            // update volatile
-            ZkStateReader.this.clusterState = clusterState;
-
-            synchronized (ZkStateReader.this) {
-              for (String watchedCollection : watchedCollections) {
-                DocCollection live = getCollectionLive(ZkStateReader.this, watchedCollection);
-                assert live != null;
-                if (live != null) {
-                  updateWatchedCollection(live);
-                }
-              }
-            }
-          }
-        }
-      }, SOLRCLOUD_UPDATE_DELAY, TimeUnit.MILLISECONDS);
-    }
+    ClusterState clusterState;
+    synchronized (getUpdateLock()) {
+      List<String> liveNodes = zkClient.getChildren(LIVE_NODES_ZKNODE, null, true);
+      Set<String> liveNodesSet = new HashSet<>(liveNodes);
+
+      if (!onlyLiveNodes) {
+        log.debug("Updating cloud state from ZooKeeper... ");
+        clusterState = constructState(liveNodesSet, null);
+      } else {
+        log.debug("Updating live nodes from ZooKeeper... ({})", liveNodesSet.size());
+        clusterState = this.clusterState;
+        clusterState.setLiveNodes(liveNodesSet);
+      }
+      this.clusterState = clusterState;
+    }
   }

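With the scheduled-executor path deleted, a refresh now always runs synchronously under the update lock before the call returns, which is why the polling loops in the tests above can call updateClusterState() and immediately read a fresh ClusterState. A minimal sketch of that loop shape, assuming an initialized ZkStateReader (the helper class and the timeout values are illustrative, not part of this commit):

import java.util.concurrent.TimeUnit;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.ZkStateReader;

public class WaitForCollectionSketch {
  // Hypothetical helper mirroring the poll-sleep-recheck pattern used throughout these tests.
  static void waitForCollection(ZkStateReader reader, String collection, int maxSeconds) throws Exception {
    long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(maxSeconds);
    while (System.nanoTime() < deadline) {
      reader.updateClusterState();                  // synchronous re-read from ZooKeeper
      ClusterState state = reader.getClusterState();
      if (state.hasCollection(collection)) return;  // state is current as of the call above
      Thread.sleep(200);                            // back off before the next poll
    }
    throw new IllegalStateException("Collection never appeared: " + collection);
  }
}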
@@ -947,7 +873,7 @@ public class ZkStateReader implements Closeable {
       watchedCollections.remove(coll);
       watchedCollectionStates.remove(coll);
       try {
-        updateClusterState(true);
+        updateClusterState();
       } catch (KeeperException e) {
         log.error("Error updating state",e);
       } catch (InterruptedException e) {
@@ -143,7 +143,7 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
     while (cont) {
      if (verbose) System.out.println("-");
       boolean sawLiveRecovering = false;
-      zkStateReader.updateClusterState(true);
+      zkStateReader.updateClusterState();
       ClusterState clusterState = zkStateReader.getClusterState();
       Map<String,Slice> slices = clusterState.getSlicesMap(collection);
       assertNotNull("Could not find collection:" + collection, slices);
@@ -193,7 +193,7 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes

     while (cont) {
       if (verbose) System.out.println("-");
-      zkStateReader.updateClusterState(true);
+      zkStateReader.updateClusterState();
       ClusterState clusterState = zkStateReader.getClusterState();
       if (!clusterState.hasCollection(collection)) break;
       if (cnt == timeoutSeconds) {
@@ -218,7 +218,7 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
   protected void assertAllActive(String collection,ZkStateReader zkStateReader)
       throws KeeperException, InterruptedException {

-      zkStateReader.updateClusterState(true);
+      zkStateReader.updateClusterState();
       ClusterState clusterState = zkStateReader.getClusterState();
       Map<String,Slice> slices = clusterState.getSlicesMap(collection);
       if (slices == null) {
@@ -604,7 +604,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes

   protected void updateMappingsFromZk(List<JettySolrRunner> jettys, List<SolrClient> clients, boolean allowOverSharding) throws Exception {
     ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-    zkStateReader.updateClusterState(true);
+    zkStateReader.updateClusterState();
     cloudJettys.clear();
     shardToJetty.clear();

@@ -1792,7 +1792,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     Map<String,Replica> notLeaders = new HashMap<>();

     ZkStateReader zkr = cloudClient.getZkStateReader();
-    zkr.updateClusterState(true); // force the state to be fresh
+    zkr.updateClusterState(); // force the state to be fresh

     ClusterState cs = zkr.getClusterState();
     Collection<Slice> slices = cs.getActiveSlices(testCollectionName);
@@ -1804,7 +1804,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
     while (waitMs < maxWaitMs && !allReplicasUp) {
       // refresh state every 2 secs
       if (waitMs % 2000 == 0)
-        cloudClient.getZkStateReader().updateClusterState(true);
+        cloudClient.getZkStateReader().updateClusterState();

       cs = cloudClient.getZkStateReader().getClusterState();
       assertNotNull(cs);
@@ -1858,7 +1858,7 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
   }

   protected String printClusterStateInfo(String collection) throws Exception {
-    cloudClient.getZkStateReader().updateClusterState(true);
+    cloudClient.getZkStateReader().updateClusterState();
     String cs = null;
     ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
     if (collection != null) {
@@ -403,7 +403,7 @@ public class ChaosMonkey {
     for (CloudJettyRunner cloudJetty : shardToJetty.get(slice)) {

       // get latest cloud state
-      zkStateReader.updateClusterState(true);
+      zkStateReader.updateClusterState();

       Slice theShards = zkStateReader.getClusterState().getSlicesMap(collection)
           .get(slice);
@@ -427,7 +427,7 @@ public class ChaosMonkey {

   public SolrClient getRandomClient(String slice) throws KeeperException, InterruptedException {
     // get latest cloud state
-    zkStateReader.updateClusterState(true);
+    zkStateReader.updateClusterState();

     // get random shard
     List<SolrClient> clients = shardToClient.get(slice);
@@ -604,4 +604,4 @@ public class ChaosMonkey {
     return true;
   }

-}
+}