SOLR-8256: Set legacyCloud=false as default

This commit is contained in:
Cao Manh Dat 2017-06-21 22:25:39 +07:00
parent eff583ee88
commit 8e9d685a40
23 changed files with 588 additions and 1036 deletions

View File

@ -21,6 +21,14 @@ See the Quick Start guide at http://lucene.apache.org/solr/quickstart.html
Upgrading from Solr 6.x
----------------------
* the cluster property 'legacyCloud' is set to false from 7.0. This means 'zookeeper is the truth' by
default. If an entry for a replica does not exist in the state.json, that replica cannot get
registered. This may affect users who relied on the old behavior, where replicas they brought up were
automatically registered as part of a shard. However, it is possible to fall back to the old behavior by
setting the cluster property legacyCloud=true using the following command:
./server/scripts/cloud-scripts/zkcli.sh -zkhost 127.0.0.1:2181 -cmd clusterprop -name legacyCloud -val true
* HttpClientInterceptorPlugin is now HttpClientBuilderPlugin and must work with a
SolrHttpClientBuilder rather than an HttpClientConfigurer.
@ -290,6 +298,8 @@ Other Changes
* SOLR-4646: eDismax lowercaseOperators now defaults to "false" for luceneMatchVersion >= 7.0.0 (janhoy, David Smiley)
* SOLR-8256: Set legacyCloud=false as default (Cao Manh Dat)
================== 6.7.0 ==================
Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

View File

@ -18,10 +18,10 @@ package org.apache.solr.cloud;
import java.lang.invoke.MethodHandles;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.solr.cloud.OverseerCollectionMessageHandler.Cmd;
@ -32,16 +32,13 @@ import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.Utils;
import org.apache.solr.handler.component.ShardHandler;
import org.apache.solr.util.TimeOut;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.cloud.Assign.getNodesForNewReplicas;
import static org.apache.solr.cloud.OverseerCollectionMessageHandler.COLL_CONF;
import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
@ -49,7 +46,6 @@ import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
import static org.apache.solr.common.params.CommonParams.NAME;
public class CreateShardCmd implements Cmd {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@ -67,11 +63,8 @@ public class CreateShardCmd implements Cmd {
log.info("Create shard invoked: {}", message);
if (collectionName == null || sliceName == null)
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'collection' and 'shard' are required parameters");
int numSlices = 1;
ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
DocCollection collection = clusterState.getCollection(collectionName);
// int repFactor = message.getInt(REPLICATION_FACTOR, collection.getInt(REPLICATION_FACTOR, 1));
int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, collection.getInt(NRT_REPLICAS, collection.getInt(REPLICATION_FACTOR, 1))));
int numPullReplicas = message.getInt(PULL_REPLICAS, collection.getInt(PULL_REPLICAS, 0));
int numTlogReplicas = message.getInt(TLOG_REPLICAS, collection.getInt(TLOG_REPLICAS, 0));
@ -88,26 +81,12 @@ public class CreateShardCmd implements Cmd {
ZkStateReader zkStateReader = ocmh.zkStateReader;
Overseer.getStateUpdateQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
// wait for a while until we see the shard
TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS);
boolean created = false;
while (!timeout.hasTimedOut()) {
Thread.sleep(100);
created = zkStateReader.getClusterState().getCollection(collectionName).getSlice(sliceName) != null;
if (created) break;
}
if (!created)
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not fully create shard: " + message.getStr(NAME));
String configName = message.getStr(COLL_CONF);
ocmh.waitForNewShard(collectionName, sliceName);
String async = message.getStr(ASYNC);
Map<String, String> requestMap = null;
if (async != null) {
requestMap = new HashMap<>(totalReplicas, 1.0f);
}
int createdNrtReplicas = 0, createdTlogReplicas = 0, createdPullReplicas = 0;
CountDownLatch countDownLatch = new CountDownLatch(totalReplicas);
for (int j = 1; j <= totalReplicas; j++) {
int coreNameNumber;
Replica.Type typeToCreate;
@ -131,20 +110,41 @@ public class CreateShardCmd implements Cmd {
+ " on " + nodeName);
// Need to create new params for each request
ModifiableSolrParams params = new ModifiableSolrParams();
params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.CREATE.toString());
params.set(CoreAdminParams.NAME, coreName);
params.set(CoreAdminParams.REPLICA_TYPE, typeToCreate.name());
params.set(COLL_CONF, configName);
params.set(CoreAdminParams.COLLECTION, collectionName);
params.set(CoreAdminParams.SHARD, sliceName);
params.set(ZkStateReader.NUM_SHARDS_PROP, numSlices);
ocmh.addPropertyParams(message, params);
ocmh.sendShardRequest(nodeName, params, shardHandler, async, requestMap);
ZkNodeProps addReplicasProps = new ZkNodeProps(
COLLECTION_PROP, collectionName,
SHARD_ID_PROP, sliceName,
CoreAdminParams.REPLICA_TYPE, typeToCreate.name(),
CoreAdminParams.NODE, nodeName,
CoreAdminParams.NAME, coreName);
Map<String, Object> propertyParams = new HashMap<>();
ocmh.addPropertyParams(message, propertyParams);;
addReplicasProps = addReplicasProps.plus(propertyParams);
if(async!=null) addReplicasProps.getProperties().put(ASYNC, async);
final NamedList addResult = new NamedList();
ocmh.addReplica(zkStateReader.getClusterState(), addReplicasProps, addResult, ()-> {
countDownLatch.countDown();
Object addResultFailure = addResult.get("failure");
if (addResultFailure != null) {
SimpleOrderedMap failure = (SimpleOrderedMap) results.get("failure");
if (failure == null) {
failure = new SimpleOrderedMap();
results.add("failure", failure);
}
failure.addAll((NamedList) addResultFailure);
} else {
SimpleOrderedMap success = (SimpleOrderedMap) results.get("success");
if (success == null) {
success = new SimpleOrderedMap();
results.add("success", success);
}
success.addAll((NamedList) addResult.get("success"));
}
});
}
ocmh.processResponses(results, shardHandler, true, "Failed to create shard", async, requestMap, Collections.emptySet());
log.debug("Waiting for create shard action to complete");
countDownLatch.await(5, TimeUnit.MINUTES);
log.debug("Finished waiting for create shard action to complete");
log.info("Finished create command on all shards for collection: " + collectionName);

View File

@ -761,8 +761,8 @@ public class Overseer implements Closeable {
}
}
public static boolean isLegacy(ZkStateReader stateReader) {
String legacyProperty = stateReader.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "true");
return !"false".equals(legacyProperty);
String legacyProperty = stateReader.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "false");
return "true".equals(legacyProperty);
}
public ZkStateReader getZkStateReader() {

View File

@ -37,6 +37,7 @@ import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.common.SolrInputDocument;
@ -46,6 +47,7 @@ import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.CommonParams;
@ -59,6 +61,7 @@ import org.apache.solr.core.SolrCore;
import org.apache.solr.handler.CdcrParams;
import org.apache.solr.util.TimeOut;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
@ -69,6 +72,7 @@ import org.slf4j.LoggerFactory;
import static org.apache.solr.cloud.OverseerCollectionMessageHandler.CREATE_NODE_SET;
import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARDS_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.CLUSTER_PROPS;
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
import static org.apache.solr.handler.admin.CoreAdminHandler.COMPLETED;
@ -142,6 +146,11 @@ public class BaseCdcrDistributedZkTest extends AbstractDistribZkTestBase {
zkStateReader.getZkClient().create(ZkStateReader.CLUSTER_PROPS,
Utils.toJSON(Collections.singletonMap("urlScheme", "https")),
CreateMode.PERSISTENT, true);
} catch (KeeperException.NodeExistsException e) {
ZkNodeProps props = ZkNodeProps.load(zkStateReader.getZkClient().getData(ZkStateReader.CLUSTER_PROPS,
null, null, true));
props = props.plus("urlScheme", "https");
zkStateReader.getZkClient().setData(CLUSTER_PROPS, Utils.toJSON(props), true);
} finally {
zkStateReader.close();
}
@ -567,7 +576,7 @@ public class BaseCdcrDistributedZkTest extends AbstractDistribZkTestBase {
*/
protected List<String> startServers(int nServer) throws Exception {
String temporaryCollection = "tmp_collection";
System.setProperty("collection", temporaryCollection);
for (int i = 1; i <= nServer; i++) {
// give everyone their own solrhome
File jettyDir = createTempDir("jetty").toFile();
@ -577,6 +586,19 @@ public class BaseCdcrDistributedZkTest extends AbstractDistribZkTestBase {
jettys.add(jetty);
}
try (SolrClient client = createCloudClient(temporaryCollection)) {
assertEquals(0, CollectionAdminRequest
.createCollection(temporaryCollection, shardCount, 1)
.setCreateNodeSet("")
.process(client).getStatus());
for (int i = 0; i < jettys.size(); i++) {
assertTrue(CollectionAdminRequest
.addReplicaToShard(temporaryCollection, "shard"+((i % shardCount) + 1))
.setNode(jettys.get(i).getNodeName())
.process(client).isSuccess());
}
}
ZkStateReader zkStateReader = jettys.get(0).getCoreContainer().getZkController().getZkStateReader();
// now wait till we see the leader for each shard

View File

@ -31,7 +31,7 @@ import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
@ -153,21 +153,15 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
}
private void testNodeWithoutCollectionForwarding() throws Exception {
final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
try (HttpSolrClient client = getHttpSolrClient(baseUrl)) {
client.setConnectionTimeout(30000);
Create createCmd = new Create();
createCmd.setRoles("none");
createCmd.setCoreName(ONE_NODE_COLLECTION + "core");
createCmd.setCollection(ONE_NODE_COLLECTION);
createCmd.setNumShards(1);
createCmd.setDataDir(getDataDir(createTempDir(ONE_NODE_COLLECTION).toFile().getAbsolutePath()));
client.request(createCmd);
} catch (Exception e) {
e.printStackTrace();
fail(e.getMessage());
}
assertEquals(0, CollectionAdminRequest
.createCollection(ONE_NODE_COLLECTION, 1, 1)
.setCreateNodeSet("")
.process(cloudClient).getStatus());
assertTrue(CollectionAdminRequest
.addReplicaToShard(ONE_NODE_COLLECTION, "shard1")
.setCoreName(ONE_NODE_COLLECTION + "core")
.process(cloudClient).isSuccess());
waitForCollection(cloudClient.getZkStateReader(), ONE_NODE_COLLECTION, 1);
waitForRecoveriesToFinish(ONE_NODE_COLLECTION, cloudClient.getZkStateReader(), false);

View File

@ -25,8 +25,10 @@ import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.ContentStreamUpdateRequest;
import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
import org.apache.solr.client.solrj.request.CoreAdminRequest.Unload;
@ -570,19 +572,27 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
protected void createCores(final HttpSolrClient client,
ThreadPoolExecutor executor, final String collection, final int numShards, int cnt) {
try {
assertEquals(0, CollectionAdminRequest.createCollection(collection, numShards, 1)
.setCreateNodeSet("")
.process(client).getStatus());
} catch (SolrServerException | IOException e) {
throw new RuntimeException(e);
}
String nodeName = null;
for (JettySolrRunner jetty : jettys) {
if (client.getBaseURL().startsWith(jetty.getBaseUrl().toString())) {
nodeName = jetty.getNodeName();
}
}
for (int i = 0; i < cnt; i++) {
final int freezeI = i;
final String freezeNodename = nodeName;
executor.execute(() -> {
Create createCmd = new Create();
createCmd.setCoreName(collection + freezeI);
createCmd.setCollection(collection);
createCmd.setNumShards(numShards);
try {
String core3dataDir = createTempDir(collection).toFile().getAbsolutePath();
createCmd.setDataDir(getDataDir(core3dataDir));
client.request(createCmd);
assertTrue(CollectionAdminRequest.addReplicaToShard(collection, "shard"+((freezeI%numShards)+1))
.setCoreName(collection + freezeI)
.setNode(freezeNodename).process(client).isSuccess());
} catch (SolrServerException | IOException e) {
throw new RuntimeException(e);
}
@ -780,22 +790,20 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
private void testANewCollectionInOneInstanceWithManualShardAssignement() throws Exception {
log.info("### STARTING testANewCollectionInOneInstanceWithManualShardAssignement");
System.clearProperty("numShards");
assertEquals(0, CollectionAdminRequest.createCollection(oneInstanceCollection2, 2, 2)
.setCreateNodeSet("")
.setMaxShardsPerNode(4)
.process(cloudClient).getStatus());
List<SolrClient> collectionClients = new ArrayList<>();
SolrClient client = clients.get(0);
final String baseUrl = ((HttpSolrClient) client).getBaseURL().substring(
0,
((HttpSolrClient) client).getBaseURL().length()
- DEFAULT_COLLECTION.length() - 1);
createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 1, "slice1");
createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 2, "slice2");
createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 3, "slice2");
createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 4, "slice1");
while (pending != null && pending.size() > 0) {
Future<Object> future = completionService.take();
pending.remove(future);
for (int i = 0; i < 4; i++) {
CollectionAdminResponse resp = CollectionAdminRequest
.addReplicaToShard(oneInstanceCollection2, "shard" + ((i%2)+1))
.setNode(jettys.get(0).getNodeName())
.process(cloudClient);
for (String coreName : resp.getCollectionCoresStatus().keySet()) {
collectionClients.add(createNewSolrClient(coreName, jettys.get(0).getBaseUrl().toString()));
}
}
SolrClient client1 = collectionClients.get(0);
@ -846,14 +854,11 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
zkStateReader.forceUpdateCollection(oneInstanceCollection2);
Map<String,Slice> slices = zkStateReader.getClusterState().getSlicesMap(oneInstanceCollection2);
assertNotNull(slices);
String roles = slices.get("slice1").getReplicasMap().values().iterator().next().getStr(ZkStateReader.ROLES_PROP);
assertEquals("none", roles);
ZkCoreNodeProps props = new ZkCoreNodeProps(getCommonCloudSolrClient().getZkStateReader().getClusterState().getLeader(oneInstanceCollection2, "slice1"));
ZkCoreNodeProps props = new ZkCoreNodeProps(getCommonCloudSolrClient().getZkStateReader().getClusterState().getLeader(oneInstanceCollection2, "shard1"));
// now test that unloading a core gets us a new leader
try (HttpSolrClient unloadClient = getHttpSolrClient(baseUrl)) {
try (HttpSolrClient unloadClient = getHttpSolrClient(jettys.get(0).getBaseUrl().toString())) {
unloadClient.setConnectionTimeout(15000);
unloadClient.setSoTimeout(60000);
Unload unloadCmd = new Unload(true);
@ -864,7 +869,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
unloadClient.request(unloadCmd);
int tries = 50;
while (leader.equals(zkStateReader.getLeaderUrl(oneInstanceCollection2, "slice1", 10000))) {
while (leader.equals(zkStateReader.getLeaderUrl(oneInstanceCollection2, "shard1", 10000))) {
Thread.sleep(100);
if (tries-- == 0) {
fail("Leader never changed");
@ -911,22 +916,14 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
private void testANewCollectionInOneInstance() throws Exception {
log.info("### STARTING testANewCollectionInOneInstance");
CollectionAdminResponse response = CollectionAdminRequest.createCollection(oneInstanceCollection, 2, 2)
.setCreateNodeSet(jettys.get(0).getNodeName())
.setMaxShardsPerNode(4)
.process(cloudClient);
assertEquals(0, response.getStatus());
List<SolrClient> collectionClients = new ArrayList<>();
SolrClient client = clients.get(0);
final String baseUrl = ((HttpSolrClient) client).getBaseURL().substring(
0,
((HttpSolrClient) client).getBaseURL().length()
- DEFAULT_COLLECTION.length() - 1);
createCollection(oneInstanceCollection, collectionClients, baseUrl, 1);
createCollection(oneInstanceCollection, collectionClients, baseUrl, 2);
createCollection(oneInstanceCollection, collectionClients, baseUrl, 3);
createCollection(oneInstanceCollection, collectionClients, baseUrl, 4);
while (pending != null && pending.size() > 0) {
Future<Object> future = completionService.take();
if (future == null) return;
pending.remove(future);
for (String coreName : response.getCollectionCoresStatus().keySet()) {
collectionClients.add(createNewSolrClient(coreName, jettys.get(0).getBaseUrl().toString()));
}
SolrClient client1 = collectionClients.get(0);
@ -1083,26 +1080,28 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
}
private void createNewCollection(final String collection) throws InterruptedException {
try {
assertEquals(0, CollectionAdminRequest
.createCollection(collection, 2, 1)
.setCreateNodeSet("")
.process(cloudClient).getStatus());
} catch (Exception e) {
e.printStackTrace();
//fails
}
final List<SolrClient> collectionClients = new ArrayList<>();
otherCollectionClients.put(collection, collectionClients);
int unique = 0;
for (final SolrClient client : clients) {
int unique = 0 ;
for (final JettySolrRunner runner : jettys) {
unique++;
final String baseUrl = ((HttpSolrClient) client).getBaseURL()
.substring(
0,
((HttpSolrClient) client).getBaseURL().length()
- DEFAULT_COLLECTION.length() -1);
final int frozeUnique = unique;
Callable call = () -> {
try (HttpSolrClient client1 = getHttpSolrClient(baseUrl)) {
client1.setConnectionTimeout(15000);
client1.setSoTimeout(60000);
Create createCmd = new Create();
createCmd.setCoreName(collection);
createCmd.setDataDir(getDataDir(createTempDir(collection).toFile().getAbsolutePath()));
client1.request(createCmd);
try {
assertTrue(CollectionAdminRequest
.addReplicaToShard(collection, "shard"+ ((frozeUnique%2)+1))
.setNode(runner.getNodeName())
.process(cloudClient).isSuccess());
} catch (Exception e) {
e.printStackTrace();
//fails
@ -1110,7 +1109,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
return null;
};
collectionClients.add(createNewSolrClient(collection, baseUrl));
collectionClients.add(createNewSolrClient(collection, runner.getBaseUrl().toString()));
pending.add(completionService.submit(call));
while (pending != null && pending.size() > 0) {

View File

@ -16,25 +16,17 @@
*/
package org.apache.solr.cloud;
import java.io.File;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import com.google.common.collect.ImmutableMap;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.CoreContainer;
import org.apache.zookeeper.CreateMode;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@ -42,37 +34,21 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Slow
public class ClusterStateUpdateTest extends SolrTestCaseJ4 {
public class ClusterStateUpdateTest extends SolrCloudTestCase {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
protected ZkTestServer zkServer;
@Override
public void setUp() throws Exception {
super.setUp();
configureCluster(3)
.addConfig("conf", configset("cloud-minimal"))
.configure();
protected String zkDir;
private CoreContainer container1;
private CoreContainer container2;
private CoreContainer container3;
private File dataDir1;
private File dataDir2;
private File dataDir3;
private File dataDir4;
private static volatile File solrHomeDirectory;
}
@BeforeClass
public static void beforeClass() throws IOException {
solrHomeDirectory = createTempDir().toFile();
public static void beforeClass() {
System.setProperty("solrcloud.skip.autorecovery", "true");
System.setProperty("genericCoreNodeNames", "false");
copyMinFullSetup(solrHomeDirectory);
}
@AfterClass
@ -80,82 +56,16 @@ public class ClusterStateUpdateTest extends SolrTestCaseJ4 {
System.clearProperty("solrcloud.skip.autorecovery");
System.clearProperty("genericCoreNodeNames");
}
@Override
public void setUp() throws Exception {
super.setUp();
System.setProperty("zkClientTimeout", "3000");
File tmpDir = createTempDir("zkData").toFile();
zkDir = tmpDir.getAbsolutePath();
zkServer = new ZkTestServer(zkDir);
zkServer.run();
System.setProperty("zkHost", zkServer.getZkAddress());
AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(), zkServer
.getZkAddress(), "solrconfig.xml", "schema.xml");
log.info("####SETUP_START " + getTestName());
dataDir1 = new File(tmpDir + File.separator + "data1");
dataDir1.mkdirs();
dataDir2 = new File(tmpDir + File.separator + "data2");
dataDir2.mkdirs();
dataDir3 = new File(tmpDir + File.separator + "data3");
dataDir3.mkdirs();
dataDir4 = new File(tmpDir + File.separator + "data4");
dataDir4.mkdirs();
// set some system properties for use by tests
System.setProperty("solr.test.sys.prop1", "propone");
System.setProperty("solr.test.sys.prop2", "proptwo");
System.setProperty("solr.solr.home", TEST_HOME());
System.setProperty("hostPort", "1661");
System.setProperty("solr.data.dir", ClusterStateUpdateTest.this.dataDir1.getAbsolutePath());
container1 = new CoreContainer(solrHomeDirectory.getAbsolutePath());
container1.load();
System.clearProperty("hostPort");
System.setProperty("hostPort", "1662");
System.setProperty("solr.data.dir", ClusterStateUpdateTest.this.dataDir2.getAbsolutePath());
container2 = new CoreContainer(solrHomeDirectory.getAbsolutePath());
container2.load();
System.clearProperty("hostPort");
System.setProperty("hostPort", "1663");
System.setProperty("solr.data.dir", ClusterStateUpdateTest.this.dataDir3.getAbsolutePath());
container3 = new CoreContainer(solrHomeDirectory.getAbsolutePath());
container3.load();
System.clearProperty("hostPort");
System.clearProperty("solr.solr.home");
log.info("####SETUP_END " + getTestName());
}
@Test
public void testCoreRegistration() throws Exception {
System.setProperty("solrcloud.update.delay", "1");
Map<String,Object> props2 = new HashMap<>();
props2.put("configName", "conf1");
ZkNodeProps zkProps2 = new ZkNodeProps(props2);
SolrZkClient zkClient = new SolrZkClient(zkServer.getZkAddress(),
AbstractZkTestCase.TIMEOUT);
zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/testcore",
Utils.toJSON(zkProps2), CreateMode.PERSISTENT, true);
zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/testcore/shards",
CreateMode.PERSISTENT, true);
zkClient.close();
container1.create("testcore", ImmutableMap.of("dataDir", dataDir4.getAbsolutePath()));
ZkController zkController2 = container2.getZkController();
assertEquals(0, CollectionAdminRequest.createCollection("testcore", 1,1)
.setCreateNodeSet(cluster.getJettySolrRunner(0).getNodeName())
.process(cluster.getSolrClient()).getStatus());
ZkController zkController2 = cluster.getJettySolrRunner(1).getCoreContainer().getZkController();
String host = zkController2.getHostName();
@ -184,19 +94,22 @@ public class ClusterStateUpdateTest extends SolrTestCaseJ4 {
assertEquals(1, shards.size());
Replica zkProps = shards.get(host + ":1661_solr_testcore");
// assert this is core of container1
Replica zkProps = shards.get("core_node1");
assertNotNull(zkProps);
assertEquals(host + ":1661_solr", zkProps.getStr(ZkStateReader.NODE_NAME_PROP));
assertEquals(host + ":" +cluster.getJettySolrRunner(0).getLocalPort()+"_solr", zkProps.getStr(ZkStateReader.NODE_NAME_PROP));
assertEquals("http://" + host + ":1661/solr", zkProps.getStr(ZkStateReader.BASE_URL_PROP));
assertEquals("http://" + host + ":"+cluster.getJettySolrRunner(0).getLocalPort()+"/solr", zkProps.getStr(ZkStateReader.BASE_URL_PROP));
// assert there are 3 live nodes
Set<String> liveNodes = clusterState2.getLiveNodes();
assertNotNull(liveNodes);
assertEquals(3, liveNodes.size());
container3.shutdown();
// shut down node 2
cluster.stopJettySolrRunner(2);
// slight pause (15s timeout) for watch to trigger
for(int i = 0; i < (5 * 15); i++) {
@ -208,52 +121,21 @@ public class ClusterStateUpdateTest extends SolrTestCaseJ4 {
assertEquals(2, zkController2.getClusterState().getLiveNodes().size());
// quickly kill / start client
container2.getZkController().getZkClient().getSolrZooKeeper().getConnection()
.disconnect();
container2.shutdown();
System.setProperty("hostPort", "1662");
System.setProperty("solr.data.dir", ClusterStateUpdateTest.this.dataDir2.getAbsolutePath());
container2 = new CoreContainer(solrHomeDirectory.getAbsolutePath());
container2.load();
System.clearProperty("hostPort");
cluster.getJettySolrRunner(1).stop();
cluster.getJettySolrRunner(1).start();
// pause for watch to trigger
for(int i = 0; i < 200; i++) {
if (container1.getZkController().getClusterState().liveNodesContain(
container2.getZkController().getNodeName())) {
if (cluster.getJettySolrRunner(0).getCoreContainer().getZkController().getClusterState().liveNodesContain(
cluster.getJettySolrRunner(1).getCoreContainer().getZkController().getNodeName())) {
break;
}
Thread.sleep(100);
}
assertTrue(container1.getZkController().getClusterState().liveNodesContain(
container2.getZkController().getNodeName()));
assertTrue(cluster.getJettySolrRunner(0).getCoreContainer().getZkController().getClusterState().liveNodesContain(
cluster.getJettySolrRunner(1).getCoreContainer().getZkController().getNodeName()));
// core.close(); // don't close - this core is managed by container1 now
}
@Override
public void tearDown() throws Exception {
container1.shutdown();
container2.shutdown();
container3.shutdown();
zkServer.shutdown();
super.tearDown();
System.clearProperty("zkClientTimeout");
System.clearProperty("zkHost");
System.clearProperty("hostPort");
System.clearProperty("solrcloud.update.delay");
System.clearProperty("solr.data.dir");
}
static void printLayout(String zkHost) throws Exception {
SolrZkClient zkClient = new SolrZkClient(
zkHost, AbstractZkTestCase.TIMEOUT);
zkClient.printLayoutToStdOut();
zkClient.close();
}
}

View File

@ -41,16 +41,13 @@ import com.google.common.collect.ImmutableList;
import org.apache.commons.io.IOUtils;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.lucene.util.TestUtil;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CoreAdminRequest;
import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
import org.apache.solr.client.solrj.request.CoreStatus;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
@ -157,16 +154,15 @@ public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
final String collectionName = "halfdeletedcollection";
// create a core that simulates something left over from a partially-deleted collection
Create createCmd = new Create();
createCmd.setCoreName("halfdeletedcollection_shard1_replica1");
createCmd.setCollection(collectionName);
createCmd.setCollectionConfigName("conf");
assertEquals(0, CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
.setCreateNodeSet("")
.process(cluster.getSolrClient()).getStatus());
String dataDir = createTempDir().toFile().getAbsolutePath();
createCmd.setDataDir(dataDir);
createCmd.setNumShards(2);
createCmd.process(cluster.getSolrClient());
// create a core that simulates something left over from a partially-deleted collection
assertTrue(CollectionAdminRequest
.addReplicaToShard(collectionName, "shard1")
.setDataDir(dataDir)
.process(cluster.getSolrClient()).isSuccess());
CollectionAdminRequest.deleteCollection(collectionName)
.process(cluster.getSolrClient());
@ -282,32 +278,21 @@ public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
@Test
public void testCreateShouldFailOnExistingCore() throws Exception {
// first we make a core with the core name the collections api
// will try and use - this will cause our mock fail
Create createCmd = new Create();
createCmd.setCoreName(Assign.buildCoreName("halfcollection", "shard1", Replica.Type.NRT, 1));
createCmd.setCollection("halfcollectionblocker");
String dataDir = createTempDir().toFile().getAbsolutePath();
createCmd.setDataDir(dataDir);
createCmd.setNumShards(1);
createCmd.setCollectionConfigName("conf");
assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker", "conf", 1, 1)
.setCreateNodeSet("")
.process(cluster.getSolrClient()).getStatus());
assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker", "shard1")
.setNode(cluster.getJettySolrRunner(0).getNodeName())
.setCoreName(Assign.buildCoreName("halfcollection", "shard1", Replica.Type.NRT, 1))
.process(cluster.getSolrClient()).isSuccess());
try (SolrClient client = cluster.getJettySolrRunner(0).newClient()) {
client.request(createCmd);
}
createCmd = new Create();
createCmd.setCoreName(Assign.buildCoreName("halfcollection", "shard1", Replica.Type.NRT, 1));
createCmd.setCollection("halfcollectionblocker2");
dataDir = createTempDir().toFile().getAbsolutePath();
createCmd.setDataDir(dataDir);
createCmd.setNumShards(1);
createCmd.setCollectionConfigName("conf");
try (SolrClient client = cluster.getJettySolrRunner(1).newClient()) {
client.request(createCmd);
}
assertEquals(0, CollectionAdminRequest.createCollection("halfcollectionblocker2", "conf",1, 1)
.setCreateNodeSet("")
.process(cluster.getSolrClient()).getStatus());
assertTrue(CollectionAdminRequest.addReplicaToShard("halfcollectionblocker2", "shard1")
.setNode(cluster.getJettySolrRunner(1).getNodeName())
.setCoreName(Assign.buildCoreName("halfcollection", "shard1", Replica.Type.NRT, 1))
.process(cluster.getSolrClient()).isSuccess());
String nn1 = cluster.getJettySolrRunner(0).getNodeName();
String nn2 = cluster.getJettySolrRunner(1).getNodeName();
@ -327,74 +312,18 @@ public class CollectionsAPIDistributedZkTest extends SolrCloudTestCase {
assertTrue(val1.contains("SolrException") || val2.contains("SolrException"));
}
@Test
public void testNoCollectionSpecified() throws Exception {
// TODO - should we remove this behaviour?
assertFalse(cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection("corewithnocollection"));
assertFalse(cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection("corewithnocollection2"));
// try and create a SolrCore with no collection name
Create createCmd = new Create();
createCmd.setCoreName("corewithnocollection");
createCmd.setCollection("");
String dataDir = createTempDir().toFile().getAbsolutePath();
createCmd.setDataDir(dataDir);
createCmd.setNumShards(1);
createCmd.setCollectionConfigName("conf");
cluster.getSolrClient().request(createCmd);
// try and create a SolrCore with no collection name
createCmd.setCollection(null);
createCmd.setCoreName("corewithnocollection2");
cluster.getSolrClient().request(createCmd);
// in both cases, the collection should have default to the core name
cluster.getSolrClient().getZkStateReader().forceUpdateCollection("corewithnocollection");
cluster.getSolrClient().getZkStateReader().forceUpdateCollection("corewithnocollection2");
assertTrue(cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection("corewithnocollection"));
assertTrue(cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection("corewithnocollection2"));
}
@Test
public void testNoConfigSetExist() throws Exception {
final CloudSolrClient cloudClient = cluster.getSolrClient();
assertFalse(cloudClient.getZkStateReader().getClusterState().hasCollection("corewithnocollection3"));
// try and create a SolrCore with no collection name
Create createCmd = new Create();
createCmd.setCoreName("corewithnocollection3");
createCmd.setCollection("");
String dataDir = createTempDir().toFile().getAbsolutePath();
createCmd.setDataDir(dataDir);
createCmd.setNumShards(1);
createCmd.setCollectionConfigName("conf123");
expectThrows(Exception.class, () -> {
cluster.getSolrClient().request(createCmd);
CollectionAdminRequest.createCollection("noconfig", "conf123", 1, 1)
.process(cluster.getSolrClient());
});
TimeUnit.MILLISECONDS.sleep(1000);
// in both cases, the collection should have default to the core name
cloudClient.getZkStateReader().forceUpdateCollection("corewithnocollection3");
Collection<Slice> slices = cloudClient.getZkStateReader().getClusterState().getActiveSlices("corewithnocollection3");
int replicaCount = 0;
if (slices != null) {
for (Slice slice : slices) {
replicaCount += slice.getReplicas().size();
}
}
assertEquals("replicaCount", 0, replicaCount);
// TODO - WTF? shouldn't this *not* contain the collection?
assertTrue(CollectionAdminRequest.listCollections(cloudClient).contains("corewithnocollection3"));
cluster.getSolrClient().getZkStateReader().forceUpdateCollection("noconfig");
assertFalse(CollectionAdminRequest.listCollections(cluster.getSolrClient()).contains("noconfig"));
}
@Test

View File

@ -269,20 +269,22 @@ public class CollectionsAPISolrJTest extends SolrCloudTestCase {
// sanity check our expected default
final ClusterProperties props = new ClusterProperties(zkClient());
assertEquals("Expecting prop to default to unset, test needs upated",
props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, null), null);
assertEquals("Expecting legacyCloud to false as default",
props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, null), "false");
CollectionAdminResponse response = CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false")
CollectionAdminResponse response = CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "true")
.process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
assertEquals("Cluster property was not set", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, null), "false");
assertEquals("Cluster property was not set", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, null), "true");
// Unset ClusterProp that we set.
CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, null).process(cluster.getSolrClient());
assertEquals("Cluster property was not unset", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, null), null);
response = CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false")
.process(cluster.getSolrClient());
assertEquals(0, response.getStatus());
assertEquals("Cluster property was not set", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, null), "false");
}
@Test

View File

@ -16,54 +16,21 @@
*/
package org.apache.solr.cloud;
import javax.xml.parsers.ParserConfigurationException;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import com.google.common.collect.ImmutableMap;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.SolrResourceLoader;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.xml.sax.SAXException;
@Slow
public class LeaderElectionIntegrationTest extends SolrTestCaseJ4 {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private final static int NUM_SHARD_REPLICAS = 5;
private static final Pattern HOST = Pattern
.compile(".*?\\:(\\d\\d\\d\\d)_.*");
protected ZkTestServer zkServer;
protected String zkDir;
private Map<Integer,CoreContainer> containerMap = new HashMap<>();
private Map<String,Set<Integer>> shardPorts = new HashMap<>();
private SolrZkClient zkClient;
private ZkStateReader reader;
public class LeaderElectionIntegrationTest extends SolrCloudTestCase {
private final static int NUM_REPLICAS_OF_SHARD1 = 5;
@BeforeClass
public static void beforeClass() {
@ -73,171 +40,112 @@ public class LeaderElectionIntegrationTest extends SolrTestCaseJ4 {
@Override
public void setUp() throws Exception {
super.setUp();
ignoreException("No UpdateLog found - cannot sync");
ignoreException("No UpdateLog found - cannot recover");
System.setProperty("zkClientTimeout", "8000");
zkDir = createTempDir("zkData").toFile().getAbsolutePath();
zkServer = new ZkTestServer(zkDir);
zkServer.run();
System.setProperty("zkHost", zkServer.getZkAddress());
AbstractZkTestCase.buildZooKeeper(zkServer.getZkHost(),
zkServer.getZkAddress(), "solrconfig.xml", "schema.xml");
log.info("####SETUP_START " + getTestName());
// set some system properties for use by tests
System.setProperty("solr.test.sys.prop1", "propone");
System.setProperty("solr.test.sys.prop2", "proptwo");
for (int i = 7000; i < 7000 + NUM_SHARD_REPLICAS; i++) {
try {
setupContainer(i, "shard1");
} catch (Throwable t) {
log.error("!!!Could not start container:" + i + " The exception thrown was: " + t.getClass() + " " + t.getMessage());
fail("Could not start container:" + i + ". Reason:" + t.getClass() + " " + t.getMessage());
}
}
try {
setupContainer(3333, "shard2");
} catch (Throwable t) {
log.error("!!!Could not start container 3333. The exception thrown was: " + t.getClass() + " " + t.getMessage());
fail("Could not start container: 3333");
}
zkClient = new SolrZkClient(zkServer.getZkAddress(),
AbstractZkTestCase.TIMEOUT);
reader = new ZkStateReader(zkClient);
reader.createClusterStateWatchersAndUpdate();
boolean initSuccessful = false;
for (int i = 0; i < 30; i++) {
List<String> liveNodes = zkClient.getChildren("/live_nodes", null, true);
if (liveNodes.size() == NUM_SHARD_REPLICAS + 1) {
// all nodes up
initSuccessful = true;
break;
}
Thread.sleep(1000);
log.info("Waiting for more nodes to come up, now: " + liveNodes.size()
+ "/" + (NUM_SHARD_REPLICAS + 1));
}
if (!initSuccessful) {
fail("Init was not successful!");
}
log.info("####SETUP_END " + getTestName());
configureCluster(6)
.addConfig("conf", configset("cloud-minimal"))
.configure();
}
private void setupContainer(int port, String shard) throws IOException,
ParserConfigurationException, SAXException {
Path data = createTempDir();
System.setProperty("hostPort", Integer.toString(port));
System.setProperty("shard", shard);
System.setProperty("solr.data.dir", data.toString());
System.setProperty("solr.solr.home", TEST_HOME());
Set<Integer> ports = shardPorts.get(shard);
if (ports == null) {
ports = new HashSet<>();
shardPorts.put(shard, ports);
}
ports.add(port);
SolrResourceLoader loader = new SolrResourceLoader(createTempDir());
Files.copy(TEST_PATH().resolve("solr.xml"), loader.getInstancePath().resolve("solr.xml"));
CoreContainer container = new CoreContainer(loader);
container.load();
container.create("collection1_" + shard, ImmutableMap.of("collection", "collection1"));
containerMap.put(port, container);
System.clearProperty("solr.solr.home");
System.clearProperty("hostPort");
/**
 * Creates a 2-shard collection named {@code collection} and then grows shard1
 * until it has {@link #NUM_REPLICAS_OF_SHARD1} replicas.
 *
 * @param collection name of the collection to create
 * @throws IOException on client communication failure
 * @throws SolrServerException on a Solr-side failure
 */
private void createCollection(String collection) throws IOException, SolrServerException {
  // Initial layout: 2 shards x 1 replica, capped at one shard per node.
  int createStatus = CollectionAdminRequest.createCollection(collection, "conf", 2, 1)
      .setMaxShardsPerNode(1)
      .process(cluster.getSolrClient())
      .getStatus();
  assertEquals(0, createStatus);
  // shard1 already has one replica from the create call, so add the remainder.
  int extraReplicas = NUM_REPLICAS_OF_SHARD1 - 1;
  for (int added = 0; added < extraReplicas; added++) {
    boolean success = CollectionAdminRequest.addReplicaToShard(collection, "shard1")
        .process(cluster.getSolrClient())
        .isSuccess();
    assertTrue(success);
  }
}
@Test
public void testSimpleSliceLeaderElection() throws Exception {
String collection = "collection1";
createCollection(collection);
//printLayout(zkServer.getZkAddress());
for (int i = 0; i < 4; i++) {
// who is the leader?
String leader = getLeader();
Set<Integer> shard1Ports = shardPorts.get("shard1");
int leaderPort = getLeaderPort(leader);
assertTrue(shard1Ports.toString(), shard1Ports.contains(leaderPort));
shard1Ports.remove(leaderPort);
// kill the leader
containerMap.get(leaderPort).shutdown();
//printLayout(zkServer.getZkAddress());
String leader = getLeader(collection);
JettySolrRunner jetty = getRunner(leader);
assertNotNull(jetty);
assertTrue("shard1".equals(jetty.getCoreContainer().getCores().iterator().next()
.getCoreDescriptor().getCloudDescriptor().getShardId()));
jetty.stop();
// poll until leader change is visible
for (int j = 0; j < 90; j++) {
String currentLeader = getLeader();
String currentLeader = getLeader(collection);
if(!leader.equals(currentLeader)) {
break;
}
Thread.sleep(500);
}
leader = getLeader();
int newLeaderPort = getLeaderPort(leader);
leader = getLeader(collection);
int retry = 0;
while (leaderPort == newLeaderPort) {
while (jetty == getRunner(leader)) {
if (retry++ == 60) {
break;
}
Thread.sleep(1000);
}
if (leaderPort == newLeaderPort) {
zkClient.printLayoutToStdOut();
fail("We didn't find a new leader! " + leaderPort + " was close, but it's still showing as the leader");
}
assertTrue("Could not find leader " + newLeaderPort + " in " + shard1Ports, shard1Ports.contains(newLeaderPort));
}
}
@Test
public void testLeaderElectionAfterClientTimeout() throws Exception {
if (jetty == getRunner(leader)) {
cluster.getZkClient().printLayoutToStdOut();
fail("We didn't find a new leader! " + jetty + " was close, but it's still showing as the leader");
}
assertTrue("shard1".equals(getRunner(leader).getCoreContainer().getCores().iterator().next()
.getCoreDescriptor().getCloudDescriptor().getShardId()));
}
cluster.getJettySolrRunners().parallelStream().forEach(jetty -> {
if (jetty.isStopped())
try {
jetty.start();
} catch (Exception e) {
e.printStackTrace();
}
});
waitForState("Expected to see nodes come back " + collection, collection,
(n, c) -> {
return n.size() == 6;
});
CollectionAdminRequest.deleteCollection(collection).process(cluster.getSolrClient());
// testLeaderElectionAfterClientTimeout
collection = "collection2";
createCollection(collection);
// TODO: work out the best timing here...
System.setProperty("zkClientTimeout", Integer.toString(ZkTestServer.TICK_TIME * 2 + 100));
// timeout the leader
String leader = getLeader();
int leaderPort = getLeaderPort(leader);
ZkController zkController = containerMap.get(leaderPort).getZkController();
String leader = getLeader(collection);
JettySolrRunner jetty = getRunner(leader);
ZkController zkController = jetty.getCoreContainer().getZkController();
zkController.getZkClient().getSolrZooKeeper().closeCnxn();
long sessionId = zkClient.getSolrZooKeeper().getSessionId();
zkServer.expire(sessionId);
cluster.getZkServer().expire(zkController.getZkClient().getSolrZooKeeper().getSessionId());
for (int i = 0; i < 60; i++) { // wait till leader is changed
if (leaderPort != getLeaderPort(getLeader())) {
if (jetty != getRunner(getLeader(collection))) {
break;
}
Thread.sleep(100);
}
// make sure we have waited long enough for the first leader to have come back
Thread.sleep(ZkTestServer.TICK_TIME * 2 + 100);
// kill everyone but the first leader that should have reconnected by now
for (Map.Entry<Integer,CoreContainer> entry : containerMap.entrySet()) {
if (entry.getKey() != leaderPort) {
entry.getValue().shutdown();
for (JettySolrRunner jetty2 : cluster.getJettySolrRunners()) {
if (jetty != jetty2) {
jetty2.stop();
}
}
for (int i = 0; i < 320; i++) { // wait till leader is changed
try {
if (leaderPort == getLeaderPort(getLeader())) {
if (jetty == getRunner(getLeader(collection))) {
break;
}
Thread.sleep(100);
@ -246,73 +154,26 @@ public class LeaderElectionIntegrationTest extends SolrTestCaseJ4 {
}
}
// the original leader should be leader again now - everyone else is down
// TODO: I saw this fail once...expected:<7000> but was:<7004>
assertEquals(leaderPort, getLeaderPort(getLeader()));
//printLayout(zkServer.getZkAddress());
//Thread.sleep(100000);
assertEquals(jetty, getRunner(getLeader(collection)));
}
private String getLeader() throws InterruptedException {
/**
 * Finds the running Jetty instance registered under the given node name.
 *
 * @param nodeName the cloud node name to look up
 * @return the matching, not-stopped runner, or {@code null} if none matches
 */
private JettySolrRunner getRunner(String nodeName) {
  for (JettySolrRunner candidate : cluster.getJettySolrRunners()) {
    // Stopped runners keep their old node name; only a live match counts.
    boolean isLive = !candidate.isStopped();
    if (isLive && nodeName.equals(candidate.getNodeName())) {
      return candidate;
    }
  }
  return null;
}
private String getLeader(String collection) throws InterruptedException {
ZkNodeProps props = reader.getLeaderRetry("collection1", "shard1", 30000);
ZkNodeProps props = cluster.getSolrClient().getZkStateReader().getLeaderRetry(collection, "shard1", 30000);
String leader = props.getStr(ZkStateReader.NODE_NAME_PROP);
return leader;
}
/**
 * Extracts the port number embedded in a leader's node-name string
 * (e.g. {@code "127.0.0.1:7000_solr"} yields {@code 7000}).
 *
 * @param leader node name as stored in ZooKeeper
 * @return the four-digit port parsed from the name
 * @throws IllegalStateException if the name does not match the expected {@code HOST} pattern
 */
private int getLeaderPort(String leader) {
  Matcher matcher = HOST.matcher(leader);
  if (!matcher.matches()) {
    // A non-matching name means the test fixture produced an unexpected node id.
    throw new IllegalStateException();
  }
  return Integer.parseInt(matcher.group(1));
}
@Override
public void tearDown() throws Exception {
  // Close ZK-facing resources first so their watchers stop firing before
  // the containers and the ZK server itself go down.
  if (zkClient != null) {
    zkClient.close();
  }
  if (reader != null) {
    reader.close();
  }
  // Shut down every core container the test started (some may already be
  // down from leader-kill scenarios, hence the isShutDown() guard).
  for (CoreContainer cc : containerMap.values()) {
    if (!cc.isShutDown()) {
      cc.shutdown();
    }
  }
  zkServer.shutdown();
  super.tearDown();
  // Clear the system properties set in setUp() so they do not leak into
  // other tests running in the same JVM.
  System.clearProperty("zkClientTimeout");
  System.clearProperty("zkHost");
  System.clearProperty("hostPort");
  System.clearProperty("shard");
  System.clearProperty("solrcloud.update.delay");
}
/**
 * Debug helper: dumps the ZooKeeper node layout at {@code zkHost} to stdout.
 *
 * @param zkHost ZooKeeper connect string
 * @throws Exception if the client cannot connect or the dump fails
 */
private void printLayout(String zkHost) throws Exception {
  SolrZkClient zkClient = new SolrZkClient(zkHost, AbstractZkTestCase.TIMEOUT);
  try {
    zkClient.printLayoutToStdOut();
  } finally {
    // Always release the client connection, even if the dump throws;
    // the original code leaked the client on exception.
    zkClient.close();
  }
}
@AfterClass
public static void afterClass() throws InterruptedException {
  // Clear every system property this suite set, in the original order.
  String[] testProps = {
      "solrcloud.skip.autorecovery",
      "zkClientTimeout",
      "zkHost",
      "shard",
      "solr.data.dir",
      "solr.solr.home"
  };
  for (String prop : testProps) {
    System.clearProperty(prop);
  }
  resetExceptionIgnores();
  // Give any lingering ZK client threads a moment to outlast their timeout.
  Thread.sleep(2000);
}
}

View File

@ -146,8 +146,8 @@ public class LeaderFailureAfterFreshStartTest extends AbstractFullDistribZkTestB
// start the freshNode
restartNodes(singletonList(freshNode));
String replicationProperties = (String) freshNode.jetty.getSolrHome() + "/cores/" + DEFAULT_TEST_COLLECTION_NAME + "/data/replication.properties";
String coreName = freshNode.jetty.getCoreContainer().getCores().iterator().next().getName();
String replicationProperties = freshNode.jetty.getSolrHome() + "/cores/" + coreName + "/data/replication.properties";
String md5 = DigestUtils.md5Hex(Files.readAllBytes(Paths.get(replicationProperties)));
// shutdown the original leader

View File

@ -27,7 +27,10 @@ import org.apache.solr.client.solrj.SolrResponse;
import org.apache.solr.cloud.Overseer.LeaderStatus;
import org.apache.solr.cloud.OverseerTaskQueue.QueueEvent;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
@ -70,7 +73,8 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
private static ClusterState clusterStateMock;
private static SolrZkClient solrZkClientMock;
private final Map zkMap = new HashMap();
private final Set collectionsSet = new HashSet();
private final Map<String, ClusterState.CollectionRef> collectionsSet = new HashMap<>();
private final List<ZkNodeProps> replicas = new ArrayList<>();
private SolrResponse lastProcessMessageResult;
@ -141,6 +145,7 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
zkMap.clear();
collectionsSet.clear();
replicas.clear();
}
@After
@ -193,7 +198,27 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
when(zkStateReaderMock.getZkClient()).thenReturn(solrZkClientMock);
when(zkStateReaderMock.getClusterState()).thenReturn(clusterStateMock);
when(clusterStateMock.getCollections()).thenReturn(collectionsSet);
when(clusterStateMock.getCollection(anyString())).thenAnswer(invocation -> {
String key = invocation.getArgument(0);
if (!collectionsSet.containsKey(key)) return null;
DocCollection docCollection = collectionsSet.get(key).get();
Map<String, Map<String, Replica>> slices = new HashMap<>();
for (ZkNodeProps replica : replicas) {
if (!key.equals(replica.getStr(ZkStateReader.COLLECTION_PROP))) continue;
String slice = replica.getStr(ZkStateReader.SHARD_ID_PROP);
if (!slices.containsKey(slice)) slices.put(slice, new HashMap<>());
String replicaName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
slices.get(slice).put(replicaName, new Replica(replicaName, replica.getProperties()));
}
Map<String, Slice> slicesMap = new HashMap<>();
for (Map.Entry<String, Map<String, Replica>> entry : slices.entrySet()) {
slicesMap.put(entry.getKey(), new Slice(entry.getKey(), entry.getValue(), null));
}
return docCollection.copyWithSlices(slicesMap);
});
final Set<String> liveNodes = new HashSet<>();
for (int i = 0; i < liveNodesCount; i++) {
final String address = "localhost:" + (8963 + i) + "_solr";
@ -202,13 +227,13 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
when(zkStateReaderMock.getBaseUrlForNodeName(address)).thenAnswer(invocation -> address.replaceAll("_", "/"));
}
when(zkStateReaderMock.getClusterProperty("legacyCloud", "true")).thenReturn("true");
when(zkStateReaderMock.getClusterProperty("legacyCloud", "false")).thenReturn("false");
when(solrZkClientMock.getZkClientTimeout()).thenReturn(30000);
when(clusterStateMock.hasCollection(anyString())).thenAnswer(invocation -> {
String key = invocation.getArgument(0);
return collectionsSet.contains(key);
return collectionsSet.containsKey(key);
});
when(clusterStateMock.getLiveNodes()).thenReturn(liveNodes);
@ -234,7 +259,11 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
ZkNodeProps props = ZkNodeProps.load(bytes);
if(CollectionParams.CollectionAction.CREATE.isEqual(props.getStr("operation"))){
String collName = props.getStr("name") ;
if(collName != null) collectionsSet.add(collName);
if(collName != null) collectionsSet.put(collName, new ClusterState.CollectionRef(
new DocCollection(collName, new HashMap<>(), props.getProperties(), DocRouter.DEFAULT)));
}
if (CollectionParams.CollectionAction.ADDREPLICA.isEqual(props.getStr("operation"))) {
replicas.add(props);
}
} catch (Exception e) { }
}
@ -297,7 +326,7 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
ArgumentCaptor<ModifiableSolrParams> paramsCaptor = ArgumentCaptor.forClass(ModifiableSolrParams.class);
verify(shardHandlerMock, times(numberOfReplica * numberOfSlices))
.submit(shardRequestCaptor.capture(), nodeUrlsWithoutProtocolPartCaptor.capture(), paramsCaptor.capture());
log.info("Datcmzz " + shardRequestCaptor.getAllValues().size());
for (int i = 0; i < shardRequestCaptor.getAllValues().size(); i++) {
ShardRequest shardRequest = shardRequestCaptor.getAllValues().get(i);
String nodeUrlsWithoutProtocolPartCapture = nodeUrlsWithoutProtocolPartCaptor.getAllValues().get(i);

View File

@ -29,7 +29,6 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
@ -47,13 +46,11 @@ import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.Utils;
import org.apache.solr.core.CloudConfig;
import org.apache.solr.handler.component.HttpShardHandlerFactory;
import org.apache.solr.update.UpdateShardHandler;
import org.apache.solr.update.UpdateShardHandlerConfig;
import org.apache.solr.util.DefaultSolrThreadFactory;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
@ -131,8 +128,20 @@ public class OverseerTest extends SolrTestCaseJ4 {
zkStateReader.close();
zkClient.close();
}
public String publishState(String collection, String coreName, String coreNodeName, Replica.State stateName, int numShards)
/**
 * Enqueues a CREATE message for {@code collection} on the Overseer
 * state-update queue, with replicationFactor=1, {@code numShards} shards and
 * an empty createNodeSet (no replicas are placed automatically).
 *
 * @param collection collection name to create
 * @param numShards  number of shards the collection should have
 * @throws KeeperException on a ZooKeeper failure while offering the message
 * @throws InterruptedException if interrupted while offering the message
 */
public void createCollection(String collection, int numShards) throws KeeperException, InterruptedException {
  ZkNodeProps createMessage = new ZkNodeProps(
      Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
      "name", collection,
      ZkStateReader.REPLICATION_FACTOR, "1",
      ZkStateReader.NUM_SHARDS_PROP, Integer.toString(numShards),
      "createNodeSet", "");
  Overseer.getStateUpdateQueue(zkClient).offer(Utils.toJSON(createMessage));
}
public String publishState(String collection, String coreName, String coreNodeName, String shard, Replica.State stateName, int numShards)
throws KeeperException, InterruptedException, IOException {
if (stateName == null) {
ElectionContext ec = electionContext.remove(coreName);
@ -144,22 +153,23 @@ public class OverseerTest extends SolrTestCaseJ4 {
ZkStateReader.CORE_NAME_PROP, coreName,
ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName,
ZkStateReader.COLLECTION_PROP, collection);
DistributedQueue q = Overseer.getStateUpdateQueue(zkClient);
q.offer(Utils.toJSON(m));
return null;
DistributedQueue q = Overseer.getStateUpdateQueue(zkClient);
q.offer(Utils.toJSON(m));
return null;
} else {
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
ZkStateReader.STATE_PROP, stateName.toString(),
ZkStateReader.NODE_NAME_PROP, nodeName,
ZkStateReader.CORE_NAME_PROP, coreName,
ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName,
ZkStateReader.COLLECTION_PROP, collection,
ZkStateReader.NUM_SHARDS_PROP, Integer.toString(numShards),
ZkStateReader.BASE_URL_PROP, "http://" + nodeName + "/solr/");
ZkStateReader.STATE_PROP, stateName.toString(),
ZkStateReader.NODE_NAME_PROP, nodeName,
ZkStateReader.CORE_NAME_PROP, coreName,
ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName,
ZkStateReader.COLLECTION_PROP, collection,
ZkStateReader.SHARD_ID_PROP, shard,
ZkStateReader.NUM_SHARDS_PROP, Integer.toString(numShards),
ZkStateReader.BASE_URL_PROP, "http://" + nodeName + "/solr/");
DistributedQueue q = Overseer.getStateUpdateQueue(zkClient);
q.offer(Utils.toJSON(m));
}
if (collection.length() > 0) {
for (int i = 0; i < 120; i++) {
String shardId = getShardId(collection, coreNodeName);
@ -193,7 +203,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
}
return null;
}
private String getShardId(String collection, String coreNodeName) {
Map<String,Slice> slices = zkStateReader.getClusterState().getSlicesMap(collection);
if (slices != null) {
@ -257,7 +267,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
server.run();
AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
zkClient = new SolrZkClient(server.getZkAddress(), TIMEOUT);
ZkController.createClusterZkNodes(zkClient);
@ -265,24 +275,32 @@ public class OverseerTest extends SolrTestCaseJ4 {
ZkStateReader reader = new ZkStateReader(zkClient);
reader.createClusterStateWatchersAndUpdate();
zkController = new MockZKController(server.getZkAddress(), "127.0.0.1");
final int numShards=6;
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
"name", COLLECTION,
ZkStateReader.REPLICATION_FACTOR, "1",
ZkStateReader.NUM_SHARDS_PROP, "3",
"createNodeSet", "");
DistributedQueue q = Overseer.getStateUpdateQueue(zkClient);
q.offer(Utils.toJSON(m));
for (int i = 0; i < numShards; i++) {
assertNotNull("shard got no id?", zkController.publishState(COLLECTION, "core" + (i+1), "node" + (i+1), Replica.State.ACTIVE, 3));
assertNotNull("shard got no id?", zkController.publishState(COLLECTION, "core" + (i+1), "node" + (i+1), "shard"+((i%3)+1), Replica.State.ACTIVE, 3));
}
final Map<String,Replica> rmap = reader.getClusterState().getSlice(COLLECTION, "shard1").getReplicasMap();
assertEquals(rmap.toString(), 2, rmap.size());
assertEquals(rmap.toString(), 2, reader.getClusterState().getSlice(COLLECTION, "shard2").getReplicasMap().size());
assertEquals(rmap.toString(), 2, reader.getClusterState().getSlice(COLLECTION, "shard3").getReplicasMap().size());
//make sure leaders are in cloud state
assertNotNull(reader.getLeaderUrl(COLLECTION, "shard1", 15000));
assertNotNull(reader.getLeaderUrl(COLLECTION, "shard2", 15000));
assertNotNull(reader.getLeaderUrl(COLLECTION, "shard3", 15000));
} finally {
close(zkClient);
if (zkController != null) {
@ -319,9 +337,10 @@ public class OverseerTest extends SolrTestCaseJ4 {
zkController = new MockZKController(server.getZkAddress(), "127.0.0.1");
final int numShards=3;
zkController.createCollection(COLLECTION, 3);
for (int i = 0; i < numShards; i++) {
assertNotNull("shard got no id?", zkController.publishState(COLLECTION, "core" + (i+1), "node" + (i+1), Replica.State.ACTIVE, 3));
assertNotNull("shard got no id?", zkController.publishState(COLLECTION, "core" + (i+1),
"node" + (i+1), "shard"+((i%3)+1) , Replica.State.ACTIVE, 3));
}
assertEquals(1, reader.getClusterState().getSlice(COLLECTION, "shard1").getReplicasMap().size());
@ -335,12 +354,14 @@ public class OverseerTest extends SolrTestCaseJ4 {
// publish a bad queue item
String emptyCollectionName = "";
zkController.publishState(emptyCollectionName, "core0", "node0", Replica.State.ACTIVE, 1);
zkController.publishState(emptyCollectionName, "core0", "node0", null, 1);
zkController.publishState(emptyCollectionName, "core0", "node0", "shard1", Replica.State.ACTIVE, 1);
zkController.publishState(emptyCollectionName, "core0", "node0", "shard1", null, 1);
zkController.createCollection("collection2", 3);
// make sure the Overseer is still processing items
for (int i = 0; i < numShards; i++) {
assertNotNull("shard got no id?", zkController.publishState("collection2", "core" + (i + 1), "node" + (i + 1), Replica.State.ACTIVE, 3));
assertNotNull("shard got no id?", zkController.publishState("collection2",
"core" + (i + 1), "node" + (i + 1),"shard"+((i%3)+1), Replica.State.ACTIVE, 3));
}
assertEquals(1, reader.getClusterState().getSlice("collection2", "shard1").getReplicasMap().size());
@ -361,147 +382,6 @@ public class OverseerTest extends SolrTestCaseJ4 {
server.shutdown();
}
}
@Test
public void testShardAssignmentBigger() throws Exception {
String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
final int nodeCount = random().nextInt(TEST_NIGHTLY ? 50 : 10)+(TEST_NIGHTLY ? 50 : 10)+1; //how many simulated nodes (num of threads)
final int coreCount = random().nextInt(TEST_NIGHTLY ? 100 : 11)+(TEST_NIGHTLY ? 100 : 11)+1; //how many cores to register
final int sliceCount = random().nextInt(TEST_NIGHTLY ? 20 : 5)+1; //how many slices
ZkTestServer server = new ZkTestServer(zkDir);
SolrZkClient zkClient = null;
ZkStateReader reader = null;
SolrZkClient overseerClient = null;
final MockZKController[] controllers = new MockZKController[nodeCount];
final ExecutorService[] nodeExecutors = new ExecutorService[nodeCount];
try {
server.run();
AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
zkClient = new SolrZkClient(server.getZkAddress(), TIMEOUT);
ZkController.createClusterZkNodes(zkClient);
overseerClient = electNewOverseer(server.getZkAddress());
reader = new ZkStateReader(zkClient);
reader.createClusterStateWatchersAndUpdate();
for (int i = 0; i < nodeCount; i++) {
controllers[i] = new MockZKController(server.getZkAddress(), "node" + i);
}
for (int i = 0; i < nodeCount; i++) {
nodeExecutors[i] = ExecutorUtil.newMDCAwareFixedThreadPool(1, new DefaultSolrThreadFactory("testShardAssignment"));
}
final String[] ids = new String[coreCount];
//register total of coreCount cores
for (int i = 0; i < coreCount; i++) {
final int slot = i;
nodeExecutors[i % nodeCount].submit((Runnable) () -> {
final String coreName = "core" + slot;
try {
ids[slot] = controllers[slot % nodeCount].publishState(COLLECTION, coreName, "node" + slot, Replica.State.ACTIVE, sliceCount);
} catch (Throwable e) {
e.printStackTrace();
fail("register threw exception:" + e.getClass());
}
});
}
for (int i = 0; i < nodeCount; i++) {
nodeExecutors[i].shutdown();
}
for (int i = 0; i < nodeCount; i++) {
while (!nodeExecutors[i].awaitTermination(100, TimeUnit.MILLISECONDS));
}
// make sure all cores have been assigned a id in cloudstate
int cloudStateSliceCount = 0;
for (int i = 0; i < 40; i++) {
cloudStateSliceCount = 0;
ClusterState state = reader.getClusterState();
final Map<String,Slice> slices = state.getSlicesMap(COLLECTION);
if (slices != null) {
for (String name : slices.keySet()) {
cloudStateSliceCount += slices.get(name).getReplicasMap().size();
}
if (coreCount == cloudStateSliceCount) break;
}
Thread.sleep(200);
}
assertEquals("Unable to verify all cores have been assigned an id in cloudstate",
coreCount, cloudStateSliceCount);
// make sure all cores have been returned an id
int assignedCount = 0;
for (int i = 0; i < 240; i++) {
assignedCount = 0;
for (int j = 0; j < coreCount; j++) {
if (ids[j] != null) {
assignedCount++;
}
}
if (coreCount == assignedCount) {
break;
}
Thread.sleep(1000);
}
assertEquals("Unable to verify all cores have been returned an id",
coreCount, assignedCount);
final HashMap<String, AtomicInteger> counters = new HashMap<>();
for (int i = 1; i < sliceCount+1; i++) {
counters.put("shard" + i, new AtomicInteger());
}
for (int i = 0; i < coreCount; i++) {
final AtomicInteger ai = counters.get(ids[i]);
assertNotNull("could not find counter for shard:" + ids[i], ai);
ai.incrementAndGet();
}
for (String counter: counters.keySet()) {
int count = counters.get(counter).intValue();
int expectedCount = coreCount / sliceCount;
int min = expectedCount - 1;
int max = expectedCount + 1;
if (count < min || count > max) {
fail("Unevenly assigned shard ids, " + counter + " had " + count
+ ", expected: " + min + "-" + max);
}
}
//make sure leaders are in cloud state
for (int i = 0; i < sliceCount; i++) {
assertNotNull(reader.getLeaderUrl(COLLECTION, "shard" + (i + 1), 15000));
}
} finally {
close(zkClient);
close(overseerClient);
close(reader);
for (int i = 0; i < controllers.length; i++)
if (controllers[i] != null) {
controllers[i].close();
}
server.shutdown();
for (int i = 0; i < nodeCount; i++) {
if (nodeExecutors[i] != null) {
nodeExecutors[i].shutdownNow();
}
}
}
}
//wait until collections are available
private void waitForCollections(ZkStateReader stateReader, String... collections) throws InterruptedException, KeeperException {
@ -545,11 +425,19 @@ public class OverseerTest extends SolrTestCaseJ4 {
overseerClient = electNewOverseer(server.getZkAddress());
DistributedQueue q = Overseer.getStateUpdateQueue(zkClient);
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
"name", COLLECTION,
ZkStateReader.REPLICATION_FACTOR, "1",
ZkStateReader.NUM_SHARDS_PROP, "1",
"createNodeSet", "");
q.offer(Utils.toJSON(m));
m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.COLLECTION_PROP, COLLECTION,
ZkStateReader.SHARD_ID_PROP, "shard1",
ZkStateReader.CORE_NAME_PROP, "core1",
ZkStateReader.ROLES_PROP, "",
ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
@ -557,15 +445,14 @@ public class OverseerTest extends SolrTestCaseJ4 {
q.offer(Utils.toJSON(m));
waitForCollections(reader, COLLECTION);
assertSame(reader.getClusterState().toString(), Replica.State.RECOVERING,
reader.getClusterState().getSlice(COLLECTION, "shard1").getReplica("core_node1").getState());
verifyReplicaStatus(reader, "collection1", "shard1", "core_node1", Replica.State.RECOVERING);
//publish node state (active)
m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.COLLECTION_PROP, COLLECTION,
ZkStateReader.SHARD_ID_PROP, "shard1",
ZkStateReader.CORE_NAME_PROP, "core1",
ZkStateReader.ROLES_PROP, "",
ZkStateReader.STATE_PROP, Replica.State.ACTIVE.toString());
@ -634,7 +521,8 @@ public class OverseerTest extends SolrTestCaseJ4 {
overseerClient = electNewOverseer(server.getZkAddress());
Thread.sleep(1000);
mockController.publishState(COLLECTION, core, core_node,
mockController.createCollection(COLLECTION, 1);
mockController.publishState(COLLECTION, core, core_node, "shard1",
Replica.State.RECOVERING, numShards);
waitForCollections(reader, COLLECTION);
@ -642,7 +530,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
int version = getClusterStateVersion(zkClient);
mockController.publishState(COLLECTION, core, core_node, Replica.State.ACTIVE,
mockController.publishState(COLLECTION, core, core_node, "shard1", Replica.State.ACTIVE,
numShards);
while (version == getClusterStateVersion(zkClient));
@ -652,7 +540,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
overseerClient.close();
Thread.sleep(1000); // wait for overseer to get killed
mockController.publishState(COLLECTION, core, core_node,
mockController.publishState(COLLECTION, core, core_node, "shard1",
Replica.State.RECOVERING, numShards);
version = getClusterStateVersion(zkClient);
@ -667,7 +555,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
assertEquals(shard+" replica count does not match", 1, reader.getClusterState()
.getSlice(COLLECTION, shard).getReplicasMap().size());
version = getClusterStateVersion(zkClient);
mockController.publishState(COLLECTION, core, core_node, null, numShards);
mockController.publishState(COLLECTION, core, core_node, "shard1", null, numShards);
while (version == getClusterStateVersion(zkClient));
Thread.sleep(500);
assertTrue(COLLECTION +" should remain after removal of the last core", // as of SOLR-5209 core removal does not cascade to remove the slice and collection
@ -723,7 +611,8 @@ public class OverseerTest extends SolrTestCaseJ4 {
overseerElector.setup(ec);
overseerElector.joinElection(ec, false);
mockController.publishState(COLLECTION, "core1", "core_node1", Replica.State.ACTIVE, 1);
mockController.createCollection(COLLECTION, 1);
mockController.publishState(COLLECTION, "core1", "core_node1", "shard1", Replica.State.ACTIVE, 1);
assertNotNull(overseer.getStats());
assertTrue((overseer.getStats().getSuccessCount(OverseerAction.STATE.toLower())) > 0);
@ -819,16 +708,17 @@ public class OverseerTest extends SolrTestCaseJ4 {
for (int i = 0; i < atLeast(4); i++) {
killCounter.incrementAndGet(); //for each round allow 1 kill
mockController = new MockZKController(server.getZkAddress(), "node1");
mockController.publishState(COLLECTION, "core1", "node1", Replica.State.ACTIVE,1);
mockController.createCollection(COLLECTION, 1);
mockController.publishState(COLLECTION, "core1", "node1", "shard1", Replica.State.ACTIVE,1);
if(mockController2!=null) {
mockController2.close();
mockController2 = null;
}
mockController.publishState(COLLECTION, "core1", "node1",Replica.State.RECOVERING,1);
mockController.publishState(COLLECTION, "core1", "node1","shard1", Replica.State.RECOVERING,1);
mockController2 = new MockZKController(server.getZkAddress(), "node2");
mockController.publishState(COLLECTION, "core1", "node1", Replica.State.ACTIVE,1);
mockController.publishState(COLLECTION, "core1", "node1","shard1", Replica.State.ACTIVE,1);
verifyShardLeader(reader, COLLECTION, "shard1", "core1");
mockController2.publishState(COLLECTION, "core4", "node2", Replica.State.ACTIVE ,1);
mockController2.publishState(COLLECTION, "core4", "node2", "shard1", Replica.State.ACTIVE ,1);
mockController.close();
mockController = null;
verifyShardLeader(reader, COLLECTION, "shard1", "core4");
@ -874,7 +764,8 @@ public class OverseerTest extends SolrTestCaseJ4 {
overseerClient = electNewOverseer(server.getZkAddress());
mockController.publishState(COLLECTION, "core1", "core_node1", Replica.State.RECOVERING, 1);
mockController.createCollection(COLLECTION, 1);
mockController.publishState(COLLECTION, "core1", "core_node1", "shard1", Replica.State.RECOVERING, 1);
waitForCollections(reader, "collection1");
@ -885,7 +776,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
int version = getClusterStateVersion(controllerClient);
mockController = new MockZKController(server.getZkAddress(), "node1");
mockController.publishState(COLLECTION, "core1", "core_node1", Replica.State.RECOVERING, 1);
mockController.publishState(COLLECTION, "core1", "core_node1","shard1", Replica.State.RECOVERING, 1);
while (version == reader.getClusterState().getZkClusterStateVersion()) {
Thread.sleep(100);
@ -914,47 +805,6 @@ public class OverseerTest extends SolrTestCaseJ4 {
}
}
@Test
public void testPlaceholders() throws Exception {
String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
ZkTestServer server = new ZkTestServer(zkDir);
SolrZkClient controllerClient = null;
SolrZkClient overseerClient = null;
ZkStateReader reader = null;
MockZKController mockController = null;
try {
server.run();
controllerClient = new SolrZkClient(server.getZkAddress(), TIMEOUT);
AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
ZkController.createClusterZkNodes(controllerClient);
reader = new ZkStateReader(controllerClient);
reader.createClusterStateWatchersAndUpdate();
mockController = new MockZKController(server.getZkAddress(), "node1");
overseerClient = electNewOverseer(server.getZkAddress());
mockController.publishState(COLLECTION, "core1", "node1", Replica.State.RECOVERING, 12);
waitForCollections(reader, COLLECTION);
assertEquals("Slicecount does not match", 12, reader.getClusterState().getSlices(COLLECTION).size());
} finally {
close(overseerClient);
close(mockController);
close(controllerClient);
close(reader);
server.shutdown();
}
}
@Test
@Ignore
public void testPerformance() throws Exception {
@ -1113,10 +963,17 @@ public class OverseerTest extends SolrTestCaseJ4 {
reader.createClusterStateWatchersAndUpdate();
//prepopulate work queue with some items to emulate previous overseer died before persisting state
DistributedQueue queue = Overseer.getInternalWorkQueue(zkClient, new Overseer.Stats());
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
"name", COLLECTION,
ZkStateReader.REPLICATION_FACTOR, "1",
ZkStateReader.NUM_SHARDS_PROP, "1",
"createNodeSet", "");
queue.offer(Utils.toJSON(m));
m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.SHARD_ID_PROP, "s1",
ZkStateReader.SHARD_ID_PROP, "shard1",
ZkStateReader.COLLECTION_PROP, COLLECTION,
ZkStateReader.CORE_NAME_PROP, "core1",
ZkStateReader.ROLES_PROP, "",
@ -1125,7 +982,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
m = new ZkNodeProps(Overseer.QUEUE_OPERATION, "state",
ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.SHARD_ID_PROP, "s1",
ZkStateReader.SHARD_ID_PROP, "shard1",
ZkStateReader.COLLECTION_PROP, COLLECTION,
ZkStateReader.CORE_NAME_PROP, "core2",
ZkStateReader.ROLES_PROP, "",
@ -1139,7 +996,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.SHARD_ID_PROP, "s1",
ZkStateReader.SHARD_ID_PROP, "shard1",
ZkStateReader.COLLECTION_PROP, COLLECTION,
ZkStateReader.CORE_NAME_PROP, "core3",
ZkStateReader.ROLES_PROP, "",
@ -1147,12 +1004,12 @@ public class OverseerTest extends SolrTestCaseJ4 {
queue.offer(Utils.toJSON(m));
for(int i=0;i<100;i++) {
Slice s = reader.getClusterState().getSlice(COLLECTION, "s1");
Slice s = reader.getClusterState().getSlice(COLLECTION, "shard1");
if(s!=null && s.getReplicasMap().size()==3) break;
Thread.sleep(100);
}
assertNotNull(reader.getClusterState().getSlice(COLLECTION, "s1"));
assertEquals(3, reader.getClusterState().getSlice(COLLECTION, "s1").getReplicasMap().size());
assertNotNull(reader.getClusterState().getSlice(COLLECTION, "shard1"));
assertEquals(3, reader.getClusterState().getSlice(COLLECTION, "shard1").getReplicasMap().size());
} finally {
close(overseerClient);
close(zkClient);
@ -1188,8 +1045,17 @@ public class OverseerTest extends SolrTestCaseJ4 {
DistributedQueue q = Overseer.getStateUpdateQueue(zkClient);
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, CollectionParams.CollectionAction.CREATE.toLower(),
"name", "c1",
ZkStateReader.REPLICATION_FACTOR, "1",
ZkStateReader.NUM_SHARDS_PROP, "1",
"createNodeSet", "");
q.offer(Utils.toJSON(m));
m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
ZkStateReader.SHARD_ID_PROP, "shard1",
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.COLLECTION_PROP, "c1",
ZkStateReader.CORE_NAME_PROP, "core1",
@ -1203,6 +1069,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
ZkStateReader.SHARD_ID_PROP, "shard1",
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.COLLECTION_PROP, "c1",
ZkStateReader.CORE_NAME_PROP, "core1",
@ -1214,6 +1081,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
ZkStateReader.SHARD_ID_PROP, "shard1",
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.COLLECTION_PROP, "c1",
ZkStateReader.CORE_NAME_PROP, "core1",
@ -1379,6 +1247,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
final int N = (numReplicas-rr)*numShards + ss;
ZkNodeProps m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),
ZkStateReader.BASE_URL_PROP, "http://127.0.0.1/solr",
ZkStateReader.SHARD_ID_PROP, "shard"+ss,
ZkStateReader.NODE_NAME_PROP, "node"+N,
ZkStateReader.COLLECTION_PROP, COLLECTION,
ZkStateReader.CORE_NAME_PROP, "core"+N,

View File

@ -83,7 +83,7 @@ public class ReplaceNodeTest extends SolrCloudTestCase {
new CollectionAdminRequest.ReplaceNode(node2bdecommissioned, emptyNode).processAsync("000", cloudClient);
CollectionAdminRequest.RequestStatus requestStatus = CollectionAdminRequest.requestStatus("000");
boolean success = false;
for (int i = 0; i < 200; i++) {
for (int i = 0; i < 300; i++) {
CollectionAdminRequest.RequestStatusResponse rsp = requestStatus.process(cloudClient);
if (rsp.getRequestStatus() == RequestStatusState.COMPLETED) {
success = true;

View File

@ -20,6 +20,8 @@ import java.io.File;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.common.cloud.Replica;
import org.junit.BeforeClass;
import org.junit.Test;
@ -60,6 +62,16 @@ public class ShardRoutingCustomTest extends AbstractFullDistribZkTestBase {
jettyDir.mkdirs();
setupJettySolrHome(jettyDir);
JettySolrRunner j = createJetty(jettyDir, createTempDir().toFile().getAbsolutePath(), "shardA", "solrconfig.xml", null);
assertEquals(0, CollectionAdminRequest
.createCollection(DEFAULT_COLLECTION, 1, 1)
.setStateFormat(Integer.parseInt(getStateFormat()))
.setCreateNodeSet("")
.process(cloudClient).getStatus());
assertTrue(CollectionAdminRequest
.addReplicaToShard(collection,"shard1")
.setNode(j.getNodeName())
.setType(useTlogReplicas()? Replica.Type.TLOG: Replica.Type.NRT)
.process(cloudClient).isSuccess());
jettys.add(j);
SolrClient client = createNewSolrClient(j.getLocalPort());
clients.add(client);

View File

@ -21,8 +21,9 @@ import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.CoreAdminRequest.Unload;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.cloud.Replica;
@ -32,12 +33,12 @@ import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.core.SolrCore;
import org.apache.solr.update.DirectUpdateHandler2;
import org.apache.solr.util.DefaultSolrThreadFactory;
import org.apache.solr.util.TimeOut;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.Random;
@ -108,37 +109,28 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
final String coreName1 = collection+"_1";
final String coreName2 = collection+"_2";
// create one leader and one replica
Create createCmd = new Create();
createCmd.setCoreName(coreName1);
createCmd.setCollection(collection);
String coreDataDir = createTempDir().toFile().getAbsolutePath();
createCmd.setDataDir(getDataDir(coreDataDir));
createCmd.setNumShards(numShards);
SolrClient client = clients.get(0);
String url1 = getBaseUrl(client);
assertEquals(0, CollectionAdminRequest.createCollection(collection, numShards, 1)
.setCreateNodeSet("")
.process(cloudClient).getStatus());
assertTrue(CollectionAdminRequest.addReplicaToShard(collection, "shard1")
.setCoreName(coreName1)
.setNode(jettys.get(0).getNodeName())
.process(cloudClient).isSuccess());
try (HttpSolrClient adminClient = getHttpSolrClient(url1)) {
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(60000);
adminClient.request(createCmd);
assertTrue(CollectionAdminRequest.addReplicaToShard(collection, "shard2")
.setCoreName(coreName2)
.setNode(jettys.get(0).getNodeName())
.process(cloudClient).isSuccess());
createCmd = new Create();
createCmd.setCoreName(coreName2);
createCmd.setCollection(collection);
coreDataDir = createTempDir().toFile().getAbsolutePath();
createCmd.setDataDir(getDataDir(coreDataDir));
adminClient.request(createCmd);
// does not mean they are active and up yet :*
waitForRecoveriesToFinish(collection, false);
// does not mean they are active and up yet :*
waitForRecoveriesToFinish(collection, false);
final boolean unloadInOrder = random().nextBoolean();
final String unloadCmdCoreName1 = (unloadInOrder ? coreName1 : coreName2);
final String unloadCmdCoreName2 = (unloadInOrder ? coreName2 : coreName1);
final boolean unloadInOrder = random().nextBoolean();
final String unloadCmdCoreName1 = (unloadInOrder ? coreName1 : coreName2);
final String unloadCmdCoreName2 = (unloadInOrder ? coreName2 : coreName1);
try (HttpSolrClient adminClient = getHttpSolrClient(buildUrl(jettys.get(0).getLocalPort()))) {
// now unload one of the two
Unload unloadCmd = new Unload(false);
unloadCmd.setCoreName(unloadCmdCoreName1);
@ -163,42 +155,26 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
* @throws Exception on any problem
*/
private void testCoreUnloadAndLeaders() throws Exception {
File tmpDir = createTempDir().toFile();
JettySolrRunner jetty1 = jettys.get(0);
String core1DataDir = tmpDir.getAbsolutePath() + File.separator + System.nanoTime() + "unloadcollection1" + "_1n";
// create a new collection collection
SolrClient client = clients.get(0);
String url1 = getBaseUrl(client);
try (HttpSolrClient adminClient = getHttpSolrClient(url1)) {
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(60000);
Create createCmd = new Create();
createCmd.setCoreName("unloadcollection1");
createCmd.setCollection("unloadcollection");
createCmd.setNumShards(1);
createCmd.setDataDir(getDataDir(core1DataDir));
adminClient.request(createCmd);
}
assertEquals(0, CollectionAdminRequest
.createCollection("unloadcollection", 1,1)
.setCreateNodeSet(jetty1.getNodeName())
.process(cloudClient).getStatus());
ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
zkStateReader.forceUpdateCollection("unloadcollection");
int slices = zkStateReader.getClusterState().getCollection("unloadcollection").getSlices().size();
assertEquals(1, slices);
client = clients.get(1);
String url2 = getBaseUrl(client);
try (HttpSolrClient adminClient = getHttpSolrClient(url2)) {
SolrCore solrCore = jetty1.getCoreContainer().getCore("unloadcollection_shard1_replica1");
String core1DataDir = solrCore.getDataDir();
solrCore.close();
Create createCmd = new Create();
createCmd.setCoreName("unloadcollection2");
createCmd.setCollection("unloadcollection");
String core2dataDir = tmpDir.getAbsolutePath() + File.separator + System.nanoTime() + "unloadcollection1" + "_2n";
createCmd.setDataDir(getDataDir(core2dataDir));
adminClient.request(createCmd);
}
assertTrue(CollectionAdminRequest
.addReplicaToShard("unloadcollection", "shard1")
.setNode(jettys.get(1).getNodeName())
.process(cloudClient).isSuccess());
zkStateReader.forceUpdateCollection("unloadcollection");
slices = zkStateReader.getClusterState().getCollection("unloadcollection").getSlices().size();
assertEquals(1, slices);
@ -225,24 +201,17 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
}
}
// create another replica for our collection
client = clients.get(2);
String url3 = getBaseUrl(client);
try (HttpSolrClient adminClient = getHttpSolrClient(url3)) {
Create createCmd = new Create();
createCmd.setCoreName("unloadcollection3");
createCmd.setCollection("unloadcollection");
String core3dataDir = tmpDir.getAbsolutePath() + File.separator + System.nanoTime() + "unloadcollection" + "_3n";
createCmd.setDataDir(getDataDir(core3dataDir));
adminClient.request(createCmd);
}
assertTrue(CollectionAdminRequest
.addReplicaToShard("unloadcollection", "shard1")
.setNode(jettys.get(2).getNodeName())
.process(cloudClient).isSuccess());
waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
// so that we start with some versions when we reload...
DirectUpdateHandler2.commitOnClose = false;
try (HttpSolrClient addClient = getHttpSolrClient(url3 + "/unloadcollection3")) {
try (HttpSolrClient addClient = getHttpSolrClient(jettys.get(2).getBaseUrl() + "/unloadcollection_shard1_replica3")) {
addClient.setConnectionTimeout(30000);
// add a few docs
@ -276,11 +245,11 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
fail("Leader never changed");
}
}
// ensure there is a leader
zkStateReader.getLeaderRetry("unloadcollection", "shard1", 15000);
try (HttpSolrClient addClient = getHttpSolrClient(url2 + "/unloadcollection2")) {
try (HttpSolrClient addClient = getHttpSolrClient(jettys.get(1).getBaseUrl() + "/unloadcollection_shard1_replica2")) {
addClient.setConnectionTimeout(30000);
addClient.setSoTimeout(90000);
@ -291,23 +260,14 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
addClient.add(doc1);
}
}
// create another replica for our collection
client = clients.get(3);
String url4 = getBaseUrl(client);
try (HttpSolrClient adminClient = getHttpSolrClient(url4)) {
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(30000);
Create createCmd = new Create();
createCmd.setCoreName("unloadcollection4");
createCmd.setCollection("unloadcollection");
String core4dataDir = tmpDir.getAbsolutePath() + File.separator + System.nanoTime() + "unloadcollection" + "_4n";
createCmd.setDataDir(getDataDir(core4dataDir));
adminClient.request(createCmd);
}
assertTrue(CollectionAdminRequest
.addReplicaToShard("unloadcollection", "shard1")
.setNode(jettys.get(3).getNodeName())
.process(cloudClient).isSuccess());
waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
// unload the leader again
leaderProps = getLeaderUrlFromZk("unloadcollection", "shard1");
try (HttpSolrClient collectionClient = getHttpSolrClient(leaderProps.getBaseUrl())) {
@ -326,29 +286,22 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
fail("Leader never changed");
}
}
zkStateReader.getLeaderRetry("unloadcollection", "shard1", 15000);
// set this back
DirectUpdateHandler2.commitOnClose = true;
// bring the downed leader back as replica
try (HttpSolrClient adminClient = getHttpSolrClient(leaderProps.getBaseUrl())) {
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(30000);
assertTrue(CollectionAdminRequest
.addReplicaToShard("unloadcollection", "shard1")
.setDataDir(core1DataDir)
.setNode(leaderProps.getNodeName())
.process(cloudClient).isSuccess());
Create createCmd = new Create();
createCmd.setCoreName(leaderProps.getCoreName());
createCmd.setCollection("unloadcollection");
createCmd.setDataDir(getDataDir(core1DataDir));
adminClient.request(createCmd);
}
waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
long found1, found3;
try (HttpSolrClient adminClient = getHttpSolrClient(url2 + "/unloadcollection")) {
try (HttpSolrClient adminClient = getHttpSolrClient(jettys.get(1).getBaseUrl() + "/unloadcollection_shard1_replica2")) {
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(30000);
adminClient.commit();
@ -356,7 +309,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
q.set("distrib", false);
found1 = adminClient.query(q).getResults().getNumFound();
}
try (HttpSolrClient adminClient = getHttpSolrClient(url3 + "/unloadcollection")) {
try (HttpSolrClient adminClient = getHttpSolrClient(jettys.get(2).getBaseUrl() + "/unloadcollection_shard1_replica3")) {
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(30000);
adminClient.commit();
@ -365,7 +318,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
found3 = adminClient.query(q).getResults().getNumFound();
}
try (HttpSolrClient adminClient = getHttpSolrClient(url4 + "/unloadcollection")) {
try (HttpSolrClient adminClient = getHttpSolrClient(jettys.get(3).getBaseUrl() + "/unloadcollection_shard1_replica4")) {
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(30000);
adminClient.commit();

View File

@ -109,7 +109,7 @@ public class TestConfigReload extends AbstractFullDistribZkTestBase {
assertTrue(newStat.getVersion() > stat.getVersion());
log.info("new_version "+ newStat.getVersion());
Integer newVersion = newStat.getVersion();
long maxTimeoutSeconds = 20;
long maxTimeoutSeconds = 60;
DocCollection coll = cloudClient.getZkStateReader().getClusterState().getCollection("collection1");
List<String> urls = new ArrayList<>();
for (Slice slice : coll.getSlices()) {

View File

@ -62,7 +62,7 @@ public class TestCloudManagedSchema extends AbstractFullDistribZkTestBase {
NamedList namedListResponse = client.request(request);
client.setBaseURL(previousBaseURL); // Restore baseURL
NamedList status = (NamedList)namedListResponse.get("status");
NamedList collectionStatus = (NamedList)status.get("collection1");
NamedList collectionStatus = (NamedList)status.getVal(0);
String collectionSchema = (String)collectionStatus.get(CoreAdminParams.SCHEMA);
// Make sure the upgrade to managed schema happened
assertEquals("Schema resource name differs from expected name", "managed-schema", collectionSchema);

View File

@ -1360,6 +1360,7 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
protected String collection;
protected String shard;
protected String node;
protected String coreName;
protected String routeKey;
protected String instanceDir;
protected String dataDir;
@ -1426,6 +1427,11 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
return this;
}
public AddReplica setCoreName(String coreName) {
this.coreName = coreName;
return this;
}
@Override
public SolrParams getParams() {
ModifiableSolrParams params = new ModifiableSolrParams(super.getParams());
@ -1446,6 +1452,9 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>
if (dataDir != null) {
params.add("dataDir", dataDir);
}
if (coreName != null) {
params.add("name", coreName);
}
if (type != null) {
params.add(ZkStateReader.REPLICA_TYPE, type.name());
}

View File

@ -1116,20 +1116,7 @@ public abstract class BaseDistributedSearchTestCase extends SolrTestCaseJ4 {
protected void setupJettySolrHome(File jettyHome) throws IOException {
seedSolrHome(jettyHome);
Properties coreProperties = new Properties();
coreProperties.setProperty("name", "collection1");
coreProperties.setProperty("shard", "${shard:}");
coreProperties.setProperty("collection", "${collection:collection1}");
coreProperties.setProperty("config", "${solrconfig:solrconfig.xml}");
coreProperties.setProperty("schema", "${schema:schema.xml}");
coreProperties.setProperty("coreNodeName", "${coreNodeName:}");
coreProperties.setProperty("replicaType", "${replicaType:}");
writeCoreProperties(jettyHome.toPath().resolve("cores").resolve("collection1"), coreProperties, "collection1");
// <core name="collection1" instanceDir="collection1" shard="${shard:}"
// collection="${collection:collection1}" config="${solrconfig:solrconfig.xml}" schema="${schema:schema.xml}"
//coreNodeName="${coreNodeName:}"/>
Files.createDirectories(jettyHome.toPath().resolve("cores").resolve("collection1"));
}
}

View File

@ -25,6 +25,7 @@ import org.apache.commons.io.FileUtils;
import org.apache.solr.BaseDistributedSearchTestCase;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
@ -98,20 +99,15 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
File controlHome = new File(new File(getSolrHome()).getParentFile(), "control" + homeCount.incrementAndGet());
FileUtils.copyDirectory(new File(getSolrHome()), controlHome);
setupJettySolrHome(controlHome);
System.setProperty("collection", "control_collection");
String numShardsS = System.getProperty(ZkStateReader.NUM_SHARDS_PROP);
System.setProperty(ZkStateReader.NUM_SHARDS_PROP, "1");
controlJetty = createJetty(controlHome, null); // let the shardId default to shard1
System.clearProperty("collection");
if(numShardsS != null) {
System.setProperty(ZkStateReader.NUM_SHARDS_PROP, numShardsS);
} else {
System.clearProperty(ZkStateReader.NUM_SHARDS_PROP);
}
controlJetty = createJetty(controlHome, null); // let the shardId default to shard1
controlClient = createNewSolrClient(controlJetty.getLocalPort());
assertTrue(CollectionAdminRequest
.createCollection("control_collection", 1, 1)
.setCreateNodeSet(controlJetty.getNodeName())
.process(controlClient).isSuccess());
StringBuilder sb = new StringBuilder();
for (int i = 1; i <= numShards; i++) {
if (sb.length() > 0) sb.append(',');

View File

@ -62,7 +62,6 @@ import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
@@ -234,9 +233,15 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
System.clearProperty("urlScheme");
try (ZkStateReader zkStateReader = new ZkStateReader(zkServer.getZkAddress(),
AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT)) {
zkStateReader.getZkClient().create(ZkStateReader.CLUSTER_PROPS,
Utils.toJSON(Collections.singletonMap("urlScheme", "https")),
CreateMode.PERSISTENT, true);
try {
zkStateReader.getZkClient().create(ZkStateReader.CLUSTER_PROPS,
Utils.toJSON(Collections.singletonMap("urlScheme", "https")),
CreateMode.PERSISTENT, true);
} catch (KeeperException.NodeExistsException e) {
ZkNodeProps props = ZkNodeProps.load(zkStateReader.getZkClient().getData(ZkStateReader.CLUSTER_PROPS,
null, null, true));
zkStateReader.getZkClient().setData(ZkStateReader.CLUSTER_PROPS, Utils.toJSON(props.plus("urlScheme", "https")), true);
}
}
}
if (useTlogReplicas()) {
@@ -300,48 +305,31 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
@Override
protected void createServers(int numServers) throws Exception {
System.setProperty("collection", "control_collection");
// we want hashes by default for the control, so set to 1 shard as opposed to leaving unset
String oldNumShards = System.getProperty(ZkStateReader.NUM_SHARDS_PROP);
System.setProperty(ZkStateReader.NUM_SHARDS_PROP, "1");
try {
File controlJettyDir = createTempDir("control").toFile();
setupJettySolrHome(controlJettyDir);
controlJetty = createJetty(controlJettyDir, useJettyDataDir ? getDataDir(testDir
+ "/control/data") : null); // don't pass shard name... let it default to
// "shard1"
controlClient = createNewSolrClient(controlJetty.getLocalPort());
if (sliceCount <= 0) {
// for now, just create the cloud client for the control if we don't
// create the normal cloud client.
// this can change if more tests need it.
controlClientCloud = createCloudClient("control_collection");
controlClientCloud.connect();
waitForCollection(controlClientCloud.getZkStateReader(),
"control_collection", 0);
// NOTE: we are skipping creation of the chaos monkey by returning here
cloudClient = controlClientCloud; // temporary - some code needs/uses
// cloudClient
return;
}
} finally {
System.clearProperty("collection");
if (oldNumShards != null) {
System.setProperty(ZkStateReader.NUM_SHARDS_PROP, oldNumShards);
} else {
System.clearProperty(ZkStateReader.NUM_SHARDS_PROP);
File controlJettyDir = createTempDir("control").toFile();
setupJettySolrHome(controlJettyDir);
controlJetty = createJetty(controlJettyDir, useJettyDataDir ? getDataDir(testDir
+ "/control/data") : null);
try (SolrClient client = createCloudClient("control_collection")) {
assertEquals(0, CollectionAdminRequest
.createCollection("control_collection", 1, 1)
.setCreateNodeSet(controlJetty.getNodeName())
.process(client).getStatus());
}
controlClient = new HttpSolrClient.Builder(controlJetty.getBaseUrl() + "/control_collection").build();
if (sliceCount <= 0) {
// for now, just create the cloud client for the control if we don't
// create the normal cloud client.
// this can change if more tests need it.
controlClientCloud = createCloudClient("control_collection");
controlClientCloud.connect();
waitForCollection(controlClientCloud.getZkStateReader(),
"control_collection", 0);
// NOTE: we are skipping creation of the chaos monkey by returning here
cloudClient = controlClientCloud; // temporary - some code needs/uses
// cloudClient
return;
}
initCloud();
createJettys(numServers);
@@ -390,24 +378,13 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
List<SolrClient> clients = new ArrayList<>();
StringBuilder sb = new StringBuilder();
if ("2".equals(getStateFormat())) {
log.info("Creating " + DEFAULT_COLLECTION + " with stateFormat=2");
SolrZkClient zkClient = new SolrZkClient(zkServer.getZkAddress(),
AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT);
Overseer.getStateUpdateQueue(zkClient).offer(
Utils.toJSON(Utils.makeMap(Overseer.QUEUE_OPERATION,
CollectionParams.CollectionAction.CREATE.toLower(),
"name", DEFAULT_COLLECTION,
"numShards", String.valueOf(sliceCount),
DocCollection.STATE_FORMAT, getStateFormat(),
ZkStateReader.NRT_REPLICAS, useTlogReplicas()?"0":"1",
ZkStateReader.TLOG_REPLICAS, useTlogReplicas()?"1":"0",
ZkStateReader.PULL_REPLICAS, String.valueOf(getPullReplicaCount()))));
zkClient.close();
}
assertEquals(0, CollectionAdminRequest
.createCollection(DEFAULT_COLLECTION, sliceCount, 1)
.setStateFormat(Integer.parseInt(getStateFormat()))
.setCreateNodeSet("")
.process(cloudClient).getStatus());
int numPullReplicas = getPullReplicaCount() * sliceCount;
int numOtherReplicas = numJettys - getPullReplicaCount() * sliceCount;
for (int i = 1; i <= numJettys; i++) {
if (sb.length() > 0) sb.append(',');
int cnt = this.jettyIntCntr.incrementAndGet();
@@ -417,20 +394,37 @@ public abstract class AbstractFullDistribZkTestBase extends AbstractDistribZkTes
jettyDir.mkdirs();
setupJettySolrHome(jettyDir);
JettySolrRunner j;
if (numPullReplicas > 0) {
numPullReplicas--;
if (numOtherReplicas > 0) {
numOtherReplicas--;
if (useTlogReplicas()) {
log.info("create jetty {} in directory {} of type {}", i, jettyDir, Replica.Type.TLOG);
j = createJetty(jettyDir, useJettyDataDir ? getDataDir(testDir + "/jetty"
+ cnt) : null, null, "solrconfig.xml", null, Replica.Type.TLOG);
assertTrue(CollectionAdminRequest
.addReplicaToShard(DEFAULT_COLLECTION, "shard"+((i%sliceCount)+1))
.setNode(j.getNodeName())
.setType(Replica.Type.TLOG)
.process(cloudClient).isSuccess());
} else {
log.info("create jetty {} in directory {} of type {}", i, jettyDir, Replica.Type.NRT);
j = createJetty(jettyDir, useJettyDataDir ? getDataDir(testDir + "/jetty"
+ cnt) : null, null, "solrconfig.xml", null, null);
assertTrue(CollectionAdminRequest
.addReplicaToShard(DEFAULT_COLLECTION, "shard"+((i%sliceCount)+1))
.setNode(j.getNodeName())
.setType(Replica.Type.NRT)
.process(cloudClient).isSuccess());
}
} else {
log.info("create jetty {} in directory {} of type {}", i, jettyDir, Replica.Type.PULL);
j = createJetty(jettyDir, useJettyDataDir ? getDataDir(testDir + "/jetty"
+ cnt) : null, null, "solrconfig.xml", null, Replica.Type.PULL);
} else if (useTlogReplicas()) {
log.info("create jetty {} in directory {} of type {}", i, jettyDir, Replica.Type.TLOG);
j = createJetty(jettyDir, useJettyDataDir ? getDataDir(testDir + "/jetty"
+ cnt) : null, null, "solrconfig.xml", null, Replica.Type.TLOG);
} else {
log.info("create jetty {} in directory {} of type {}", i, jettyDir, Replica.Type.NRT);
j = createJetty(jettyDir, useJettyDataDir ? getDataDir(testDir + "/jetty"
+ cnt) : null, null, "solrconfig.xml", null, null);
assertTrue(CollectionAdminRequest
.addReplicaToShard(DEFAULT_COLLECTION, "shard"+((i%sliceCount)+1))
.setNode(j.getNodeName())
.setType(Replica.Type.PULL)
.process(cloudClient).isSuccess());
}
jettys.add(j);
SolrClient client = createNewSolrClient(j.getLocalPort());

View File

@@ -19,6 +19,7 @@ package org.apache.solr.cloud;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.util.Utils;
import org.apache.zookeeper.CreateMode;
import org.junit.AfterClass;
@@ -28,6 +29,7 @@ import org.slf4j.LoggerFactory;
import java.io.File;
import java.lang.invoke.MethodHandles;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
@@ -97,7 +99,9 @@ public abstract class AbstractZkTestCase extends SolrTestCaseJ4 {
zkClient.makePath("/collections/collection1/shards", CreateMode.PERSISTENT, true);
zkClient.makePath("/collections/control_collection", Utils.toJSON(zkProps), CreateMode.PERSISTENT, true);
zkClient.makePath("/collections/control_collection/shards", CreateMode.PERSISTENT, true);
// this workaround is acceptable until we remove legacyCloud because we just init a single core here
String defaultClusterProps = "{\""+ZkStateReader.LEGACY_CLOUD+"\":\"true\"}";
zkClient.makePath(ZkStateReader.CLUSTER_PROPS, defaultClusterProps.getBytes(StandardCharsets.UTF_8), CreateMode.PERSISTENT, true);
// for now, always upload the config and schema to the canonical names
putConfig("conf1", zkClient, solrhome, config, "solrconfig.xml");
putConfig("conf1", zkClient, solrhome, schema, "schema.xml");