mirror of https://github.com/apache/lucene.git

SOLR-9132: Cut over some more tests

parent 529c60d914
commit 183f998098
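The hunks below cut several SolrCloud tests over from AbstractFullDistribZkTestBase to the MiniSolrCloudCluster-based SolrCloudTestCase, replacing the old builder-style CollectionAdminRequest setters and hand-rolled TimeOut polling with the fluent request helpers and waitForState. As orientation, here is a minimal sketch of the target style assembled from the patterns in the diff; the class name ExampleSolrCloudTest and the collection name "example" are illustrative and not part of the commit.

// Hedged sketch (not from the commit): the SolrCloudTestCase style these tests are cut over to.
package org.apache.solr.cloud;

import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.junit.BeforeClass;
import org.junit.Test;

public class ExampleSolrCloudTest extends SolrCloudTestCase {

  @BeforeClass
  public static void setupCluster() throws Exception {
    // Start a small MiniSolrCloudCluster with the "cloud-minimal" configset, as in the diff below.
    configureCluster(2)
        .addConfig("conf", configset("cloud-minimal"))
        .configure();
  }

  @Test
  public void testCreateCollection() throws Exception {
    // Fluent CollectionAdminRequest helpers replace the old builder-style setters.
    CollectionAdminResponse response = CollectionAdminRequest.createCollection("example", "conf", 1, 1)
        .process(cluster.getSolrClient());
    assertEquals(0, response.getStatus());

    // Cluster-state expectations are declared with waitForState instead of polling with a TimeOut.
    waitForState("Expected collection 'example' to appear in cluster state", "example",
        (liveNodes, collection) -> collection != null);
  }
}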
@@ -371,7 +371,7 @@ public class Overseer implements Closeable {
           return Collections.singletonList(new SliceMutator(getZkStateReader()).updateShardState(clusterState, message));
         case QUIT:
           if (myId.equals(message.get("id"))) {
-            log.info("Quit command received {}", LeaderElector.getNodeName(myId));
+            log.info("Quit command received {} {}", message, LeaderElector.getNodeName(myId));
             overseerCollectionConfigSetProcessor.close();
             close();
           } else {
@@ -16,70 +16,47 @@
  */
 package org.apache.solr.cloud;

-import java.io.File;
 import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.concurrent.TimeUnit;

-import org.apache.commons.codec.binary.StringUtils;
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.TestUtil;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
+import org.apache.solr.client.solrj.request.CoreStatus;
 import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.client.solrj.response.CoreAdminResponse;
-import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.ClusterProperties;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.CoreAdminParams;
 import org.apache.solr.common.util.NamedList;
-import org.apache.solr.util.TimeOut;
 import org.apache.zookeeper.KeeperException;
+import org.junit.BeforeClass;
 import org.junit.Test;

-import static org.apache.solr.cloud.ReplicaPropertiesBase.verifyUniqueAcrossCollection;

 @LuceneTestCase.Slow
-public class CollectionsAPISolrJTest extends AbstractFullDistribZkTestBase {
+public class CollectionsAPISolrJTest extends SolrCloudTestCase {

-  @Test
-  public void test() throws Exception {
-    testCreateAndDeleteCollection();
-    testCreateAndDeleteShard();
-    testReloadCollection();
-    testCreateAndDeleteAlias();
-    testSplitShard();
-    testCreateCollectionWithPropertyParam();
-    testAddAndDeleteReplica();
-    testClusterProp();
-    testAddAndRemoveRole();
-    testOverseerStatus();
-    testList();
-    testAddAndDeleteReplicaProp();
-    testBalanceShardUnique();
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(4)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
   }

-  protected void testCreateAndDeleteCollection() throws Exception {
+  @Test
+  public void testCreateAndDeleteCollection() throws Exception {
     String collectionName = "solrj_test";
-    CollectionAdminRequest.Create createCollectionRequest = new CollectionAdminRequest.Create()
-        .setCollectionName(collectionName)
-        .setNumShards(2)
-        .setReplicationFactor(2)
-        .setConfigName("conf1")
-        .setRouterField("myOwnField")
-        .setStateFormat(1);
-
-    CollectionAdminResponse response = createCollectionRequest.process(cloudClient);
+    CollectionAdminResponse response = CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2)
+        .setStateFormat(1)
+        .process(cluster.getSolrClient());

     assertEquals(0, response.getStatus());
     assertTrue(response.isSuccess());

@@ -91,57 +68,44 @@ public class CollectionsAPISolrJTest extends AbstractFullDistribZkTestBase {
       assertTrue(status.get("QTime") > 0);
     }

-    cloudClient.setDefaultCollection(collectionName);
-    CollectionAdminRequest.Delete deleteCollectionRequest = new CollectionAdminRequest.Delete()
-        .setCollectionName(collectionName);
-    response = deleteCollectionRequest.process(cloudClient);
+    response = CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());

     assertEquals(0, response.getStatus());
     assertTrue(response.isSuccess());
     Map<String,NamedList<Integer>> nodesStatus = response.getCollectionNodesStatus();
-    assertNull("Deleted collection " + collectionName + "still exists",
-        cloudClient.getZkStateReader().getClusterState().getCollectionOrNull(collectionName));
     assertEquals(4, nodesStatus.size());

+    waitForState("Expected " + collectionName + " to disappear from cluster state", collectionName, (n, c) -> c == null);

     // Test Creating a collection with new stateformat.
     collectionName = "solrj_newstateformat";
-    createCollectionRequest = new CollectionAdminRequest.Create()
-        .setCollectionName(collectionName)
-        .setNumShards(2)
-        .setConfigName("conf1")
-        .setStateFormat(2);
-
-    response = createCollectionRequest.process(cloudClient);
+    response = CollectionAdminRequest.createCollection(collectionName, "conf", 2, 2)
+        .setStateFormat(2)
+        .process(cluster.getSolrClient());
     assertEquals(0, response.getStatus());
     assertTrue(response.isSuccess());

-    waitForRecoveriesToFinish(collectionName, false);
-    assertTrue("Collection state does not exist",
-        cloudClient.getZkStateReader().getZkClient()
-            .exists(ZkStateReader.getCollectionPath(collectionName), true));
+    waitForState("Expected " + collectionName + " to appear in cluster state", collectionName, (n, c) -> c != null);
   }

-  protected void testCreateAndDeleteShard() throws IOException, SolrServerException {
+  @Test
+  public void testCreateAndDeleteShard() throws IOException, SolrServerException {

     // Create an implicit collection
     String collectionName = "solrj_implicit";
-    CollectionAdminResponse response = new CollectionAdminRequest.Create()
-        .setCollectionName(collectionName)
-        .setShards("shardA,shardB")
-        .setConfigName("conf1")
-        .setRouterName("implicit").process(cloudClient);
+    CollectionAdminResponse response
+        = CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "shardA,shardB", 1)
+            .process(cluster.getSolrClient());

     assertEquals(0, response.getStatus());
     assertTrue(response.isSuccess());
     Map<String, NamedList<Integer>> coresStatus = response.getCollectionCoresStatus();
     assertEquals(2, coresStatus.size());

-    cloudClient.setDefaultCollection(collectionName);
     // Add a shard to the implicit collection
-    response = new CollectionAdminRequest
-        .CreateShard()
-        .setCollectionName(collectionName)
-        .setShardName("shardC").process(cloudClient);
+    response = CollectionAdminRequest.createShard(collectionName, "shardC").process(cluster.getSolrClient());

     assertEquals(0, response.getStatus());
     assertTrue(response.isSuccess());

@@ -149,57 +113,38 @@ public class CollectionsAPISolrJTest extends AbstractFullDistribZkTestBase {
     assertEquals(1, coresStatus.size());
     assertEquals(0, (int) coresStatus.get(collectionName + "_shardC_replica1").get("status"));

-    CollectionAdminRequest.DeleteShard deleteShardRequest = new CollectionAdminRequest
-        .DeleteShard()
-        .setCollectionName(collectionName)
-        .setShardName("shardC");
-    response = deleteShardRequest.process(cloudClient);
+    response = CollectionAdminRequest.deleteShard(collectionName, "shardC").process(cluster.getSolrClient());

     assertEquals(0, response.getStatus());
     assertTrue(response.isSuccess());
     Map<String, NamedList<Integer>> nodesStatus = response.getCollectionNodesStatus();
     assertEquals(1, nodesStatus.size());
   }

-  protected void testReloadCollection() throws IOException, SolrServerException {
-    cloudClient.setDefaultCollection(DEFAULT_COLLECTION);
-    CollectionAdminRequest.Reload reloadCollectionRequest = new CollectionAdminRequest.Reload()
-        .setCollectionName("collection1");
-    CollectionAdminResponse response = reloadCollectionRequest.process(cloudClient);
+  @Test
+  public void testCreateAndDeleteAlias() throws IOException, SolrServerException {

+    final String collection = "aliasedCollection";
+    CollectionAdminRequest.createCollection(collection, "conf", 1, 1).process(cluster.getSolrClient());

+    CollectionAdminResponse response
+        = CollectionAdminRequest.createAlias("solrj_alias", collection).process(cluster.getSolrClient());
+    assertEquals(0, response.getStatus());

+    response = CollectionAdminRequest.deleteAlias("solrj_alias").process(cluster.getSolrClient());
     assertEquals(0, response.getStatus());
   }

-  protected void testCreateAndDeleteAlias() throws IOException, SolrServerException {
-    CollectionAdminRequest.CreateAlias createAliasRequest = new CollectionAdminRequest
-        .CreateAlias()
-        .setAliasName("solrj_alias")
-        .setAliasedCollections(DEFAULT_COLLECTION);
-    CollectionAdminResponse response = createAliasRequest.process(cloudClient);
-
-    assertEquals(0, response.getStatus());
-
-    CollectionAdminRequest.DeleteAlias deleteAliasRequest = new CollectionAdminRequest.DeleteAlias()
-        .setAliasName("solrj_alias");
-    deleteAliasRequest.process(cloudClient);
-
-    assertEquals(0, response.getStatus());
-  }
-
-  protected void testSplitShard() throws Exception {
-    String collectionName = "solrj_test_splitshard";
-    cloudClient.setDefaultCollection(collectionName);
-
-    CollectionAdminRequest.Create createCollectionRequest = new CollectionAdminRequest.Create()
-        .setConfigName("conf1")
-        .setNumShards(2)
-        .setCollectionName(collectionName);
-    createCollectionRequest.process(cloudClient);
-
-    CollectionAdminRequest.SplitShard splitShardRequest = new CollectionAdminRequest.SplitShard()
-        .setCollectionName(collectionName)
-        .setShardName("shard1");
-    CollectionAdminResponse response = splitShardRequest.process(cloudClient);
+  @Test
+  public void testSplitShard() throws Exception {
+
+    final String collectionName = "solrj_test_splitshard";
+    CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1)
+        .process(cluster.getSolrClient());
+
+    CollectionAdminResponse response = CollectionAdminRequest.splitShard(collectionName)
+        .setShardName("shard1")
+        .process(cluster.getSolrClient());

     assertEquals(0, response.getStatus());
     assertTrue(response.isSuccess());

@@ -207,267 +152,204 @@ public class CollectionsAPISolrJTest extends AbstractFullDistribZkTestBase {
     assertEquals(0, (int) coresStatus.get(collectionName + "_shard1_0_replica1").get("status"));
     assertEquals(0, (int) coresStatus.get(collectionName + "_shard1_1_replica1").get("status"));

-    waitForRecoveriesToFinish(collectionName, false);
-    waitForThingsToLevelOut(10);
+    waitForState("Expected all shards to be active and parent shard to be removed", collectionName, (n, c) -> {
+      if (c.getSlice("shard1").getState() == Slice.State.ACTIVE)
+        return false;
+      for (Replica r : c.getReplicas()) {
+        if (r.isActive(n) == false)
+          return false;
+      }
+      return true;
+    });

     // Test splitting using split.key
-    splitShardRequest = new CollectionAdminRequest.SplitShard()
-        .setCollectionName(collectionName)
-        .setSplitKey("b!");
-    response = splitShardRequest.process(cloudClient);
+    response = CollectionAdminRequest.splitShard(collectionName)
+        .setSplitKey("b!")
+        .process(cluster.getSolrClient());

     assertEquals(0, response.getStatus());
     assertTrue(response.isSuccess());

-    waitForRecoveriesToFinish(collectionName, false);
-    waitForThingsToLevelOut(10);
-
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-    Collection<Slice> slices = clusterState.getActiveSlices(collectionName);
-    assertEquals("ClusterState: "+ clusterState.getActiveSlices(collectionName), 5, slices.size());
+    waitForState("Expected 5 slices to be active", collectionName, (n, c) -> c.getActiveSlices().size() == 5);
   }

-  private void testCreateCollectionWithPropertyParam() throws Exception {
+  @Test
+  public void testCreateCollectionWithPropertyParam() throws Exception {

     String collectionName = "solrj_test_core_props";

-    File tmpDir = createTempDir("testPropertyParamsForCreate").toFile();
-    File dataDir = new File(tmpDir, "dataDir-" + TestUtil.randomSimpleString(random(), 1, 5));
-    File ulogDir = new File(tmpDir, "ulogDir-" + TestUtil.randomSimpleString(random(), 1, 5));
+    Path tmpDir = createTempDir("testPropertyParamsForCreate");
+    Path dataDir = tmpDir.resolve("dataDir-" + TestUtil.randomSimpleString(random(), 1, 5));
+    Path ulogDir = tmpDir.resolve("ulogDir-" + TestUtil.randomSimpleString(random(), 1, 5));

-    Properties properties = new Properties();
-    properties.put(CoreAdminParams.DATA_DIR, dataDir.getAbsolutePath());
-    properties.put(CoreAdminParams.ULOG_DIR, ulogDir.getAbsolutePath());
-
-    CollectionAdminRequest.Create createReq = new CollectionAdminRequest.Create()
-        .setCollectionName(collectionName)
-        .setNumShards(1)
-        .setConfigName("conf1")
-        .setProperties(properties);
-
-    CollectionAdminResponse response = createReq.process(cloudClient);
+    CollectionAdminResponse response = CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1)
+        .withProperty(CoreAdminParams.DATA_DIR, dataDir.toString())
+        .withProperty(CoreAdminParams.ULOG_DIR, ulogDir.toString())
+        .process(cluster.getSolrClient());

     assertEquals(0, response.getStatus());
     assertTrue(response.isSuccess());
     Map<String, NamedList<Integer>> coresStatus = response.getCollectionCoresStatus();
     assertEquals(1, coresStatus.size());

-    DocCollection testCollection = cloudClient.getZkStateReader()
-        .getClusterState().getCollection(collectionName);
+    DocCollection testCollection = getCollectionState(collectionName);

     Replica replica1 = testCollection.getReplica("core_node1");
+    CoreStatus coreStatus = getCoreStatus(replica1);

-    try (HttpSolrClient client = getHttpSolrClient(replica1.getStr("base_url"))) {
-      CoreAdminResponse status = CoreAdminRequest.getStatus(replica1.getStr("core"), client);
-      NamedList<Object> coreStatus = status.getCoreStatus(replica1.getStr("core"));
-      String dataDirStr = (String) coreStatus.get("dataDir");
-      assertEquals("Data dir does not match param given in property.dataDir syntax",
-          new File(dataDirStr).getAbsolutePath(), dataDir.getAbsolutePath());
-    }
-
-    CollectionAdminRequest.Delete deleteCollectionRequest = new CollectionAdminRequest.Delete();
-    deleteCollectionRequest.setCollectionName(collectionName);
-    deleteCollectionRequest.process(cloudClient);
+    assertEquals(Paths.get(coreStatus.getDataDirectory()).toString(), dataDir.toString());
   }

-  private void testAddAndDeleteReplica() throws Exception {
-    String collectionName = "solrj_replicatests";
-    createCollection(collectionName, cloudClient, 1, 2);
-
-    cloudClient.setDefaultCollection(collectionName);
+  @Test
+  public void testAddAndDeleteReplica() throws Exception {

+    final String collectionName = "solrj_replicatests";
+    CollectionAdminRequest.createCollection(collectionName, "conf", 1, 2)
+        .process(cluster.getSolrClient());

-    String newReplicaName = Assign.assignNode(cloudClient.getZkStateReader().getClusterState().getCollection(collectionName));
-    ArrayList<String> nodeList = new ArrayList<>(cloudClient.getZkStateReader().getClusterState().getLiveNodes());
+    String newReplicaName = Assign.assignNode(getCollectionState(collectionName));
+    ArrayList<String> nodeList
+        = new ArrayList<>(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes());
     Collections.shuffle(nodeList, random());
-    CollectionAdminRequest.AddReplica addReplica = new CollectionAdminRequest.AddReplica()
-        .setCollectionName(collectionName)
-        .setShardName("shard1")
-        .setNode(nodeList.get(0));
-    CollectionAdminResponse response = addReplica.process(cloudClient);
+    final String node = nodeList.get(0);

+    CollectionAdminResponse response = CollectionAdminRequest.addReplicaToShard(collectionName, "shard1")
+        .setNode(node)
+        .process(cluster.getSolrClient());

     assertEquals(0, response.getStatus());
     assertTrue(response.isSuccess());

-    TimeOut timeout = new TimeOut(3, TimeUnit.SECONDS);
-    Replica newReplica = null;
-
-    while (! timeout.hasTimedOut() && newReplica == null) {
-      Slice slice = cloudClient.getZkStateReader().getClusterState().getSlice(collectionName, "shard1");
-      newReplica = slice.getReplica(newReplicaName);
-    }
-
-    assertNotNull(newReplica);
-
-    assertEquals("Replica should be created on the right node",
-        cloudClient.getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)),
-        newReplica.getStr(ZkStateReader.BASE_URL_PROP)
-    );
+    waitForState("Expected to see replica " + newReplicaName + " on node " + node, collectionName, (n, c) -> {
+      Replica r = c.getSlice("shard1").getReplica(newReplicaName);
+      return r != null && r.getNodeName().equals(node);
+    });

     // Test DELETEREPLICA
-    CollectionAdminRequest.DeleteReplica deleteReplicaRequest = new CollectionAdminRequest.DeleteReplica()
-        .setCollectionName(collectionName)
-        .setShardName("shard1")
-        .setReplica(newReplicaName);
-    response = deleteReplicaRequest.process(cloudClient);
+    response = CollectionAdminRequest.deleteReplica(collectionName, "shard1", newReplicaName)
+        .process(cluster.getSolrClient());

     assertEquals(0, response.getStatus());

-    timeout = new TimeOut(3, TimeUnit.SECONDS);
-
-    while (! timeout.hasTimedOut() && newReplica != null) {
-      Slice slice = cloudClient.getZkStateReader().getClusterState().getSlice(collectionName, "shard1");
-      newReplica = slice.getReplica(newReplicaName);
-    }
-
-    assertNull(newReplica);
+    waitForState("Expected replica " + newReplicaName + " to vanish from cluster state", collectionName,
+        (n, c) -> c.getSlice("shard1").getReplica(newReplicaName) == null);
   }

-  private void testClusterProp() throws InterruptedException, IOException, SolrServerException {
-    CollectionAdminRequest.ClusterProp clusterPropRequest = new CollectionAdminRequest.ClusterProp()
-        .setPropertyName(ZkStateReader.LEGACY_CLOUD)
-        .setPropertyValue("false");
-    CollectionAdminResponse response = clusterPropRequest.process(cloudClient);
+  @Test
+  public void testClusterProp() throws InterruptedException, IOException, SolrServerException {

+    CollectionAdminResponse response = CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, "false")
+        .process(cluster.getSolrClient());

     assertEquals(0, response.getStatus());

-    TimeOut timeout = new TimeOut(3, TimeUnit.SECONDS);
-    boolean changed = false;
-
-    while(! timeout.hasTimedOut()){
-      Thread.sleep(10);
-      changed = Objects.equals("false",
-          cloudClient.getZkStateReader().getClusterProperty(ZkStateReader.LEGACY_CLOUD, "none"));
-      if(changed) break;
-    }
-    assertTrue("The Cluster property wasn't set", changed);
+    ClusterProperties props = new ClusterProperties(zkClient());
+    assertEquals("Cluster property was not set", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "true"), "false");

     // Unset ClusterProp that we set.
-    clusterPropRequest = new CollectionAdminRequest.ClusterProp()
-        .setPropertyName(ZkStateReader.LEGACY_CLOUD)
-        .setPropertyValue(null);
-    clusterPropRequest.process(cloudClient);
-
-    timeout = new TimeOut(3, TimeUnit.SECONDS);
-    changed = false;
-    while(! timeout.hasTimedOut()) {
-      Thread.sleep(10);
-      changed = (cloudClient.getZkStateReader().getClusterProperty(ZkStateReader.LEGACY_CLOUD, (String) null) == null);
-      if(changed)
-        break;
-    }
-    assertTrue("The Cluster property wasn't unset", changed);
+    CollectionAdminRequest.setClusterProperty(ZkStateReader.LEGACY_CLOUD, null).process(cluster.getSolrClient());
+    assertEquals("Cluster property was not unset", props.getClusterProperty(ZkStateReader.LEGACY_CLOUD, "true"), "true");
   }

-  private void testAddAndRemoveRole() throws InterruptedException, IOException, SolrServerException {
-    cloudClient.setDefaultCollection(DEFAULT_COLLECTION);
-    Replica replica = cloudClient.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1);
-    CollectionAdminRequest.AddRole addRoleRequest = new CollectionAdminRequest.AddRole()
-        .setNode(replica.getNodeName())
-        .setRole("overseer");
-    addRoleRequest.process(cloudClient);
+  @Test
+  @SuppressWarnings("unchecked")
+  public void testAddAndRemoveRole() throws InterruptedException, IOException, SolrServerException {

-    CollectionAdminRequest.ClusterStatus clusterStatusRequest = new CollectionAdminRequest.ClusterStatus()
-        .setCollectionName(DEFAULT_COLLECTION);
-    CollectionAdminResponse response = clusterStatusRequest.process(cloudClient);
+    String node = cluster.getRandomJetty(random()).getNodeName();

+    CollectionAdminRequest.addRole(node, "overseer").process(cluster.getSolrClient());

+    CollectionAdminResponse response = CollectionAdminRequest.getClusterStatus().process(cluster.getSolrClient());

     NamedList<Object> rsp = response.getResponse();
-    NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
-    assertNotNull("Cluster state should not be null", cluster);
-    Map<String, Object> roles = (Map<String, Object>) cluster.get("roles");
+    NamedList<Object> cs = (NamedList<Object>) rsp.get("cluster");
+    assertNotNull("Cluster state should not be null", cs);
+    Map<String, Object> roles = (Map<String, Object>) cs.get("roles");
     assertNotNull("Role information should not be null", roles);
     List<String> overseer = (List<String>) roles.get("overseer");
     assertNotNull(overseer);
     assertEquals(1, overseer.size());
-    assertTrue(overseer.contains(replica.getNodeName()));
+    assertTrue(overseer.contains(node));

     // Remove role
-    new CollectionAdminRequest.RemoveRole()
-        .setNode(replica.getNodeName())
-        .setRole("overseer")
-        .process(cloudClient);
-
-    clusterStatusRequest = new CollectionAdminRequest.ClusterStatus();
-    clusterStatusRequest.setCollectionName(DEFAULT_COLLECTION);
-    response = clusterStatusRequest.process(cloudClient);
+    CollectionAdminRequest.removeRole(node, "overseer").process(cluster.getSolrClient());

+    response = CollectionAdminRequest.getClusterStatus().process(cluster.getSolrClient());

     rsp = response.getResponse();
-    cluster = (NamedList<Object>) rsp.get("cluster");
-    assertNotNull("Cluster state should not be null", cluster);
-    roles = (Map<String, Object>) cluster.get("roles");
+    cs = (NamedList<Object>) rsp.get("cluster");
+    assertNotNull("Cluster state should not be null", cs);
+    roles = (Map<String, Object>) cs.get("roles");
     assertNotNull("Role information should not be null", roles);
     overseer = (List<String>) roles.get("overseer");
-    assertFalse(overseer.contains(replica.getNodeName()));
+    assertFalse(overseer.contains(node));
   }

-  private void testOverseerStatus() throws IOException, SolrServerException {
-    CollectionAdminResponse response = new CollectionAdminRequest.OverseerStatus().process(cloudClient);
+  @Test
+  public void testOverseerStatus() throws IOException, SolrServerException {
+    CollectionAdminResponse response = new CollectionAdminRequest.OverseerStatus().process(cluster.getSolrClient());
     assertEquals(0, response.getStatus());
     assertNotNull("overseer_operations shouldn't be null", response.getResponse().get("overseer_operations"));
   }

-  private void testList() throws IOException, SolrServerException {
-    CollectionAdminResponse response = new CollectionAdminRequest.List().process(cloudClient);
+  @Test
+  public void testList() throws IOException, SolrServerException {
+    CollectionAdminResponse response = new CollectionAdminRequest.List().process(cluster.getSolrClient());
     assertEquals(0, response.getStatus());
     assertNotNull("collection list should not be null", response.getResponse().get("collections"));
   }

-  private void testAddAndDeleteReplicaProp() throws InterruptedException, IOException, SolrServerException {
-    Replica replica = cloudClient.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1);
-    CollectionAdminResponse response = new CollectionAdminRequest.AddReplicaProp()
-        .setCollectionName(DEFAULT_COLLECTION)
-        .setShardName(SHARD1)
-        .setReplica(replica.getName())
-        .setPropertyName("preferredleader")
-        .setPropertyValue("true").process(cloudClient);
+  @Test
+  public void testAddAndDeleteReplicaProp() throws InterruptedException, IOException, SolrServerException {

+    final String collection = "replicaProperties";
+    CollectionAdminRequest.createCollection(collection, "conf", 2, 2)
+        .process(cluster.getSolrClient());

+    final Replica replica = getCollectionState(collection).getLeader("shard1");
+    CollectionAdminResponse response
+        = CollectionAdminRequest.addReplicaProperty(collection, "shard1", replica.getName(), "preferredleader", "true")
+            .process(cluster.getSolrClient());
     assertEquals(0, response.getStatus());

-    TimeOut timeout = new TimeOut(20, TimeUnit.SECONDS);
-    String propertyValue = null;
-
-    String replicaName = replica.getName();
-    while (! timeout.hasTimedOut()) {
-      ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-      replica = clusterState.getReplica(DEFAULT_COLLECTION, replicaName);
-      propertyValue = replica.getStr("property.preferredleader");
-      if(StringUtils.equals("true", propertyValue))
-        break;
-      Thread.sleep(50);
-    }
-
-    assertEquals("Replica property was not updated, Latest value: " +
-        cloudClient.getZkStateReader().getClusterState().getReplica(DEFAULT_COLLECTION, replicaName),
-        "true",
-        propertyValue);
+    waitForState("Expecting property 'preferredleader' to appear on replica " + replica.getName(), collection,
+        (n, c) -> "true".equals(c.getReplica(replica.getName()).getStr("property.preferredleader")));

-    response = new CollectionAdminRequest.DeleteReplicaProp()
-        .setCollectionName(DEFAULT_COLLECTION)
-        .setShardName(SHARD1)
-        .setReplica(replicaName)
-        .setPropertyName("property.preferredleader").process(cloudClient);
+    response = CollectionAdminRequest.deleteReplicaProperty(collection, "shard1", replica.getName(), "property.preferredleader")
+        .process(cluster.getSolrClient());
     assertEquals(0, response.getStatus());

-    timeout = new TimeOut(20, TimeUnit.SECONDS);
-    boolean updated = false;
-
-    while (! timeout.hasTimedOut()) {
-      ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
-      replica = clusterState.getReplica(DEFAULT_COLLECTION, replicaName);
-      updated = replica.getStr("property.preferredleader") == null;
-      if(updated)
-        break;
-      Thread.sleep(50);
-    }
-
-    assertTrue("Replica property was not removed", updated);
+    waitForState("Expecting property 'preferredleader' to be removed from replica " + replica.getName(), collection,
+        (n, c) -> c.getReplica(replica.getName()).getStr("property.preferredleader") == null);
   }

-  private void testBalanceShardUnique() throws IOException,
+  @Test
+  public void testBalanceShardUnique() throws IOException,
       SolrServerException, KeeperException, InterruptedException {

-    CollectionAdminResponse response = new CollectionAdminRequest.BalanceShardUnique()
-        .setCollection(DEFAULT_COLLECTION)
-        .setPropertyName("preferredLeader").process(cloudClient);
+    final String collection = "balancedProperties";
+    CollectionAdminRequest.createCollection(collection, "conf", 2, 2)
+        .process(cluster.getSolrClient());

+    CollectionAdminResponse response = CollectionAdminRequest.balanceReplicaProperty(collection, "preferredLeader")
+        .process(cluster.getSolrClient());
     assertEquals(0, response.getStatus());

-    verifyUniqueAcrossCollection(cloudClient, DEFAULT_COLLECTION, "property.preferredleader");
+    waitForState("Expecting 'preferredleader' property to be balanced across all shards", collection, (n, c) -> {
+      for (Slice slice : c) {
+        int count = 0;
+        for (Replica replica : slice) {
+          if ("true".equals(replica.getStr("property.preferredleader")))
+            count += 1;
+        }
+        if (count != 1)
+          return false;
+      }
+      return true;
+    });
   }
 }
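A recurring change in the hunk above, and in the test classes that follow, is replacing TimeOut/Thread.sleep polling of ZkStateReader with a declarative waitForState predicate. A minimal, self-contained sketch of that idiom, under the assumption of the same SolrCloudTestCase helpers used in the commit; the class name WaitForStateSketchTest and the collection name "sketch" are illustrative, not from the commit.

// Hedged sketch (not from the commit): declaring expected cluster state instead of polling for it.
package org.apache.solr.cloud;

import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.junit.BeforeClass;
import org.junit.Test;

public class WaitForStateSketchTest extends SolrCloudTestCase {

  @BeforeClass
  public static void setupCluster() throws Exception {
    configureCluster(2)
        .addConfig("conf", configset("cloud-minimal"))
        .configure();
  }

  @Test
  public void testShardRemovalIsObservedDeclaratively() throws Exception {
    // An implicit-router collection with two named shards, mirroring the tests below.
    CollectionAdminRequest.createCollectionWithImplicitRouter("sketch", "conf", "a,b", 1)
        .process(cluster.getSolrClient());

    CollectionAdminRequest.deleteShard("sketch", "a").process(cluster.getSolrClient());

    // Instead of looping on ZkStateReader with a TimeOut, the expected state is expressed as a
    // predicate over (liveNodes, DocCollection); the base class polls until it holds or times out.
    waitForState("Expected shard 'a' to be removed", "sketch",
        (liveNodes, collection) -> collection.getSlice("a") == null);
  }
}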
@@ -16,106 +16,40 @@
  */
 package org.apache.solr.cloud;

-import org.apache.solr.client.solrj.SolrRequest;
-import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
-import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.common.cloud.DocCollection;
-import org.apache.solr.common.cloud.ImplicitDocRouter;
 import org.apache.solr.common.cloud.Replica;
-import org.apache.solr.common.cloud.Slice;
-import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.MapSolrParams;
-import org.apache.solr.common.params.SolrParams;
-import org.apache.solr.common.util.Utils;
-import org.apache.solr.util.TimeOut;
+import org.junit.BeforeClass;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.SHARDS_PROP;
-import static org.apache.solr.common.util.Utils.makeMap;
-import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
-
-public class DeleteLastCustomShardedReplicaTest extends AbstractFullDistribZkTestBase {
-
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-
-  protected String getSolrXml() {
-    return "solr.xml";
-  }
-
-  public DeleteLastCustomShardedReplicaTest() {
-    sliceCount = 2;
+public class DeleteLastCustomShardedReplicaTest extends SolrCloudTestCase {
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(2)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
   }

   @Test
-  @ShardsFixed(num = 2)
   public void test() throws Exception {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      int replicationFactor = 1;
-      int maxShardsPerNode = 5;
-
-      Map<String, Object> props = Utils.makeMap(
-          "router.name", ImplicitDocRouter.NAME,
-          ZkStateReader.REPLICATION_FACTOR, replicationFactor,
-          ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode,
-          NUM_SLICES, 1,
-          SHARDS_PROP, "a,b");
-
-      Map<String,List<Integer>> collectionInfos = new HashMap<>();
-
-      String collectionName = "customcollreplicadeletion";
-
-      createCollection(collectionInfos, collectionName, props, client);
-
-      waitForRecoveriesToFinish(collectionName, false);
-
-      DocCollection testcoll = getCommonCloudSolrClient().getZkStateReader()
-          .getClusterState().getCollection(collectionName);
-      Replica replica = testcoll.getSlice("a").getReplicas().iterator().next();
-
-      removeAndWaitForReplicaGone(client, collectionName, replica, "a", replicationFactor-1);
-    }
-  }
-
-  protected void removeAndWaitForReplicaGone(CloudSolrClient client, String COLL_NAME, Replica replica, String shard,
-      final int expectedNumReplicasRemaining)
-      throws SolrServerException, IOException, InterruptedException {
-    Map m = makeMap("collection", COLL_NAME, "action", DELETEREPLICA.toLower(), "shard",
-        shard, "replica", replica.getName());
-    SolrParams params = new MapSolrParams(m);
-    SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    client.request(request);
-    TimeOut timeout = new TimeOut(3, TimeUnit.SECONDS);
-    boolean success = false;
-    DocCollection testcoll = null;
-    while (! timeout.hasTimedOut()) {
-      testcoll = getCommonCloudSolrClient().getZkStateReader()
-          .getClusterState().getCollection(COLL_NAME);
-      // As of SOLR-5209 the last replica deletion no longer leads to
-      // the deletion of the slice.
-      final Slice slice = testcoll.getSlice(shard);
-      final int actualNumReplicasRemaining = (slice == null ? 0 : slice.getReplicas().size());
-      success = (actualNumReplicasRemaining == expectedNumReplicasRemaining);
-      if (success) {
-        log.info("replica cleaned up {}/{} core {}",
-            shard + "/" + replica.getName(), replica.getStr("core"));
-        log.info("current state {}", testcoll);
-        break;
-      }
-      Thread.sleep(100);
-    }
-    assertTrue("Replica not cleaned up", success);
+    final String collectionName = "customcollreplicadeletion";
+
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collectionName, "conf", "a,b", 1)
+        .setMaxShardsPerNode(5)
+        .process(cluster.getSolrClient());
+
+    DocCollection collectionState = getCollectionState(collectionName);
+    Replica replica = getRandomReplica(collectionState.getSlice("a"));
+
+    CollectionAdminRequest.deleteReplica(collectionName, "a", replica.getName())
+        .process(cluster.getSolrClient());
+
+    waitForState("Expected shard 'a' to have no replicas", collectionName, (n, c) -> {
+      return c.getSlice("a") == null || c.getSlice("a").getReplicas().size() == 0;
+    });
   }

 }
@@ -20,202 +20,123 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.Map;

-import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
+import org.apache.solr.client.solrj.impl.CloudSolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.CoreAdminRequest;
-import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.client.solrj.response.CoreAdminResponse;
+import org.apache.solr.client.solrj.request.CoreStatus;
 import org.apache.solr.cloud.overseer.OverseerAction;
-import org.apache.solr.common.SolrException;
-import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
 import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.cloud.Slice.State;
 import org.apache.solr.common.cloud.ZkNodeProps;
 import org.apache.solr.common.cloud.ZkStateReader;
-import org.apache.solr.common.params.CollectionParams;
-import org.apache.solr.common.params.ModifiableSolrParams;
-import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.util.FileUtils;
 import org.apache.zookeeper.KeeperException;
+import org.junit.BeforeClass;
 import org.junit.Test;

-public class DeleteShardTest extends AbstractFullDistribZkTestBase {
-
-  public DeleteShardTest() {
-    super();
-    sliceCount = 2;
-  }
+public class DeleteShardTest extends SolrCloudTestCase {

   // TODO: Custom hash slice deletion test

+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(2)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
+  }

   @Test
-  @ShardsFixed(num = 2)
   public void test() throws Exception {
-    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();

-    Slice slice1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
-    Slice slice2 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD2);
+    final String collection = "deleteShard";

-    assertNotNull("Shard1 not found", slice1);
-    assertNotNull("Shard2 not found", slice2);
-    assertSame("Shard1 is not active", Slice.State.ACTIVE, slice1.getState());
-    assertSame("Shard2 is not active", Slice.State.ACTIVE, slice2.getState());
+    CollectionAdminRequest.createCollection(collection, "conf", 2, 1)
+        .process(cluster.getSolrClient());

-    try {
-      deleteShard(SHARD1);
-      fail("Deleting an active shard should not have succeeded");
-    } catch (HttpSolrClient.RemoteSolrException e) {
-      // expected
-    }
+    DocCollection state = getCollectionState(collection);
+    assertEquals(State.ACTIVE, state.getSlice("shard1").getState());
+    assertEquals(State.ACTIVE, state.getSlice("shard2").getState());

-    setSliceState(SHARD1, Slice.State.INACTIVE);
+    // Can't delete an ACTIVE shard
+    expectThrows(Exception.class, () -> {
+      CollectionAdminRequest.deleteShard(collection, "shard1").process(cluster.getSolrClient());
+    });

-    clusterState = cloudClient.getZkStateReader().getClusterState();
-
-    slice1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
-
-    assertSame("Shard1 is not inactive yet.", Slice.State.INACTIVE, slice1.getState());
-
-    deleteShard(SHARD1);
-
-    confirmShardDeletion(SHARD1);
-
-    setSliceState(SHARD2, Slice.State.CONSTRUCTION);
-    deleteShard(SHARD2);
-    confirmShardDeletion(SHARD2);
+    setSliceState(collection, "shard1", Slice.State.INACTIVE);
+
+    // Can delete an INATIVE shard
+    CollectionAdminRequest.deleteShard(collection, "shard1").process(cluster.getSolrClient());
+    waitForState("Expected 'shard1' to be removed", collection, (n, c) -> {
+      return c.getSlice("shard1") == null;
+    });
+
+    // Can delete a shard under construction
+    setSliceState(collection, "shard2", Slice.State.CONSTRUCTION);
+    CollectionAdminRequest.deleteShard(collection, "shard2").process(cluster.getSolrClient());
+    waitForState("Expected 'shard2' to be removed", collection, (n, c) -> {
+      return c.getSlice("shard2") == null;
+    });
   }

-  protected void confirmShardDeletion(String shard) throws SolrServerException, KeeperException,
-      InterruptedException {
-    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
-    ClusterState clusterState = zkStateReader.getClusterState();
-    int counter = 10;
-    while (counter-- > 0) {
-      clusterState = zkStateReader.getClusterState();
-      if (clusterState.getSlice("collection1", shard) == null) {
-        break;
-      }
-      Thread.sleep(1000);
-    }
-
-    assertNull("Cluster still contains shard1 even after waiting for it to be deleted.",
-        clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1));
-  }
-
-  protected void deleteShard(String shard) throws SolrServerException, IOException,
+  protected void setSliceState(String collection, String slice, State state) throws SolrServerException, IOException,
       KeeperException, InterruptedException {

-    ModifiableSolrParams params = new ModifiableSolrParams();
-    params.set("action", CollectionParams.CollectionAction.DELETESHARD.toString());
-    params.set("collection", AbstractFullDistribZkTestBase.DEFAULT_COLLECTION);
-    params.set("shard", shard);
-    SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-
-    String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient)
-        .getBaseURL();
-    baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
-
-    try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl)) {
-      baseServer.setConnectionTimeout(15000);
-      baseServer.setSoTimeout(60000);
-      baseServer.request(request);
-    }
-  }
-
-  protected void setSliceState(String slice, State state) throws SolrServerException, IOException,
-      KeeperException, InterruptedException {
-    DistributedQueue inQueue = Overseer.getStateUpdateQueue(cloudClient.getZkStateReader().getZkClient());
+    CloudSolrClient client = cluster.getSolrClient();
+
+    // TODO can this be encapsulated better somewhere?
+    DistributedQueue inQueue = Overseer.getStateUpdateQueue(client.getZkStateReader().getZkClient());
     Map<String, Object> propMap = new HashMap<>();
     propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
     propMap.put(slice, state.toString());
-    propMap.put(ZkStateReader.COLLECTION_PROP, "collection1");
+    propMap.put(ZkStateReader.COLLECTION_PROP, collection);
     ZkNodeProps m = new ZkNodeProps(propMap);
-    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
     inQueue.offer(Utils.toJSON(m));
-    boolean transition = false;

-    for (int counter = 10; counter > 0; counter--) {
-      ClusterState clusterState = zkStateReader.getClusterState();
-      State sliceState = clusterState.getSlice("collection1", slice).getState();
-      if (sliceState == state) {
-        transition = true;
-        break;
-      }
-      Thread.sleep(1000);
-    }
-
-    if (!transition) {
-      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Could not set shard [" + slice + "] as " + state);
-    }
+    waitForState("Expected shard " + slice + " to be in state " + state.toString(), collection, (n, c) -> {
+      return c.getSlice(slice).getState() == state;
+    });
   }

   @Test
   public void testDirectoryCleanupAfterDeleteShard() throws InterruptedException, IOException, SolrServerException {
-    CollectionAdminResponse rsp = new CollectionAdminRequest.Create()
-        .setCollectionName("deleteshard_test")
-        .setRouterName("implicit")
-        .setShards("a,b,c")
-        .setReplicationFactor(1)
-        .setConfigName("conf1")
-        .process(cloudClient);
+
+    final String collection = "deleteshard_test";
+    CollectionAdminRequest.createCollectionWithImplicitRouter(collection, "conf", "a,b,c", 1)
+        .setMaxShardsPerNode(2)
+        .process(cluster.getSolrClient());

     // Get replica details
-    Replica leader = cloudClient.getZkStateReader().getLeaderRetry("deleteshard_test", "a");
-    String baseUrl = (String) leader.get("base_url");
-    String core = (String) leader.get("core");
-
-    String instanceDir;
-    String dataDir;
-
-    try (HttpSolrClient client = getHttpSolrClient(baseUrl)) {
-      CoreAdminResponse statusResp = CoreAdminRequest.getStatus(core, client);
-      NamedList r = statusResp.getCoreStatus().get(core);
-      instanceDir = (String) r.findRecursive("instanceDir");
-      dataDir = (String) r.get("dataDir");
-    }
-
-    assertTrue("Instance directory doesn't exist", FileUtils.fileExists(instanceDir));
-    assertTrue("Data directory doesn't exist", FileUtils.fileExists(dataDir));
-
-    assertEquals(3, cloudClient.getZkStateReader().getClusterState().getActiveSlices("deleteshard_test").size());
+    Replica leader = getCollectionState(collection).getLeader("a");
+
+    CoreStatus coreStatus = getCoreStatus(leader);
+    assertTrue("Instance directory doesn't exist", FileUtils.fileExists(coreStatus.getInstanceDirectory()));
+    assertTrue("Data directory doesn't exist", FileUtils.fileExists(coreStatus.getDataDirectory()));
+
+    assertEquals(3, getCollectionState(collection).getActiveSlices().size());

     // Delete shard 'a'
-    new CollectionAdminRequest.DeleteShard()
-        .setCollectionName("deleteshard_test")
-        .setShardName("a")
-        .process(cloudClient);
-
-    assertEquals(2, cloudClient.getZkStateReader().getClusterState().getActiveSlices("deleteshard_test").size());
-    assertFalse("Instance directory still exists", FileUtils.fileExists(instanceDir));
-    assertFalse("Data directory still exists", FileUtils.fileExists(dataDir));
-
-    leader = cloudClient.getZkStateReader().getLeaderRetry("deleteshard_test", "b");
-    baseUrl = (String) leader.get("base_url");
-    core = (String) leader.get("core");
-
-    try (HttpSolrClient client = getHttpSolrClient(baseUrl)) {
-      CoreAdminResponse statusResp = CoreAdminRequest.getStatus(core, client);
-      NamedList r = statusResp.getCoreStatus().get(core);
-      instanceDir = (String) r.findRecursive("instanceDir");
-      dataDir = (String) r.get("dataDir");
-    }
+    CollectionAdminRequest.deleteShard(collection, "a").process(cluster.getSolrClient());
+
+    assertEquals(2, getCollectionState(collection).getActiveSlices().size());
+    assertFalse("Instance directory still exists", FileUtils.fileExists(coreStatus.getInstanceDirectory()));
+    assertFalse("Data directory still exists", FileUtils.fileExists(coreStatus.getDataDirectory()));
+
+    leader = getCollectionState(collection).getLeader("b");
+    coreStatus = getCoreStatus(leader);

     // Delete shard 'b'
-    new CollectionAdminRequest.DeleteShard()
-        .setCollectionName("deleteshard_test")
-        .setShardName("b")
+    CollectionAdminRequest.deleteShard(collection, "b")
         .setDeleteDataDir(false)
         .setDeleteInstanceDir(false)
-        .process(cloudClient);
-
-    assertEquals(1, cloudClient.getZkStateReader().getClusterState().getActiveSlices("deleteshard_test").size());
-    assertTrue("Instance directory still exists", FileUtils.fileExists(instanceDir));
-    assertTrue("Data directory still exists", FileUtils.fileExists(dataDir));
+        .process(cluster.getSolrClient());
+
+    assertEquals(1, getCollectionState(collection).getActiveSlices().size());
+    assertTrue("Instance directory still exists", FileUtils.fileExists(coreStatus.getInstanceDirectory()));
+    assertTrue("Data directory still exists", FileUtils.fileExists(coreStatus.getDataDirectory()));
   }
 }
@@ -17,78 +17,62 @@
 package org.apache.solr.cloud;

-import static org.apache.solr.client.solrj.SolrRequest.METHOD.POST;
-import static org.apache.solr.common.params.CommonParams.COLLECTIONS_HANDLER_PATH;

-import java.lang.invoke.MethodHandles;
 import java.util.Map;

-import org.apache.solr.client.solrj.SolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient;
-import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.ConfigSetAdminRequest;
 import org.apache.solr.client.solrj.request.GenericSolrRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
-import org.apache.solr.client.solrj.response.ConfigSetAdminResponse;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.Utils;
 import org.apache.zookeeper.KeeperException;
+import org.junit.BeforeClass;
 import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;

-public class OverseerModifyCollectionTest extends AbstractFullDistribZkTestBase {
+import static org.apache.solr.client.solrj.SolrRequest.METHOD.POST;
-  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+import static org.apache.solr.common.params.CommonParams.COLLECTIONS_HANDLER_PATH;

+public class OverseerModifyCollectionTest extends SolrCloudTestCase {

+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(2)
+        .addConfig("conf1", configset("cloud-minimal"))
+        .addConfig("conf2", configset("cloud-minimal"))
+        .configure();
+  }

   @Test
   public void testModifyColl() throws Exception {
-    String collName = "modifyColl";
-    String newConfName = "conf" + random().nextInt();
+    final String collName = "modifyColl";
-    String oldConfName = "conf1";
-    try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
+    CollectionAdminRequest.createCollection(collName, "conf1", 1, 2)
-      CollectionAdminResponse rsp;
+        .process(cluster.getSolrClient());
-      CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collName, oldConfName, 1, 2);
-      rsp = create.process(client);
+    // TODO create a modifyCollection() method on CollectionAdminRequest
-      assertEquals(0, rsp.getStatus());
+    ModifiableSolrParams p1 = new ModifiableSolrParams();
-      assertTrue(rsp.isSuccess());
+    p1.add("collection", collName);
+    p1.add("action", "MODIFYCOLLECTION");
-      ConfigSetAdminRequest.Create createConfig = new ConfigSetAdminRequest.Create()
+    p1.add("collection.configName", "conf2");
-          .setBaseConfigSetName(oldConfName)
+    cluster.getSolrClient().request(new GenericSolrRequest(POST, COLLECTIONS_HANDLER_PATH, p1));
-          .setConfigSetName(newConfName);
+    assertEquals("conf2", getConfigNameFromZk(collName));
-      ConfigSetAdminResponse configRsp = createConfig.process(client);

-      assertEquals(0, configRsp.getStatus());

-      ModifiableSolrParams p = new ModifiableSolrParams();
-      p.add("collection", collName);
-      p.add("action", "MODIFYCOLLECTION");
-      p.add("collection.configName", newConfName);
-      client.request(new GenericSolrRequest(POST, COLLECTIONS_HANDLER_PATH, p));
-    }

-    assertEquals(newConfName, getConfigNameFromZk(collName));

     //Try an invalid config name
-    try (SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)))) {
+    ModifiableSolrParams p2 = new ModifiableSolrParams();
-      ModifiableSolrParams p = new ModifiableSolrParams();
+    p2.add("collection", collName);
-      p.add("collection", collName);
+    p2.add("action", "MODIFYCOLLECTION");
-      p.add("action", "MODIFYCOLLECTION");
+    p2.add("collection.configName", "notARealConfigName");
-      p.add("collection.configName", "notARealConfigName");
+    Exception e = expectThrows(Exception.class, () -> {
-      try{
+      cluster.getSolrClient().request(new GenericSolrRequest(POST, COLLECTIONS_HANDLER_PATH, p2));
-        client.request(new GenericSolrRequest(POST, COLLECTIONS_HANDLER_PATH, p));
+    });
-        fail("Exception should be thrown");
-      } catch(RemoteSolrException e) {
+    assertTrue(e.getMessage(), e.getMessage().contains("Can not find the specified config set"));
-        assertTrue(e.getMessage(), e.getMessage().contains("Can not find the specified config set"));
-      }
-    }

   }

   private String getConfigNameFromZk(String collName) throws KeeperException, InterruptedException {
-    byte[] b = cloudClient.getZkStateReader().getZkClient().getData(ZkStateReader.getCollectionPathRoot(collName), null, null, false);
+    byte[] b = zkClient().getData(ZkStateReader.getCollectionPathRoot(collName), null, null, false);
     Map confData = (Map) Utils.fromJSON(b);
     return (String) confData.get(ZkController.CONFIGNAME_PROP);
   }
 }
@@ -16,118 +16,95 @@
  */
 package org.apache.solr.cloud;

-import org.apache.lucene.util.LuceneTestCase;
+import java.lang.invoke.MethodHandles;
-import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
+import java.util.Collections;
-import org.apache.solr.client.solrj.SolrRequest;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;

 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
-import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.cloud.overseer.OverseerAction;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkNodeProps;
-import org.apache.solr.common.params.CollectionParams.CollectionAction;
-import org.apache.solr.common.params.MapSolrParams;
-import org.apache.solr.common.params.SolrParams;
 import org.apache.solr.common.util.Utils;
 import org.apache.solr.util.TimeOut;
 import org.apache.zookeeper.data.Stat;
+import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import java.io.IOException;
-import java.lang.invoke.MethodHandles;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;

 import static org.apache.solr.cloud.OverseerCollectionConfigSetProcessor.getLeaderNode;
 import static org.apache.solr.cloud.OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames;
-import static org.apache.solr.cloud.OverseerCollectionMessageHandler.NUM_SLICES;
+import static org.hamcrest.CoreMatchers.not;
-import static org.apache.solr.common.util.Utils.makeMap;
-import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
-import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;

-@LuceneTestCase.Slow
+public class OverseerRolesTest extends SolrCloudTestCase {
-@SuppressSSL(bugUrl = "SOLR-5776")
-public class OverseerRolesTest extends AbstractFullDistribZkTestBase{

   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

-  protected String getSolrXml() {
+  @BeforeClass
-    return "solr.xml";
+  public static void setupCluster() throws Exception {
+    configureCluster(4)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
   }

-  public OverseerRolesTest() {
+  @Before
-    sliceCount = 2;
+  public void clearAllOverseerRoles() throws Exception {
-    fixShardCount(TEST_NIGHTLY ? 6 : 2);
+    for (String node : OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames(zkClient())) {
+      CollectionAdminRequest.removeRole(node, "overseer").process(cluster.getSolrClient());
+    }
   }

   @Test
-  public void test() throws Exception {
+  public void testQuitCommand() throws Exception {
-    try (CloudSolrClient client = createCloudClient(null)) {
-      testQuitCommand(client);
-      testOverseerRole(client);
-    }
-  }

-  private void testQuitCommand(CloudSolrClient client) throws Exception{
+    SolrZkClient zk = zkClient();
-    String collectionName = "testOverseerQuit";
+    byte[] data = zk.getData("/overseer_elect/leader", null, new Stat(), true);

-    createCollection(collectionName, client);

-    waitForRecoveriesToFinish(collectionName, false);

-    SolrZkClient zk = client.getZkStateReader().getZkClient();
-    byte[] data = new byte[0];
-    data = zk.getData("/overseer_elect/leader", null, new Stat(), true);
     Map m = (Map) Utils.fromJSON(data);
     String s = (String) m.get("id");
     String leader = LeaderElector.getNodeName(s);
-    Overseer.getStateUpdateQueue(zk).offer(Utils.toJSON(new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.QUIT.toLower())));
+    log.info("Current overseer: {}", leader);
+    Overseer.getStateUpdateQueue(zk)
+        .offer(Utils.toJSON(new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.QUIT.toLower(),
+            "id", s)));
     final TimeOut timeout = new TimeOut(10, TimeUnit.SECONDS);
-    String newLeader=null;
+    String newLeader = null;
     for(;! timeout.hasTimedOut();){
       newLeader = OverseerCollectionConfigSetProcessor.getLeaderNode(zk);
-      if(newLeader!=null && !newLeader.equals(leader)) break;
+      if (newLeader != null && !newLeader.equals(leader))
+        break;
       Thread.sleep(100);
     }
-    assertNotSame( "Leader not changed yet",newLeader,leader);
+    assertThat("Leader not changed yet", newLeader, not(leader));

+    assertTrue("The old leader should have rejoined election",
+        OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames(zk).contains(leader));
-    assertTrue("The old leader should have rejoined election ", OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames(zk).contains(leader));
   }

+  @Test
+  public void testOverseerRole() throws Exception {

+    List<String> l = OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames(zkClient()) ;

-  private void testOverseerRole(CloudSolrClient client) throws Exception {
-    String collectionName = "testOverseerCol";

-    createCollection(collectionName, client);

-    waitForRecoveriesToFinish(collectionName, false);
-    List<String> l = OverseerCollectionConfigSetProcessor.getSortedOverseerNodeNames(client.getZkStateReader().getZkClient()) ;

     log.info("All nodes {}", l);
-    String currentLeader = OverseerCollectionConfigSetProcessor.getLeaderNode(client.getZkStateReader().getZkClient());
+    String currentLeader = OverseerCollectionConfigSetProcessor.getLeaderNode(zkClient());
     log.info("Current leader {} ", currentLeader);
     l.remove(currentLeader);

     Collections.shuffle(l, random());
     String overseerDesignate = l.get(0);
-    log.info("overseerDesignate {}",overseerDesignate);
+    log.info("overseerDesignate {}", overseerDesignate);
-    setOverseerRole(client, CollectionAction.ADDROLE,overseerDesignate);
+    CollectionAdminRequest.addRole(overseerDesignate, "overseer").process(cluster.getSolrClient());

     TimeOut timeout = new TimeOut(15, TimeUnit.SECONDS);

     boolean leaderchanged = false;
-    for(;!timeout.hasTimedOut();){
+    for (;!timeout.hasTimedOut();) {
-      if(overseerDesignate.equals(OverseerCollectionConfigSetProcessor.getLeaderNode(client.getZkStateReader().getZkClient()))){
+      if (overseerDesignate.equals(OverseerCollectionConfigSetProcessor.getLeaderNode(zkClient()))) {
         log.info("overseer designate is the new overseer");
         leaderchanged =true;
         break;
@@ -136,36 +113,29 @@ public class OverseerRolesTest extends AbstractFullDistribZkTestBase{
     }
     assertTrue("could not set the new overseer . expected "+
         overseerDesignate + " current order : " +
-        getSortedOverseerNodeNames(client.getZkStateReader().getZkClient()) +
+        getSortedOverseerNodeNames(zkClient()) +
-        " ldr :"+ OverseerCollectionConfigSetProcessor.getLeaderNode(client.getZkStateReader().getZkClient()) ,leaderchanged);
+        " ldr :"+ OverseerCollectionConfigSetProcessor.getLeaderNode(zkClient()) ,leaderchanged);

     //add another node as overseer

     l.remove(overseerDesignate);

     Collections.shuffle(l, random());

     String anotherOverseer = l.get(0);
     log.info("Adding another overseer designate {}", anotherOverseer);
-    setOverseerRole(client, CollectionAction.ADDROLE, anotherOverseer);
+    CollectionAdminRequest.addRole(anotherOverseer, "overseer").process(cluster.getSolrClient());

-    String currentOverseer = getLeaderNode(client.getZkStateReader().getZkClient());
+    String currentOverseer = getLeaderNode(zkClient());

     log.info("Current Overseer {}", currentOverseer);

-    String hostPort = currentOverseer.substring(0,currentOverseer.indexOf('_'));
+    String hostPort = currentOverseer.substring(0, currentOverseer.indexOf('_'));

     StringBuilder sb = new StringBuilder();
-    //
-    //
     log.info("hostPort : {}", hostPort);

     JettySolrRunner leaderJetty = null;

-    for (JettySolrRunner jetty : jettys) {
+    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
       String s = jetty.getBaseUrl().toString();
       log.info("jetTy {}",s);
       sb.append(s).append(" , ");
@@ -178,49 +148,20 @@ public class OverseerRolesTest extends AbstractFullDistribZkTestBase{
     assertNotNull("Could not find a jetty2 kill", leaderJetty);

     log.info("leader node {}", leaderJetty.getBaseUrl());
-    log.info ("current election Queue",
+    log.info("current election Queue",
-        OverseerCollectionConfigSetProcessor.getSortedElectionNodes(client.getZkStateReader().getZkClient(),
+        OverseerCollectionConfigSetProcessor.getSortedElectionNodes(zkClient(), "/overseer_elect/election"));
-            "/overseer_elect/election"));
     ChaosMonkey.stop(leaderJetty);
     timeout = new TimeOut(10, TimeUnit.SECONDS);
     leaderchanged = false;
     for (; !timeout.hasTimedOut(); ) {
-      currentOverseer = getLeaderNode(client.getZkStateReader().getZkClient());
+      currentOverseer = getLeaderNode(zkClient());
       if (anotherOverseer.equals(currentOverseer)) {
         leaderchanged = true;
         break;
       }
       Thread.sleep(100);
     }
-    assertTrue("New overseer designate has not become the overseer, expected : " + anotherOverseer + "actual : " + getLeaderNode(client.getZkStateReader().getZkClient()), leaderchanged);
+    assertTrue("New overseer designate has not become the overseer, expected : " + anotherOverseer + "actual : " + getLeaderNode(zkClient()), leaderchanged);
   }

-  private void setOverseerRole(CloudSolrClient client, CollectionAction action, String overseerDesignate) throws Exception, IOException {
-    log.info("Adding overseer designate {} ", overseerDesignate);
-    Map m = makeMap(
-        "action", action.toString().toLowerCase(Locale.ROOT),
-        "role", "overseer",
-        "node", overseerDesignate);
-    SolrParams params = new MapSolrParams(m);
-    SolrRequest request = new QueryRequest(params);
-    request.setPath("/admin/collections");
-    client.request(request);
-  }

-  protected void createCollection(String COLL_NAME, CloudSolrClient client) throws Exception {
-    int replicationFactor = 2;
-    int numShards = 4;
-    int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrClient()
-        .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;

-    Map<String, Object> props = makeMap(
-        REPLICATION_FACTOR, replicationFactor,
-        MAX_SHARDS_PER_NODE, maxShardsPerNode,
-        NUM_SLICES, numShards);
-    Map<String,List<Integer>> collectionInfos = new HashMap<>();
-    createCollection(collectionInfos, COLL_NAME, props, client);
-  }

 }
@@ -17,74 +17,56 @@
 package org.apache.solr.cloud;

 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
-import org.apache.solr.client.solrj.response.CollectionAdminResponse;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.SimpleOrderedMap;
+import org.junit.BeforeClass;
 import org.junit.Test;

-public class OverseerStatusTest extends BasicDistributedZkTest {
+public class OverseerStatusTest extends SolrCloudTestCase {

-  public OverseerStatusTest() {
+  @BeforeClass
-    schemaString = "schema15.xml"; // we need a string id
+  public static void setupCluster() throws Exception {
-    sliceCount = 1;
+    configureCluster(2)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();;
   }

   @Test
-  @ShardsFixed(num = 1)
   public void test() throws Exception {

-    waitForThingsToLevelOut(15);

     // find existing command counts because collection may be created by base test class too
     int numCollectionCreates = 0, numOverseerCreates = 0;
-    NamedList<Object> resp = new CollectionAdminRequest.OverseerStatus().process(cloudClient).getResponse();
-    if (resp != null) {
-      NamedList<Object> collection_operations = (NamedList<Object>) resp.get("collection_operations");
-      if (collection_operations != null) {
-        SimpleOrderedMap<Object> createcollection = (SimpleOrderedMap<Object>) collection_operations.get(CollectionParams.CollectionAction.CREATE.toLower());
-        if (createcollection != null && createcollection.get("requests") != null) {
-          numCollectionCreates = (Integer) createcollection.get("requests");
-        }
-        NamedList<Object> overseer_operations = (NamedList<Object>) resp.get("overseer_operations");
-        if (overseer_operations != null) {
-          createcollection = (SimpleOrderedMap<Object>) overseer_operations.get(CollectionParams.CollectionAction.CREATE.toLower());
-          if (createcollection != null && createcollection.get("requests") != null) {
-            numOverseerCreates = (Integer) createcollection.get("requests");
-          }
-        }
-      }
-    }

     String collectionName = "overseer_status_test";
-    CollectionAdminResponse response = createCollection(collectionName, 1, 1, 1);
+    CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1).process(cluster.getSolrClient());
-    resp = new CollectionAdminRequest.OverseerStatus().process(cloudClient).getResponse();
+    NamedList<Object> resp = new CollectionAdminRequest.OverseerStatus().process(cluster.getSolrClient()).getResponse();
     NamedList<Object> collection_operations = (NamedList<Object>) resp.get("collection_operations");
     NamedList<Object> overseer_operations = (NamedList<Object>) resp.get("overseer_operations");
-    SimpleOrderedMap<Object> createcollection = (SimpleOrderedMap<Object>) collection_operations.get(CollectionParams.CollectionAction.CREATE.toLower());
+    SimpleOrderedMap<Object> createcollection
+        = (SimpleOrderedMap<Object>) collection_operations.get(CollectionParams.CollectionAction.CREATE.toLower());
     assertEquals("No stats for create in OverseerCollectionProcessor", numCollectionCreates + 1, createcollection.get("requests"));
     createcollection = (SimpleOrderedMap<Object>) overseer_operations.get(CollectionParams.CollectionAction.CREATE.toLower());
     assertEquals("No stats for create in Overseer", numOverseerCreates + 1, createcollection.get("requests"));

     // Reload the collection
-    new CollectionAdminRequest.Reload().setCollectionName(collectionName).process(cloudClient);
+    CollectionAdminRequest.reloadCollection(collectionName).process(cluster.getSolrClient());

+    resp = new CollectionAdminRequest.OverseerStatus().process(cluster.getSolrClient()).getResponse();
-    resp = new CollectionAdminRequest.OverseerStatus().process(cloudClient).getResponse();
     collection_operations = (NamedList<Object>) resp.get("collection_operations");
     SimpleOrderedMap<Object> reload = (SimpleOrderedMap<Object>) collection_operations.get(CollectionParams.CollectionAction.RELOAD.toLower());
     assertEquals("No stats for reload in OverseerCollectionProcessor", 1, reload.get("requests"));

     try {
-      new CollectionAdminRequest.SplitShard()
+      CollectionAdminRequest.splitShard("non_existent_collection")
-          .setCollectionName("non_existent_collection")
+          .setShardName("non_existent_shard")
-          .setShardName("non_existent_shard")
+          .process(cluster.getSolrClient());
-          .process(cloudClient);
       fail("Split shard for non existent collection should have failed");
     } catch (Exception e) {
       // expected because we did not correctly specify required params for split
     }
-    resp = new CollectionAdminRequest.OverseerStatus().process(cloudClient).getResponse();
+    resp = new CollectionAdminRequest.OverseerStatus().process(cluster.getSolrClient()).getResponse();
     collection_operations = (NamedList<Object>) resp.get("collection_operations");
     SimpleOrderedMap<Object> split = (SimpleOrderedMap<Object>) collection_operations.get(CollectionParams.CollectionAction.SPLITSHARD.toLower());
     assertEquals("No stats for split in OverseerCollectionProcessor", 1, split.get("errors"));
@@ -111,6 +93,5 @@ public class OverseerStatusTest extends BasicDistributedZkTest {
     assertNotNull(updateState.get("errors"));
     assertNotNull(updateState.get("avgTimePerRequest"));

-    waitForThingsToLevelOut(15);
   }
 }
@@ -16,58 +16,43 @@
  */
 package org.apache.solr.cloud;

-import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.apache.solr.client.solrj.SolrClient;
+import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.common.SolrException;
 import org.apache.solr.common.SolrInputDocument;
+import org.junit.BeforeClass;
 import org.junit.Test;

-import java.util.ArrayList;
-import java.util.List;

 import static org.junit.internal.matchers.StringContains.containsString;

 /**
  * Verify that remote (proxied) queries return proper error messages
  */
-@Slow
+public class RemoteQueryErrorTest extends SolrCloudTestCase {
-public class RemoteQueryErrorTest extends AbstractFullDistribZkTestBase {

-  public RemoteQueryErrorTest() {
+  @BeforeClass
-    super();
+  public static void setupCluster() throws Exception {
-    sliceCount = 1;
+    configureCluster(3)
-    fixShardCount(random().nextBoolean() ? 3 : 4);
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
   }

+  // TODO add test for CloudSolrClient as well

   @Test
   public void test() throws Exception {
-    handle.clear();
-    handle.put("timestamp", SKIPVAL);

-    waitForThingsToLevelOut(15);

-    del("*:*");
+    CollectionAdminRequest.createCollection("collection", "conf", 2, 1).process(cluster.getSolrClient());

-    createCollection("collection2", 2, 1, 10);

-    List<Integer> numShardsNumReplicaList = new ArrayList<>(2);
-    numShardsNumReplicaList.add(2);
-    numShardsNumReplicaList.add(1);
-    checkForCollection("collection2", numShardsNumReplicaList, null);
-    waitForRecoveriesToFinish("collection2", true);

-    for (SolrClient solrClient : clients) {
+    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
-      try {
+      try (SolrClient client = jetty.newClient()) {
-        SolrInputDocument emptyDoc = new SolrInputDocument();
+        SolrException e = expectThrows(SolrException.class, () -> {
-        solrClient.add(emptyDoc);
+          client.add("collection", new SolrInputDocument());
-        fail("Expected unique key exception");
+        });
-      } catch (SolrException ex) {
+        assertThat(e.getMessage(), containsString("Document is missing mandatory uniqueKey field: id"));
-        assertThat(ex.getMessage(), containsString("Document is missing mandatory uniqueKey field: id"));
-      } catch(Exception ex) {
-        fail("Expected a SolrException to occur, instead received: " + ex.getClass());
-      } finally {
-        solrClient.close();
       }
     }

   }
 }
@@ -16,13 +16,15 @@
  */
 package org.apache.solr.cloud;

-import java.io.IOException;
 import java.lang.invoke.MethodHandles;

 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.params.ShardParams;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -34,41 +36,47 @@ import static org.hamcrest.CoreMatchers.is;
  * and also asserts that a meaningful exception is thrown when shards.tolerant=false
  * See SOLR-7566
  */
-public class TestDownShardTolerantSearch extends AbstractFullDistribZkTestBase {
+public class TestDownShardTolerantSearch extends SolrCloudTestCase {

   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

-  public TestDownShardTolerantSearch() {
+  @BeforeClass
-    sliceCount = 2;
+  public static void setupCluster() throws Exception {
+    configureCluster(2)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
   }

   @Test
-  @ShardsFixed(num = 2)
   public void searchingShouldFailWithoutTolerantSearchSetToTrue() throws Exception {
-    waitForRecoveriesToFinish(true);

-    indexAbunchOfDocs();
+    CollectionAdminRequest.createCollection("tolerant", "conf", 2, 1)
-    commit();
+        .process(cluster.getSolrClient());
-    QueryResponse response = cloudClient.query(new SolrQuery("*:*").setRows(1));

+    UpdateRequest update = new UpdateRequest();
+    for (int i = 0; i < 100; i++) {
+      update.add("id", Integer.toString(i));
+    }
+    update.commit(cluster.getSolrClient(), "tolerant");

+    QueryResponse response = cluster.getSolrClient().query("tolerant", new SolrQuery("*:*").setRows(1));
     assertThat(response.getStatus(), is(0));
-    assertThat(response.getResults().getNumFound(), is(66L));
+    assertThat(response.getResults().getNumFound(), is(100L));

-    ChaosMonkey.kill(shardToJetty.get(SHARD1).get(0));
+    cluster.stopJettySolrRunner(0);

-    response = cloudClient.query(new SolrQuery("*:*").setRows(1).setParam(ShardParams.SHARDS_TOLERANT, true));
+    response = cluster.getSolrClient().query("tolerant", new SolrQuery("*:*").setRows(1).setParam(ShardParams.SHARDS_TOLERANT, true));
     assertThat(response.getStatus(), is(0));
     assertTrue(response.getResults().getNumFound() > 0);

     try {
-      cloudClient.query(new SolrQuery("*:*").setRows(1).setParam(ShardParams.SHARDS_TOLERANT, false));
+      cluster.getSolrClient().query("tolerant", new SolrQuery("*:*").setRows(1).setParam(ShardParams.SHARDS_TOLERANT, false));
       fail("Request should have failed because we killed shard1 jetty");
     } catch (SolrServerException e) {
       log.info("error from server", e);
       assertNotNull(e.getCause());
       assertTrue("Error message from server should have the name of the down shard",
-          e.getCause().getMessage().contains(SHARD1));
+          e.getCause().getMessage().contains("shard"));
-    } catch (IOException e) {
-      e.printStackTrace();
     }
   }
 }
@@ -16,34 +16,32 @@
  */
 package org.apache.solr.cloud;

-import org.apache.lucene.util.LuceneTestCase;
-import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
+import org.junit.BeforeClass;
 import org.junit.Test;

-@LuceneTestCase.Slow
+public class TestExclusionRuleCollectionAccess extends SolrCloudTestCase {
-public class TestExclusionRuleCollectionAccess extends AbstractFullDistribZkTestBase {

-  public TestExclusionRuleCollectionAccess() {
+  @BeforeClass
-    schemaString = "schema15.xml"; // we need a string id
+  public static void setupCluster() throws Exception {
-    sliceCount = 1;
+    configureCluster(1)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
   }

   @Test
   public void doTest() throws Exception {
-    CollectionAdminRequest.Create req = new CollectionAdminRequest.Create();
-    req.setCollectionName("css33");
-    req.setNumShards(1);
-    req.process(cloudClient);

-    waitForRecoveriesToFinish("css33", false);

-    try (SolrClient c = createCloudClient("css33")) {
-      c.add(getDoc("id", "1"));
-      c.commit();

-      assertEquals("Should have returned 1 result", 1, c.query(params("q", "*:*", "collection", "css33")).getResults().getNumFound());
+    CollectionAdminRequest.createCollection("css33", "conf", 1, 1).process(cluster.getSolrClient());
-    }

+    new UpdateRequest()
+        .add("id", "1")
+        .commit(cluster.getSolrClient(), "css33");

+    assertEquals("Should have returned 1 result", 1,
+        cluster.getSolrClient().query("css33", params("q", "*:*", "collection", "css33")).getResults().getNumFound());

   }

 }
@@ -18,19 +18,21 @@ package org.apache.solr.security;

 import javax.servlet.ServletRequest;
 import javax.servlet.http.HttpServletRequest;

 import java.lang.invoke.MethodHandles;
 import java.security.Principal;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Predicate;

-import org.apache.solr.SolrTestCaseJ4;
+import org.apache.http.client.HttpClient;
 import org.apache.solr.client.solrj.embedded.JettySolrRunner;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.QueryRequest;
-import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
+import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.common.cloud.ZkStateReader;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.Utils;
+import org.junit.After;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -39,27 +41,32 @@ import static java.util.Collections.singletonMap;
 import static org.apache.solr.common.util.Utils.makeMap;
 import static org.apache.solr.security.TestAuthorizationFramework.verifySecurityStatus;

-@SolrTestCaseJ4.SuppressSSL
+public class PKIAuthenticationIntegrationTest extends SolrCloudTestCase {
-public class PKIAuthenticationIntegrationTest extends AbstractFullDistribZkTestBase {

   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

-  static final int TIMEOUT = 10000;
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    configureCluster(2)
+        .addConfig("conf", configset("cloud-minimal"))
+        .configure();
+  }

   @Test
   public void testPkiAuth() throws Exception {
-    waitForThingsToLevelOut(10);

+    CollectionAdminRequest.createCollection("collection", "conf", 2, 1).process(cluster.getSolrClient());

+    // TODO make a SolrJ helper class for this
     byte[] bytes = Utils.toJSON(makeMap("authorization", singletonMap("class", MockAuthorizationPlugin.class.getName()),
         "authentication", singletonMap("class", MockAuthenticationPlugin.class.getName())));
+    zkClient().setData(ZkStateReader.SOLR_SECURITY_CONF_PATH, bytes, true);

-    try (ZkStateReader zkStateReader = new ZkStateReader(zkServer.getZkAddress(),
+    HttpClient httpClient = cluster.getSolrClient().getHttpClient();
-        TIMEOUT, TIMEOUT)) {
+    for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
-      zkStateReader.getZkClient().setData(ZkStateReader.SOLR_SECURITY_CONF_PATH, bytes, true);
-    }
-    for (JettySolrRunner jetty : jettys) {
       String baseUrl = jetty.getBaseUrl().toString();
-      verifySecurityStatus(cloudClient.getLbClient().getHttpClient(), baseUrl + "/admin/authorization", "authorization/class", MockAuthorizationPlugin.class.getName(), 20);
+      verifySecurityStatus(httpClient, baseUrl + "/admin/authorization", "authorization/class", MockAuthorizationPlugin.class.getName(), 20);
-      verifySecurityStatus(cloudClient.getLbClient().getHttpClient(), baseUrl + "/admin/authentication", "authentication.enabled", "true", 20);
+      verifySecurityStatus(httpClient, baseUrl + "/admin/authentication", "authentication.enabled", "true", 20);
     }
     log.info("Starting test");
     ModifiableSolrParams params = new ModifiableSolrParams();
@@ -95,13 +102,12 @@ public class PKIAuthenticationIntegrationTest extends AbstractFullDistribZkTestB
       }
     };
     QueryRequest query = new QueryRequest(params);
-    query.process(cloudClient);
+    query.process(cluster.getSolrClient(), "collection");
     assertTrue("all nodes must get the user solr , no:of nodes got solr : " + count.get(),count.get() > 2);
   }

-  @Override
+  @After
   public void distribTearDown() throws Exception {
-    super.distribTearDown();
     MockAuthenticationPlugin.predicate = null;
     MockAuthorizationPlugin.predicate = null;
   }
@@ -280,6 +280,8 @@ public abstract class CollectionAdminRequest<T extends CollectionAdminResponse>

     public CollectionAdminRoleRequest(CollectionAction action, String node, String role) {
       super(action);
+      this.node = node;
+      this.role = role;
     }

     @Override