mirror of https://github.com/apache/lucene.git
SOLR-9321: Remove tests, they were accidentally added because of cherry-pick
commit 292fca651a (parent 93ed4770ac)

@@ -1,389 +0,0 @@ TestMiniSolrCloudCluster.java (deleted)
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.cloud;

import java.lang.invoke.MethodHandles;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;

import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.embedded.JettyConfig;
import org.apache.solr.client.solrj.embedded.JettyConfig.Builder;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.core.CoreDescriptor;
import org.apache.solr.index.TieredMergePolicyFactory;
import org.apache.solr.util.RevertDefaultThreadHandlerRule;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Test of the MiniSolrCloudCluster functionality. Keep in mind,
 * MiniSolrCloudCluster is designed to be used outside of the Lucene test
 * hierarchy.
 */
@SuppressSysoutChecks(bugUrl = "Solr logs to JUL")
public class TestMiniSolrCloudCluster extends LuceneTestCase {

  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
  protected int NUM_SERVERS = 5;
  protected int NUM_SHARDS = 2;
  protected int REPLICATION_FACTOR = 2;

  public TestMiniSolrCloudCluster() {
    NUM_SERVERS = 5;
    NUM_SHARDS = 2;
    REPLICATION_FACTOR = 2;
  }

  @BeforeClass
  public static void setupHackNumerics() { // SOLR-10916
    SolrTestCaseJ4.randomizeNumericTypesProperties();
  }
  @AfterClass
  public static void clearHackNumerics() { // SOLR-10916
    SolrTestCaseJ4.clearNumericTypesProperties();
  }

  @Rule
  public TestRule solrTestRules = RuleChain
      .outerRule(new SystemPropertiesRestoreRule());

  @ClassRule
  public static TestRule solrClassRules = RuleChain.outerRule(
      new SystemPropertiesRestoreRule()).around(
      new RevertDefaultThreadHandlerRule());

  private MiniSolrCloudCluster createMiniSolrCloudCluster() throws Exception {
    Builder jettyConfig = JettyConfig.builder();
    jettyConfig.waitForLoadingCoresToFinish(null);
    return new MiniSolrCloudCluster(NUM_SERVERS, createTempDir(), jettyConfig.build());
  }

  private void createCollection(MiniSolrCloudCluster miniCluster, String collectionName, String createNodeSet, String asyncId,
      Boolean indexToPersist, Map<String,String> collectionProperties) throws Exception {
    String configName = "solrCloudCollectionConfig";
    miniCluster.uploadConfigSet(SolrTestCaseJ4.TEST_PATH().resolve("collection1").resolve("conf"), configName);

    final boolean persistIndex = (indexToPersist != null ? indexToPersist.booleanValue() : random().nextBoolean());
    if (collectionProperties == null) {
      collectionProperties = new HashMap<>();
    }
    collectionProperties.putIfAbsent(CoreDescriptor.CORE_CONFIG, "solrconfig-tlog.xml");
    collectionProperties.putIfAbsent("solr.tests.maxBufferedDocs", "100000");
    collectionProperties.putIfAbsent("solr.tests.ramBufferSizeMB", "100");
    // use non-test classes so RandomizedRunner isn't necessary
    collectionProperties.putIfAbsent(SolrTestCaseJ4.SYSTEM_PROPERTY_SOLR_TESTS_MERGEPOLICYFACTORY, TieredMergePolicyFactory.class.getName());
    collectionProperties.putIfAbsent("solr.tests.mergeScheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
    collectionProperties.putIfAbsent("solr.directoryFactory", (persistIndex ? "solr.StandardDirectoryFactory" : "solr.RAMDirectoryFactory"));

    if (asyncId == null) {
      CollectionAdminRequest.createCollection(collectionName, configName, NUM_SHARDS, REPLICATION_FACTOR)
          .setCreateNodeSet(createNodeSet)
          .setProperties(collectionProperties)
          .process(miniCluster.getSolrClient());
    }
    else {
      CollectionAdminRequest.createCollection(collectionName, configName, NUM_SHARDS, REPLICATION_FACTOR)
          .setCreateNodeSet(createNodeSet)
          .setProperties(collectionProperties)
          .processAndWait(miniCluster.getSolrClient(), 30);
    }
  }

  @Test
  public void testCollectionCreateSearchDelete() throws Exception {

    final String collectionName = "testcollection";
    MiniSolrCloudCluster miniCluster = createMiniSolrCloudCluster();

    final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();

    try {
      assertNotNull(miniCluster.getZkServer());
      List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
      assertEquals(NUM_SERVERS, jettys.size());
      for (JettySolrRunner jetty : jettys) {
        assertTrue(jetty.isRunning());
      }

      // shut down a server
      log.info("#### Stopping a server");
      JettySolrRunner stoppedServer = miniCluster.stopJettySolrRunner(0);
      assertTrue(stoppedServer.isStopped());
      assertEquals(NUM_SERVERS - 1, miniCluster.getJettySolrRunners().size());

      // create a server
      log.info("#### Starting a server");
      JettySolrRunner startedServer = miniCluster.startJettySolrRunner();
      assertTrue(startedServer.isRunning());
      assertEquals(NUM_SERVERS, miniCluster.getJettySolrRunners().size());

      // create collection
      log.info("#### Creating a collection");
      final String asyncId = (random().nextBoolean() ? null : "asyncId("+collectionName+".create)="+random().nextInt());
      createCollection(miniCluster, collectionName, null, asyncId, null, null);

      ZkStateReader zkStateReader = miniCluster.getSolrClient().getZkStateReader();
      AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);

      // modify/query collection
      log.info("#### Updating and querying collection");
      cloudSolrClient.setDefaultCollection(collectionName);
      SolrInputDocument doc = new SolrInputDocument();
      doc.setField("id", "1");
      cloudSolrClient.add(doc);
      cloudSolrClient.commit();
      SolrQuery query = new SolrQuery();
      query.setQuery("*:*");
      QueryResponse rsp = cloudSolrClient.query(query);
      assertEquals(1, rsp.getResults().getNumFound());

      // remove a server not hosting any replicas
      zkStateReader.forceUpdateCollection(collectionName);
      ClusterState clusterState = zkStateReader.getClusterState();
      HashMap<String, JettySolrRunner> jettyMap = new HashMap<String, JettySolrRunner>();
      for (JettySolrRunner jetty : miniCluster.getJettySolrRunners()) {
        String key = jetty.getBaseUrl().toString().substring((jetty.getBaseUrl().getProtocol() + "://").length());
        jettyMap.put(key, jetty);
      }
      Collection<Slice> slices = clusterState.getCollection(collectionName).getSlices();
      // drop the servers hosting replicas, leaving only replica-free nodes in the map
      for (Slice slice : slices) {
        jettyMap.remove(slice.getLeader().getNodeName().replace("_solr", "/solr"));
        for (Replica replica : slice.getReplicas()) {
          jettyMap.remove(replica.getNodeName().replace("_solr", "/solr"));
        }
      }
      assertTrue("Expected to find a node without a replica", jettyMap.size() > 0);
      log.info("#### Stopping a server");
      JettySolrRunner jettyToStop = jettyMap.entrySet().iterator().next().getValue();
      jettys = miniCluster.getJettySolrRunners();
      for (int i = 0; i < jettys.size(); ++i) {
        if (jettys.get(i).equals(jettyToStop)) {
          miniCluster.stopJettySolrRunner(i);
          assertEquals(NUM_SERVERS - 1, miniCluster.getJettySolrRunners().size());
        }
      }

      // re-create a server (to restore original NUM_SERVERS count)
      log.info("#### Starting a server");
      startedServer = miniCluster.startJettySolrRunner(jettyToStop);
      assertTrue(startedServer.isRunning());
      assertEquals(NUM_SERVERS, miniCluster.getJettySolrRunners().size());

      CollectionAdminRequest.deleteCollection(collectionName).process(miniCluster.getSolrClient());

      // create it again
      String asyncId2 = (random().nextBoolean() ? null : "asyncId("+collectionName+".create)="+random().nextInt());
      createCollection(miniCluster, collectionName, null, asyncId2, null, null);
      AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);

      // check that there's no left-over state
      assertEquals(0, cloudSolrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
      cloudSolrClient.add(doc);
      cloudSolrClient.commit();
      assertEquals(1, cloudSolrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    }
    finally {
      miniCluster.shutdown();
    }
  }

  @Test
  public void testCollectionCreateWithoutCoresThenDelete() throws Exception {

    final String collectionName = "testSolrCloudCollectionWithoutCores";
    final MiniSolrCloudCluster miniCluster = createMiniSolrCloudCluster();
    final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();

    try {
      assertNotNull(miniCluster.getZkServer());
      assertFalse(miniCluster.getJettySolrRunners().isEmpty());

      // create collection
      final String asyncId = (random().nextBoolean() ? null : "asyncId("+collectionName+".create)="+random().nextInt());
      createCollection(miniCluster, collectionName, OverseerCollectionMessageHandler.CREATE_NODE_SET_EMPTY, asyncId, null, null);

      try (SolrZkClient zkClient = new SolrZkClient
          (miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null);
          ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
        zkStateReader.createClusterStateWatchersAndUpdate();

        // wait for collection to appear
        AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);

        // check the collection's corelessness
        {
          int coreCount = 0;
          for (Map.Entry<String,Slice> entry : zkStateReader.getClusterState()
              .getCollection(collectionName).getSlicesMap().entrySet()) {
            coreCount += entry.getValue().getReplicasMap().entrySet().size();
          }
          assertEquals(0, coreCount);
        }
      }
    }
    finally {
      miniCluster.shutdown();
    }
  }

  @Test
  public void testStopAllStartAll() throws Exception {

    final String collectionName = "testStopAllStartAllCollection";

    final MiniSolrCloudCluster miniCluster = createMiniSolrCloudCluster();

    try {
      assertNotNull(miniCluster.getZkServer());
      List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
      assertEquals(NUM_SERVERS, jettys.size());
      for (JettySolrRunner jetty : jettys) {
        assertTrue(jetty.isRunning());
      }

      createCollection(miniCluster, collectionName, null, null, Boolean.TRUE, null);
      final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
      cloudSolrClient.setDefaultCollection(collectionName);
      final SolrQuery query = new SolrQuery("*:*");
      final SolrInputDocument doc = new SolrInputDocument();

      try (SolrZkClient zkClient = new SolrZkClient
          (miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, AbstractZkTestCase.TIMEOUT, null);
          ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
        zkStateReader.createClusterStateWatchersAndUpdate();
        AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);

        // modify collection
        final int numDocs = 1 + random().nextInt(10);
        for (int ii = 1; ii <= numDocs; ++ii) {
          doc.setField("id", ""+ii);
          cloudSolrClient.add(doc);
          if (ii*2 == numDocs) cloudSolrClient.commit();
        }
        cloudSolrClient.commit();
        // query collection
        {
          final QueryResponse rsp = cloudSolrClient.query(query);
          assertEquals(numDocs, rsp.getResults().getNumFound());
        }

        // the test itself
        zkStateReader.forceUpdateCollection(collectionName);
        final ClusterState clusterState = zkStateReader.getClusterState();

        final HashSet<Integer> leaderIndices = new HashSet<Integer>();
        final HashSet<Integer> followerIndices = new HashSet<Integer>();
        {
          final HashMap<String,Boolean> shardLeaderMap = new HashMap<String,Boolean>();
          for (final Slice slice : clusterState.getCollection(collectionName).getSlices()) {
            for (final Replica replica : slice.getReplicas()) {
              shardLeaderMap.put(replica.getNodeName().replace("_solr", "/solr"), Boolean.FALSE);
            }
            shardLeaderMap.put(slice.getLeader().getNodeName().replace("_solr", "/solr"), Boolean.TRUE);
          }
          for (int ii = 0; ii < jettys.size(); ++ii) {
            final URL jettyBaseUrl = jettys.get(ii).getBaseUrl();
            final String jettyBaseUrlString = jettyBaseUrl.toString().substring((jettyBaseUrl.getProtocol() + "://").length());
            final Boolean isLeader = shardLeaderMap.get(jettyBaseUrlString);
            if (Boolean.TRUE.equals(isLeader)) {
              leaderIndices.add(new Integer(ii));
            } else if (Boolean.FALSE.equals(isLeader)) {
              followerIndices.add(new Integer(ii));
            } // else neither leader nor follower i.e. node without a replica (for our collection)
          }
        }
        final List<Integer> leaderIndicesList = new ArrayList<Integer>(leaderIndices);
        final List<Integer> followerIndicesList = new ArrayList<Integer>(followerIndices);

        // first stop the followers (in no particular order)
        Collections.shuffle(followerIndicesList, random());
        for (Integer ii : followerIndicesList) {
          if (!leaderIndices.contains(ii)) {
            miniCluster.stopJettySolrRunner(jettys.get(ii.intValue()));
          }
        }

        // then stop the leaders (again in no particular order)
        Collections.shuffle(leaderIndicesList, random());
        for (Integer ii : leaderIndicesList) {
          miniCluster.stopJettySolrRunner(jettys.get(ii.intValue()));
        }

        // calculate restart order
        final List<Integer> restartIndicesList = new ArrayList<Integer>();
        Collections.shuffle(leaderIndicesList, random());
        restartIndicesList.addAll(leaderIndicesList);
        Collections.shuffle(followerIndicesList, random());
        restartIndicesList.addAll(followerIndicesList);
        if (random().nextBoolean()) Collections.shuffle(restartIndicesList, random());

        // and then restart jettys in that order
        for (Integer ii : restartIndicesList) {
          final JettySolrRunner jetty = jettys.get(ii.intValue());
          if (!jetty.isRunning()) {
            miniCluster.startJettySolrRunner(jetty);
            assertTrue(jetty.isRunning());
          }
        }
        AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);

        zkStateReader.forceUpdateCollection(collectionName);

        // re-query collection
        {
          final QueryResponse rsp = cloudSolrClient.query(query);
          assertEquals(numDocs, rsp.getResults().getNumFound());
        }
      }
    }
    finally {
      miniCluster.shutdown();
    }
  }

}
@@ -1,717 +0,0 @@ TestCloudManagedSchemaConcurrent.java (deleted)
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.schema;

import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.List;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.util.BaseTestHarness;
import org.apache.solr.util.RestTestHarness;
import org.apache.zookeeper.data.Stat;
import org.eclipse.jetty.servlet.ServletHolder;
import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
import org.restlet.ext.servlet.ServerServlet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@Ignore
public class TestCloudManagedSchemaConcurrent extends AbstractFullDistribZkTestBase {
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
  private static final String SUCCESS_XPATH = "/response/lst[@name='responseHeader']/int[@name='status'][.='0']";
  private static final String PUT_DYNAMIC_FIELDNAME = "newdynamicfieldPut";
  private static final String POST_DYNAMIC_FIELDNAME = "newdynamicfieldPost";
  private static final String PUT_FIELDNAME = "newfieldPut";
  private static final String POST_FIELDNAME = "newfieldPost";
  private static final String PUT_FIELDTYPE = "newfieldtypePut";
  private static final String POST_FIELDTYPE = "newfieldtypePost";

  public TestCloudManagedSchemaConcurrent() {
    super();
    sliceCount = 4;
  }

  @BeforeClass
  public static void initSysProperties() {
    System.setProperty("managed.schema.mutable", "true");
    System.setProperty("enable.update.log", "true");
  }

  @Override
  public void distribTearDown() throws Exception {
    super.distribTearDown();
    for (RestTestHarness h : restTestHarnesses) {
      h.close();
    }
  }

  @Override
  protected String getCloudSolrConfig() {
    return "solrconfig-managed-schema.xml";
  }

  @Override
  public SortedMap<ServletHolder,String> getExtraServlets() {
    final SortedMap<ServletHolder,String> extraServlets = new TreeMap<>();
    final ServletHolder solrRestApi = new ServletHolder("SolrSchemaRestApi", ServerServlet.class);
    solrRestApi.setInitParameter("org.restlet.application", "org.apache.solr.rest.SolrSchemaRestApi");
    extraServlets.put(solrRestApi, "/schema/*"); // '/schema/*' matches '/schema', '/schema/', and '/schema/whatever...'
    return extraServlets;
  }

  private List<RestTestHarness> restTestHarnesses = new ArrayList<>();

  private void setupHarnesses() {
    for (final SolrClient client : clients) {
      RestTestHarness harness = new RestTestHarness(() -> ((HttpSolrClient)client).getBaseURL());
      restTestHarnesses.add(harness);
    }
  }

  private static void verifySuccess(String request, String response) throws Exception {
    String result = BaseTestHarness.validateXPath(response, SUCCESS_XPATH);
    if (null != result) {
      String msg = "QUERY FAILED: xpath=" + result + " request=" + request + " response=" + response;
      log.error(msg);
      fail(msg);
    }
  }

  private static void addFieldPut(RestTestHarness publisher, String fieldName, int updateTimeoutSecs) throws Exception {
    final String content = "{\"type\":\"text\",\"stored\":\"false\"}";
    String request = "/schema/fields/" + fieldName + "?wt=xml";
    if (updateTimeoutSecs > 0)
      request += "&updateTimeoutSecs="+updateTimeoutSecs;
    String response = publisher.put(request, content);
    verifySuccess(request, response);
  }

  private static void addFieldPost(RestTestHarness publisher, String fieldName, int updateTimeoutSecs) throws Exception {
    final String content = "[{\"name\":\""+fieldName+"\",\"type\":\"text\",\"stored\":\"false\"}]";
    String request = "/schema/fields/?wt=xml";
    if (updateTimeoutSecs > 0)
      request += "&updateTimeoutSecs="+updateTimeoutSecs;
    String response = publisher.post(request, content);
    verifySuccess(request, response);
  }

  private static void addDynamicFieldPut(RestTestHarness publisher, String dynamicFieldPattern, int updateTimeoutSecs) throws Exception {
    final String content = "{\"type\":\"text\",\"stored\":\"false\"}";
    String request = "/schema/dynamicfields/" + dynamicFieldPattern + "?wt=xml";
    if (updateTimeoutSecs > 0)
      request += "&updateTimeoutSecs="+updateTimeoutSecs;
    String response = publisher.put(request, content);
    verifySuccess(request, response);
  }

  private static void addDynamicFieldPost(RestTestHarness publisher, String dynamicFieldPattern, int updateTimeoutSecs) throws Exception {
    final String content = "[{\"name\":\""+dynamicFieldPattern+"\",\"type\":\"text\",\"stored\":\"false\"}]";
    String request = "/schema/dynamicfields/?wt=xml";
    if (updateTimeoutSecs > 0)
      request += "&updateTimeoutSecs="+updateTimeoutSecs;
    String response = publisher.post(request, content);
    verifySuccess(request, response);
  }

  private static void copyField(RestTestHarness publisher, String source, String dest, int updateTimeoutSecs) throws Exception {
    final String content = "[{\"source\":\""+source+"\",\"dest\":[\""+dest+"\"]}]";
    String request = "/schema/copyfields/?wt=xml";
    if (updateTimeoutSecs > 0)
      request += "&updateTimeoutSecs="+updateTimeoutSecs;
    String response = publisher.post(request, content);
    verifySuccess(request, response);
  }

  private static void addFieldTypePut(RestTestHarness publisher, String typeName, int updateTimeoutSecs) throws Exception {
    final String content = "{\"class\":\""+RANDOMIZED_NUMERIC_FIELDTYPES.get(Integer.class)+"\"}";
    String request = "/schema/fieldtypes/" + typeName + "?wt=xml";
    if (updateTimeoutSecs > 0)
      request += "&updateTimeoutSecs="+updateTimeoutSecs;
    String response = publisher.put(request, content);
    verifySuccess(request, response);
  }

  private static void addFieldTypePost(RestTestHarness publisher, String typeName, int updateTimeoutSecs) throws Exception {
    final String content = "[{\"name\":\""+typeName+"\",\"class\":\""+RANDOMIZED_NUMERIC_FIELDTYPES.get(Integer.class)+"\"}]";
    String request = "/schema/fieldtypes/?wt=xml";
    if (updateTimeoutSecs > 0)
      request += "&updateTimeoutSecs="+updateTimeoutSecs;
    String response = publisher.post(request, content);
    verifySuccess(request, response);
  }

  private String[] getExpectedFieldResponses(Info info) {
    String[] expectedAddFields = new String[1 + info.numAddFieldPuts + info.numAddFieldPosts];
    expectedAddFields[0] = SUCCESS_XPATH;

    for (int i = 0; i < info.numAddFieldPuts; ++i) {
      String newFieldName = PUT_FIELDNAME + info.fieldNameSuffix + i;
      expectedAddFields[1 + i]
          = "/response/arr[@name='fields']/lst/str[@name='name'][.='" + newFieldName + "']";
    }

    for (int i = 0; i < info.numAddFieldPosts; ++i) {
      String newFieldName = POST_FIELDNAME + info.fieldNameSuffix + i;
      expectedAddFields[1 + info.numAddFieldPuts + i]
          = "/response/arr[@name='fields']/lst/str[@name='name'][.='" + newFieldName + "']";
    }

    return expectedAddFields;
  }

  private String[] getExpectedDynamicFieldResponses(Info info) {
    String[] expectedAddDynamicFields = new String[1 + info.numAddDynamicFieldPuts + info.numAddDynamicFieldPosts];
    expectedAddDynamicFields[0] = SUCCESS_XPATH;

    for (int i = 0; i < info.numAddDynamicFieldPuts; ++i) {
      String newDynamicFieldPattern = PUT_DYNAMIC_FIELDNAME + info.fieldNameSuffix + i + "_*";
      expectedAddDynamicFields[1 + i]
          = "/response/arr[@name='dynamicFields']/lst/str[@name='name'][.='" + newDynamicFieldPattern + "']";
    }

    for (int i = 0; i < info.numAddDynamicFieldPosts; ++i) {
      String newDynamicFieldPattern = POST_DYNAMIC_FIELDNAME + info.fieldNameSuffix + i + "_*";
      expectedAddDynamicFields[1 + info.numAddDynamicFieldPuts + i]
          = "/response/arr[@name='dynamicFields']/lst/str[@name='name'][.='" + newDynamicFieldPattern + "']";
    }

    return expectedAddDynamicFields;
  }

  private String[] getExpectedCopyFieldResponses(Info info) {
    ArrayList<String> expectedCopyFields = new ArrayList<>();
    expectedCopyFields.add(SUCCESS_XPATH);
    for (CopyFieldInfo cpi : info.copyFields) {
      String expectedSourceName = cpi.getSourceField();
      expectedCopyFields.add
          ("/response/arr[@name='copyFields']/lst/str[@name='source'][.='" + expectedSourceName + "']");
      String expectedDestName = cpi.getDestField();
      expectedCopyFields.add
          ("/response/arr[@name='copyFields']/lst/str[@name='dest'][.='" + expectedDestName + "']");
    }

    return expectedCopyFields.toArray(new String[expectedCopyFields.size()]);
  }

  private String[] getExpectedFieldTypeResponses(Info info) {
    String[] expectedAddFieldTypes = new String[1 + info.numAddFieldTypePuts + info.numAddFieldTypePosts];
    expectedAddFieldTypes[0] = SUCCESS_XPATH;

    for (int i = 0; i < info.numAddFieldTypePuts; ++i) {
      String newFieldTypeName = PUT_FIELDTYPE + info.fieldNameSuffix + i;
      expectedAddFieldTypes[1 + i]
          = "/response/arr[@name='fieldTypes']/lst/str[@name='name'][.='" + newFieldTypeName + "']";
    }

    for (int i = 0; i < info.numAddFieldTypePosts; ++i) {
      String newFieldTypeName = POST_FIELDTYPE + info.fieldNameSuffix + i;
      expectedAddFieldTypes[1 + info.numAddFieldTypePuts + i]
          = "/response/arr[@name='fieldTypes']/lst/str[@name='name'][.='" + newFieldTypeName + "']";
    }

    return expectedAddFieldTypes;
  }

  @Test
  @ShardsFixed(num = 8)
  public void test() throws Exception {
    verifyWaitForSchemaUpdateToPropagate();
    setupHarnesses();
    concurrentOperationsTest();
    schemaLockTest();
  }

  private static class Info {
    int numAddFieldPuts = 0;
    int numAddFieldPosts = 0;
    int numAddDynamicFieldPuts = 0;
    int numAddDynamicFieldPosts = 0;
    int numAddFieldTypePuts = 0;
    int numAddFieldTypePosts = 0;
    public String fieldNameSuffix;
    List<CopyFieldInfo> copyFields = new ArrayList<>();

    public Info(String fieldNameSuffix) {
      this.fieldNameSuffix = fieldNameSuffix;
    }
  }

  private enum Operation {
    PUT_AddField {
      @Override public void execute(RestTestHarness publisher, int fieldNum, Info info) throws Exception {
        String fieldname = PUT_FIELDNAME + info.numAddFieldPuts++;
        addFieldPut(publisher, fieldname, 15);
      }
    },
    POST_AddField {
      @Override public void execute(RestTestHarness publisher, int fieldNum, Info info) throws Exception {
        String fieldname = POST_FIELDNAME + info.numAddFieldPosts++;
        addFieldPost(publisher, fieldname, 15);
      }
    },
    PUT_AddDynamicField {
      @Override public void execute(RestTestHarness publisher, int fieldNum, Info info) throws Exception {
        addDynamicFieldPut(publisher, PUT_DYNAMIC_FIELDNAME + info.numAddDynamicFieldPuts++ + "_*", 15);
      }
    },
    POST_AddDynamicField {
      @Override public void execute(RestTestHarness publisher, int fieldNum, Info info) throws Exception {
        addDynamicFieldPost(publisher, POST_DYNAMIC_FIELDNAME + info.numAddDynamicFieldPosts++ + "_*", 15);
      }
    },
    POST_AddCopyField {
      @Override public void execute(RestTestHarness publisher, int fieldNum, Info info) throws Exception {
        String sourceField = null;
        String destField = null;

        int sourceType = random().nextInt(3);
        if (sourceType == 0) { // existing
          sourceField = "name";
        } else if (sourceType == 1) { // newly created
          sourceField = "copySource" + fieldNum;
          addFieldPut(publisher, sourceField, 15);
        } else { // dynamic
          sourceField = "*_dynamicSource" + fieldNum + "_t";
          // * only supported if both src and dst use it
          destField = "*_dynamicDest" + fieldNum + "_t";
        }

        if (destField == null) {
          int destType = random().nextInt(2);
          if (destType == 0) { // existing
            destField = "title";
          } else { // newly created
            destField = "copyDest" + fieldNum;
            addFieldPut(publisher, destField, 15);
          }
        }
        copyField(publisher, sourceField, destField, 15);
        info.copyFields.add(new CopyFieldInfo(sourceField, destField));
      }
    },
    PUT_AddFieldType {
      @Override public void execute(RestTestHarness publisher, int fieldNum, Info info) throws Exception {
        String typeName = PUT_FIELDTYPE + info.numAddFieldTypePuts++;
        addFieldTypePut(publisher, typeName, 15);
      }
    },
    POST_AddFieldType {
      @Override public void execute(RestTestHarness publisher, int fieldNum, Info info) throws Exception {
        String typeName = POST_FIELDTYPE + info.numAddFieldTypePosts++;
        addFieldTypePost(publisher, typeName, 15);
      }
    };

    public abstract void execute(RestTestHarness publisher, int fieldNum, Info info) throws Exception;

    private static final Operation[] VALUES = values();
    public static Operation randomOperation() {
      return VALUES[r.nextInt(VALUES.length)];
    }
  }

  private void verifyWaitForSchemaUpdateToPropagate() throws Exception {
    String testCollectionName = "collection1";

    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
    Replica shard1Leader = clusterState.getCollection(testCollectionName).getLeader("shard1");
    final String coreUrl = (new ZkCoreNodeProps(shard1Leader)).getCoreUrl();
    assertNotNull(coreUrl);

    RestTestHarness harness = new RestTestHarness(() -> coreUrl.endsWith("/") ? coreUrl.substring(0, coreUrl.length()-1) : coreUrl);
    try {
      addFieldTypePut(harness, "fooInt", 15);
    } finally {
      harness.close();
    }

    // go into ZK to get the version of the managed schema after the update
    SolrZkClient zkClient = cloudClient.getZkStateReader().getZkClient();
    Stat stat = new Stat();
    String znodePath = "/configs/conf1/managed-schema";
    byte[] managedSchemaBytes = zkClient.getData(znodePath, null, stat, false);
    int schemaZkVersion = stat.getVersion();

    // now loop over all replicas and verify each has the same schema version
    Replica randomReplicaNotLeader = null;
    for (Slice slice : clusterState.getCollection(testCollectionName).getActiveSlices()) {
      for (Replica replica : slice.getReplicas()) {
        validateZkVersion(replica, schemaZkVersion, 0, false);

        // save a random replica to test zk watcher behavior
        if (randomReplicaNotLeader == null && !replica.getName().equals(shard1Leader.getName()))
          randomReplicaNotLeader = replica;
      }
    }
    assertNotNull(randomReplicaNotLeader);

    // now update the data and then verify the znode watcher fires correctly
    // before and after a zk session expiration (see SOLR-6249)
    zkClient.setData(znodePath, managedSchemaBytes, schemaZkVersion, false);
    stat = new Stat();
    managedSchemaBytes = zkClient.getData(znodePath, null, stat, false);
    int updatedSchemaZkVersion = stat.getVersion();
    assertTrue(updatedSchemaZkVersion > schemaZkVersion);
    validateZkVersion(randomReplicaNotLeader, updatedSchemaZkVersion, 2, true);

    // ok - looks like the watcher fired correctly on the replica
    // now, expire that replica's zk session and then verify the watcher fires again (after reconnect)
    JettySolrRunner randomReplicaJetty =
        getJettyOnPort(getReplicaPort(randomReplicaNotLeader));
    assertNotNull(randomReplicaJetty);
    chaosMonkey.expireSession(randomReplicaJetty);

    // update the data again to cause watchers to fire
    zkClient.setData(znodePath, managedSchemaBytes, updatedSchemaZkVersion, false);
    stat = new Stat();
    managedSchemaBytes = zkClient.getData(znodePath, null, stat, false);
    updatedSchemaZkVersion = stat.getVersion();
    // give up to 10 secs for the replica to recover after zk session loss and see the update
    validateZkVersion(randomReplicaNotLeader, updatedSchemaZkVersion, 10, true);
  }

  /**
   * Sends a GET request to get the zk schema version from a specific replica.
   */
  protected void validateZkVersion(Replica replica, int schemaZkVersion, int waitSecs, boolean retry) throws Exception {
    final String replicaUrl = (new ZkCoreNodeProps(replica)).getCoreUrl();
    RestTestHarness testHarness = new RestTestHarness(() -> replicaUrl.endsWith("/") ? replicaUrl.substring(0, replicaUrl.length()-1) : replicaUrl);
    try {
      long waitMs = waitSecs * 1000L;
      if (waitMs > 0) Thread.sleep(waitMs); // wait a moment for the zk watcher to fire

      try {
        testHarness.validateQuery("/schema/zkversion?wt=xml", "//zkversion=" + schemaZkVersion);
      } catch (Exception exc) {
        if (retry) {
          // brief wait before retrying
          Thread.sleep(waitMs > 0 ? waitMs : 2000L);

          testHarness.validateQuery("/schema/zkversion?wt=xml", "//zkversion=" + schemaZkVersion);
        } else {
          throw exc;
        }
      }
    } finally {
      testHarness.close();
    }
  }

  private void concurrentOperationsTest() throws Exception {

    // First, add a bunch of fields and dynamic fields via PUT and POST, as well as copyFields,
    // in quick succession, then verify the shards' schemas once all of them have been added
    int numFields = 100;
    Info info = new Info("");

    for (int fieldNum = 0; fieldNum <= numFields ; ++fieldNum) {
      RestTestHarness publisher = restTestHarnesses.get(r.nextInt(restTestHarnesses.size()));
      Operation.randomOperation().execute(publisher, fieldNum, info);
    }

    String[] expectedAddFields = getExpectedFieldResponses(info);
    String[] expectedAddDynamicFields = getExpectedDynamicFieldResponses(info);
    String[] expectedCopyFields = getExpectedCopyFieldResponses(info);
    String[] expectedAddFieldTypes = getExpectedFieldTypeResponses(info);

    boolean success = false;
    long maxTimeoutMillis = 100000;
    long startTime = System.nanoTime();
    String request = null;
    String response = null;
    String result = null;

    while ( ! success
        && TimeUnit.MILLISECONDS.convert(System.nanoTime() - startTime, TimeUnit.NANOSECONDS) < maxTimeoutMillis) {
      Thread.sleep(100);

      for (RestTestHarness client : restTestHarnesses) {
        // verify addFieldTypePuts and addFieldTypePosts
        request = "/schema/fieldtypes?wt=xml";
        response = client.query(request);
        result = BaseTestHarness.validateXPath(response, expectedAddFieldTypes);
        if (result != null) {
          break;
        }

        // verify addFieldPuts and addFieldPosts
        request = "/schema/fields?wt=xml";
        response = client.query(request);
        result = BaseTestHarness.validateXPath(response, expectedAddFields);
        if (result != null) {
          break;
        }

        // verify addDynamicFieldPuts and addDynamicFieldPosts
        request = "/schema/dynamicfields?wt=xml";
        response = client.query(request);
        result = BaseTestHarness.validateXPath(response, expectedAddDynamicFields);
        if (result != null) {
          break;
        }

        // verify copyFields
        request = "/schema/copyfields?wt=xml";
        response = client.query(request);
        result = BaseTestHarness.validateXPath(response, expectedCopyFields);
        if (result != null) {
          break;
        }
      }
      success = (result == null);
    }
    if ( ! success) {
      String msg = "QUERY FAILED: xpath=" + result + " request=" + request + " response=" + response;
      log.error(msg);
      fail(msg);
    }
  }

  private abstract class PutPostThread extends Thread {
    RestTestHarness harness;
    Info info;
    public String fieldName;

    public PutPostThread(RestTestHarness harness, Info info) {
      this.harness = harness;
      this.info = info;
    }

    public abstract void run();
  }

  private class PutFieldThread extends PutPostThread {
    public PutFieldThread(RestTestHarness harness, Info info) {
      super(harness, info);
      fieldName = PUT_FIELDNAME + "Thread" + info.numAddFieldPuts++;
    }
    public void run() {
      try {
        // don't have the client side wait for all replicas to see the update or that defeats the purpose
        // of testing the locking support on the server-side
        addFieldPut(harness, fieldName, -1);
      } catch (Exception e) {
        // log.error("###ACTUAL FAILURE!");
        throw new RuntimeException(e);
      }
    }
  }

  private class PostFieldThread extends PutPostThread {
    public PostFieldThread(RestTestHarness harness, Info info) {
      super(harness, info);
      fieldName = POST_FIELDNAME + "Thread" + info.numAddFieldPosts++;
    }
    public void run() {
      try {
        addFieldPost(harness, fieldName, -1);
      } catch (Exception e) {
        // log.error("###ACTUAL FAILURE!");
        throw new RuntimeException(e);
      }
    }
  }

  private class PutFieldTypeThread extends PutPostThread {
    public PutFieldTypeThread(RestTestHarness harness, Info info) {
      super(harness, info);
      fieldName = PUT_FIELDTYPE + "Thread" + info.numAddFieldTypePuts++;
    }
    public void run() {
      try {
        addFieldTypePut(harness, fieldName, -1);
      } catch (Exception e) {
        // log.error("###ACTUAL FAILURE!");
        throw new RuntimeException(e);
      }
    }
  }

  private class PostFieldTypeThread extends PutPostThread {
    public PostFieldTypeThread(RestTestHarness harness, Info info) {
      super(harness, info);
      fieldName = POST_FIELDTYPE + "Thread" + info.numAddFieldTypePosts++;
    }
    public void run() {
      try {
        addFieldTypePost(harness, fieldName, -1);
      } catch (Exception e) {
        // log.error("###ACTUAL FAILURE!");
        throw new RuntimeException(e);
      }
    }
  }

  private class PutDynamicFieldThread extends PutPostThread {
    public PutDynamicFieldThread(RestTestHarness harness, Info info) {
      super(harness, info);
      fieldName = PUT_FIELDNAME + "Thread" + info.numAddFieldPuts++;
    }
    public void run() {
      try {
        addFieldPut(harness, fieldName, -1);
      } catch (Exception e) {
        // log.error("###ACTUAL FAILURE!");
        throw new RuntimeException(e);
      }
    }
  }

  private class PostDynamicFieldThread extends PutPostThread {
    public PostDynamicFieldThread(RestTestHarness harness, Info info) {
      super(harness, info);
      fieldName = POST_FIELDNAME + "Thread" + info.numAddFieldPosts++;
    }
    public void run() {
      try {
        addFieldPost(harness, fieldName, -1);
      } catch (Exception e) {
        // log.error("###ACTUAL FAILURE!");
        throw new RuntimeException(e);
      }
    }
  }

  private void schemaLockTest() throws Exception {

    // Add fields, dynamic fields, and field types via PUT and POST from concurrent threads,
    // and verify the shards' schemas once each round of threads has completed
    int numFields = 5;
    Info info = new Info("Thread");

    for (int i = 0; i <= numFields ; ++i) {
      // System.err.println("###ITERATION: " + i);
      RestTestHarness publisher = restTestHarnesses.get(r.nextInt(restTestHarnesses.size()));
      PostFieldThread postFieldThread = new PostFieldThread(publisher, info);
      postFieldThread.start();

      publisher = restTestHarnesses.get(r.nextInt(restTestHarnesses.size()));
      PutFieldThread putFieldThread = new PutFieldThread(publisher, info);
      putFieldThread.start();

      publisher = restTestHarnesses.get(r.nextInt(restTestHarnesses.size()));
      PostDynamicFieldThread postDynamicFieldThread = new PostDynamicFieldThread(publisher, info);
      postDynamicFieldThread.start();

      publisher = restTestHarnesses.get(r.nextInt(restTestHarnesses.size()));
      PutDynamicFieldThread putDynamicFieldThread = new PutDynamicFieldThread(publisher, info);
      putDynamicFieldThread.start();

      publisher = restTestHarnesses.get(r.nextInt(restTestHarnesses.size()));
      PostFieldTypeThread postFieldTypeThread = new PostFieldTypeThread(publisher, info);
      postFieldTypeThread.start();

      publisher = restTestHarnesses.get(r.nextInt(restTestHarnesses.size()));
      PutFieldTypeThread putFieldTypeThread = new PutFieldTypeThread(publisher, info);
      putFieldTypeThread.start();

      postFieldThread.join();
      putFieldThread.join();
      postDynamicFieldThread.join();
      putDynamicFieldThread.join();
      postFieldTypeThread.join();
      putFieldTypeThread.join();

      String[] expectedAddFields = getExpectedFieldResponses(info);
      String[] expectedAddFieldTypes = getExpectedFieldTypeResponses(info);
      String[] expectedAddDynamicFields = getExpectedDynamicFieldResponses(info);

      boolean success = false;
      long maxTimeoutMillis = 100000;
      long startTime = System.nanoTime();
      String request = null;
      String response = null;
      String result = null;

      while ( ! success
          && TimeUnit.MILLISECONDS.convert(System.nanoTime() - startTime, TimeUnit.NANOSECONDS) < maxTimeoutMillis) {
        Thread.sleep(10);

        // int j = 0;
        for (RestTestHarness client : restTestHarnesses) {
          // System.err.println("###CHECKING HARNESS: " + j++ + " for iteration: " + i);

          // verify addFieldPuts and addFieldPosts
          request = "/schema/fields?wt=xml";
          response = client.query(request);
          //System.err.println("###RESPONSE: " + response);
          result = BaseTestHarness.validateXPath(response, expectedAddFields);

          if (result != null) {
            // System.err.println("###FAILURE!");
            break;
          }

          // verify addDynamicFieldPuts and addDynamicFieldPosts
          request = "/schema/dynamicfields?wt=xml";
          response = client.query(request);
          //System.err.println("###RESPONSE: " + response);
          result = BaseTestHarness.validateXPath(response, expectedAddDynamicFields);

          if (result != null) {
            // System.err.println("###FAILURE!");
            break;
          }

          // verify addFieldTypePuts and addFieldTypePosts
          request = "/schema/fieldtypes?wt=xml";
          response = client.query(request);
          //System.err.println("###RESPONSE: " + response);
          result = BaseTestHarness.validateXPath(response, expectedAddFieldTypes);

          if (result != null) {
            // System.err.println("###FAILURE!");
            break;
          }
        }
        success = (result == null);
      }
      if ( ! success) {
        String msg = "QUERY FAILED: xpath=" + result + " request=" + request + " response=" + response;
        log.error(msg);
        fail(msg);
      }
    }
  }

  private static class CopyFieldInfo {
    private String sourceField;
    private String destField;

    public CopyFieldInfo(String sourceField, String destField) {
      this.sourceField = sourceField;
      this.destField = destField;
    }

    public String getSourceField() { return sourceField; }
    public String getDestField() { return destField; }
  }
}
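
For reference, this is a minimal standalone sketch of the pattern the first removed test exercised: spin up a MiniSolrCloudCluster, create a collection, index and query a document, then shut down. It uses only APIs visible in the deleted code above; the cluster size, configset path, configset name, and collection name are illustrative, not taken from the commit.

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.embedded.JettyConfig;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.cloud.MiniSolrCloudCluster;
import org.apache.solr.common.SolrInputDocument;

public class MiniSolrCloudClusterExample {
  public static void main(String[] args) throws Exception {
    Path baseDir = Files.createTempDirectory("miniSolrCloudCluster");
    // start an embedded two-node SolrCloud cluster; MiniSolrCloudCluster works
    // outside the Lucene test hierarchy, as the removed test's javadoc notes
    MiniSolrCloudCluster cluster = new MiniSolrCloudCluster(2, baseDir, JettyConfig.builder().build());
    try {
      // upload a configset and create a collection with 1 shard and 1 replica
      // (the path below is a placeholder for a real Solr configset directory)
      cluster.uploadConfigSet(Paths.get("path/to/configset/conf"), "exampleConfig");
      CollectionAdminRequest.createCollection("example", "exampleConfig", 1, 1)
          .process(cluster.getSolrClient());
      // the removed tests additionally waited for replica recoveries to finish
      // (AbstractDistribZkTestBase.waitForRecoveriesToFinish) before querying

      // index one document and query it back
      CloudSolrClient client = cluster.getSolrClient();
      client.setDefaultCollection("example");
      SolrInputDocument doc = new SolrInputDocument();
      doc.setField("id", "1");
      client.add(doc);
      client.commit();
      long numFound = client.query(new SolrQuery("*:*")).getResults().getNumFound(); // expect 1
      System.out.println("numFound=" + numFound);
    } finally {
      cluster.shutdown();
    }
  }
}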