mirror of https://github.com/apache/lucene.git

Merge remote-tracking branch 'origin/master'

commit 06cb145f12

CHANGES.txt

@@ -38,6 +38,11 @@ Upgrading from Solr 6.x
  HttpSolrClient#setMaxTotalConnections have been removed. These now default very
  high and can only be changed via param when creating an HttpClient instance.
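Since the setters are gone, connection limits have to be supplied when the HttpClient itself is built. A minimal sketch of that pattern follows; the HttpClientUtil property constants appear elsewhere in this commit, while the class name, numeric values, and the HttpSolrClient constructor call are illustrative assumptions rather than lines from this change.

// A sketch, not part of this commit: sizing the connection pool at client-creation time.
import org.apache.http.client.HttpClient;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.common.params.ModifiableSolrParams;

public class PreconfiguredClientExample {
  public static HttpSolrClient build(String baseUrl) {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set(HttpClientUtil.PROP_MAX_CONNECTIONS, 256);          // pool-wide cap (illustrative value)
    params.set(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, 64);  // per-host cap (illustrative value)
    HttpClient httpClient = HttpClientUtil.createClient(params);
    return new HttpSolrClient(baseUrl, httpClient);                // 6.x-style constructor (assumed)
  }
}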

Bug Fixes
----------------------
* SOLR-9262: Connection and read timeouts are being ignored by UpdateShardHandler after SOLR-4509.
  (Mark Miller, shalin)

Optimizations
----------------------

@@ -108,6 +113,9 @@ Optimizations

* SOLR-9219: Make hdfs blockcache read buffer sizes configurable and improve cache concurrency. (Mark Miller)

* SOLR-9264: Optimize ZkController.publishAndWaitForDownStates to not read all collection states and
  watch relevant collections instead. (Hrishikesh Gadre, shalin)

Other Changes
----------------------

@@ -120,6 +128,8 @@ Other Changes
* SOLR-9076: Update to Hadoop 2.7.2
  (Mark Miller, Gregory Chanan)

* SOLR-8787: TestAuthenticationFramework should not extend TestMiniSolrCloudCluster. (Trey Cahill via shalin)

================== 6.1.0 ==================

Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

ZkController.java

@@ -37,6 +37,7 @@ import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

@@ -52,25 +53,7 @@ import org.apache.solr.cloud.overseer.OverseerAction;
import org.apache.solr.cloud.overseer.SliceMutator;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.BeforeReconnect;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.ClusterStateUtil;
import org.apache.solr.common.cloud.DefaultConnectionStrategy;
import org.apache.solr.common.cloud.DefaultZkACLProvider;
import org.apache.solr.common.cloud.DefaultZkCredentialsProvider;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.OnReconnect;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkACLProvider;
import org.apache.solr.common.cloud.ZkCmdExecutor;
import org.apache.solr.common.cloud.ZkConfigManager;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkCredentialsProvider;
import org.apache.solr.common.cloud.ZkNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.cloud.ZooKeeperException;
import org.apache.solr.common.cloud.*;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.SolrParams;

@@ -747,34 +730,35 @@ public final class ZkController {

publishNodeAsDown(getNodeName());

// now wait till the updates are in our state
long now = System.nanoTime();
long timeout = now + TimeUnit.NANOSECONDS.convert(WAIT_DOWN_STATES_TIMEOUT_SECONDS, TimeUnit.SECONDS);
Set<String> collectionsWithLocalReplica = ConcurrentHashMap.newKeySet();
for (SolrCore core : cc.getCores()) {
collectionsWithLocalReplica.add(core.getCoreDescriptor().getCloudDescriptor().getCollectionName());
}

while (System.nanoTime() < timeout) {
boolean foundStates = true;
ClusterState clusterState = zkStateReader.getClusterState();
Map<String, DocCollection> collections = clusterState.getCollectionsMap();
for (Map.Entry<String, DocCollection> entry : collections.entrySet()) {
DocCollection collection = entry.getValue();
Collection<Slice> slices = collection.getSlices();
for (Slice slice : slices) {
Collection<Replica> replicas = slice.getReplicas();
for (Replica replica : replicas) {
if (getNodeName().equals(replica.getNodeName()) && replica.getState() != Replica.State.DOWN) {
CountDownLatch latch = new CountDownLatch(collectionsWithLocalReplica.size());
for (String collectionWithLocalReplica : collectionsWithLocalReplica) {
zkStateReader.registerCollectionStateWatcher(collectionWithLocalReplica, (liveNodes, collectionState) -> {
boolean foundStates = true;
for (SolrCore core : cc.getCores()) {
if (core.getCoreDescriptor().getCloudDescriptor().getCollectionName().equals(collectionWithLocalReplica)) {
Replica replica = collectionState.getReplica(core.getCoreDescriptor().getCloudDescriptor().getCoreNodeName());
if (replica.getState() != Replica.State.DOWN) {
foundStates = false;
}
}
}
}

Thread.sleep(1000);
if (foundStates) {
return;
}
if (foundStates && collectionsWithLocalReplica.remove(collectionWithLocalReplica)) {
latch.countDown();
}
return foundStates;
});
}

log.warn("Timed out waiting to see all nodes published as DOWN in our cluster state.");
boolean allPublishedDown = latch.await(WAIT_DOWN_STATES_TIMEOUT_SECONDS, TimeUnit.SECONDS);
if (!allPublishedDown) {
log.warn("Timed out waiting to see all nodes published as DOWN in our cluster state.");
}
}

/**
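The interleaved old and new lines above are easier to follow with the CollectionStateWatcher contract in mind: the watcher is invoked with the current live nodes and collection state, and returning true unregisters it, which is why the new code can count down a latch per collection instead of polling the full cluster state. Below is a condensed sketch of the per-collection check; the class and method names, the null handling, and pulling the check into a helper are assumptions for illustration, not lines from this commit.

import java.util.Set;
import org.apache.solr.common.cloud.CollectionStateWatcher;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;

class DownStateWatcherSketch {
  // Reports "done" once no replica hosted on the given node is still in a non-DOWN state.
  static CollectionStateWatcher localReplicasDown(String nodeName) {
    return (Set<String> liveNodes, DocCollection state) -> {
      if (state == null) {
        return true;               // collection deleted: nothing left to wait for (assumption)
      }
      for (Slice slice : state.getSlices()) {
        for (Replica replica : slice.getReplicas()) {
          if (nodeName.equals(replica.getNodeName()) && replica.getState() != Replica.State.DOWN) {
            return false;          // not all DOWN yet: keep watching
          }
        }
      }
      return true;                 // returning true unregisters the watcher
    };
  }
}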

UpdateShardHandler.java

@@ -60,8 +60,19 @@ public class UpdateShardHandler {
}

ModifiableSolrParams clientParams = new ModifiableSolrParams();
log.info("Creating UpdateShardHandler HTTP client with params: {}", clientParams);
if (cfg != null) {
clientParams.set(HttpClientUtil.PROP_SO_TIMEOUT, cfg.getDistributedSocketTimeout());
clientParams.set(HttpClientUtil.PROP_CONNECTION_TIMEOUT, cfg.getDistributedConnectionTimeout());
}
client = HttpClientUtil.createClient(clientParams, clientConnectionManager);

// following is done only for logging complete configuration.
// The maxConnections and maxConnectionsPerHost have already been specified on the connection manager
if (cfg != null) {
clientParams.set(HttpClientUtil.PROP_MAX_CONNECTIONS, cfg.getMaxUpdateConnections());
clientParams.set(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, cfg.getMaxUpdateConnectionsPerHost());
}
log.info("Created UpdateShardHandler HTTP client with params: {}", clientParams);
}

public HttpClient getHttpClient() {
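This is the SOLR-9262 fix in context: the distributed socket and connection timeouts are placed on the params before createClient is called, while pool sizing stays on the connection manager (the later params.set calls are only there so the log line shows the complete configuration). A standalone sketch of the same split is below; the class name, the concrete connection-manager type, and the numeric values are assumptions for illustration.

import org.apache.http.client.HttpClient;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
import org.apache.solr.common.params.ModifiableSolrParams;

class UpdateClientSketch {
  static HttpClient build() {
    // Pool limits live on the connection manager ...
    PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager();
    cm.setMaxTotal(100000);          // stands in for cfg.getMaxUpdateConnections() (illustrative)
    cm.setDefaultMaxPerRoute(100);   // stands in for cfg.getMaxUpdateConnectionsPerHost() (illustrative)

    // ... while the timeouts travel in the params at creation time (the SOLR-9262 point).
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set(HttpClientUtil.PROP_SO_TIMEOUT, 600000);         // read timeout, ms (illustrative)
    params.set(HttpClientUtil.PROP_CONNECTION_TIMEOUT, 60000);  // connect timeout, ms (illustrative)
    return HttpClientUtil.createClient(params, cm);
  }
}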

TestAuthenticationFramework.java

@@ -16,8 +16,11 @@
*/
package org.apache.solr.cloud;

import java.io.File;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import javax.servlet.FilterChain;

@@ -30,13 +33,27 @@ import org.apache.http.HttpException;
import org.apache.http.HttpRequest;
import org.apache.http.HttpRequestInterceptor;
import org.apache.http.protocol.HttpContext;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.embedded.JettyConfig;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
import org.apache.solr.client.solrj.impl.SolrHttpClientBuilder;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.client.solrj.response.RequestStatusState;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.core.CoreDescriptor;
import org.apache.solr.index.TieredMergePolicyFactory;
import org.apache.solr.security.AuthenticationPlugin;
import org.apache.solr.security.HttpClientBuilderPlugin;
import org.apache.solr.util.RevertDefaultThreadHandlerRule;
import org.junit.After;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;

@@ -52,15 +69,12 @@ import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
*/
@LuceneTestCase.Slow
@SuppressSysoutChecks(bugUrl = "Solr logs to JUL")
public class TestAuthenticationFramework extends TestMiniSolrCloudCluster {
public class TestAuthenticationFramework extends LuceneTestCase {

private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

public TestAuthenticationFramework () {
NUM_SERVERS = 5;
NUM_SHARDS = 2;
REPLICATION_FACTOR = 2;
}
private int NUM_SERVERS = 5;
private int NUM_SHARDS = 2;
private int REPLICATION_FACTOR = 2;

static String requestUsername = MockAuthenticationPlugin.expectedUsername;
static String requestPassword = MockAuthenticationPlugin.expectedPassword;

@@ -74,7 +88,7 @@ public class TestAuthenticationFramework extends TestMiniSolrCloudCluster {
new SystemPropertiesRestoreRule()).around(
new RevertDefaultThreadHandlerRule());

@Override
@Before
public void setUp() throws Exception {
setupAuthenticationPlugin();
super.setUp();

@@ -84,38 +98,135 @@
System.setProperty("authenticationPlugin", "org.apache.solr.cloud.TestAuthenticationFramework$MockAuthenticationPlugin");
MockAuthenticationPlugin.expectedUsername = null;
MockAuthenticationPlugin.expectedPassword = null;

}

@Test
public void testBasics() throws Exception {

// Should pass
testCollectionCreateSearchDelete();

MockAuthenticationPlugin.expectedUsername = "solr";
MockAuthenticationPlugin.expectedPassword = "s0lrRocks";

// Should fail with 401
MiniSolrCloudCluster miniCluster = createMiniSolrCloudCluster();
try {
testCollectionCreateSearchDelete();
fail("Should've returned a 401 error");
} catch (Exception ex) {
if (!ex.getMessage().contains("Error 401")) {
// Should pass
collectionCreateSearchDelete(miniCluster);

MockAuthenticationPlugin.expectedUsername = "solr";
MockAuthenticationPlugin.expectedPassword = "s0lrRocks";

// Should fail with 401
try {
collectionCreateSearchDelete(miniCluster);
fail("Should've returned a 401 error");
} catch (Exception ex) {
if (!ex.getMessage().contains("Error 401")) {
fail("Should've returned a 401 error");
}
} finally {
MockAuthenticationPlugin.expectedUsername = null;
MockAuthenticationPlugin.expectedPassword = null;
}
} finally {
MockAuthenticationPlugin.expectedUsername = null;
MockAuthenticationPlugin.expectedPassword = null;
miniCluster.shutdown();
}
}

@Override
@After
public void tearDown() throws Exception {
System.clearProperty("authenticationPlugin");
super.tearDown();
}

private MiniSolrCloudCluster createMiniSolrCloudCluster() throws Exception {
JettyConfig.Builder jettyConfig = JettyConfig.builder();
jettyConfig.waitForLoadingCoresToFinish(null);
return new MiniSolrCloudCluster(NUM_SERVERS, createTempDir(), jettyConfig.build());
}

private void createCollection(MiniSolrCloudCluster miniCluster, String collectionName, String asyncId)
throws Exception {
String configName = "solrCloudCollectionConfig";
File configDir = new File(SolrTestCaseJ4.TEST_HOME() + File.separator + "collection1" + File.separator + "conf");
miniCluster.uploadConfigDir(configDir, configName);

final boolean persistIndex = random().nextBoolean();
Map<String, String> collectionProperties = new HashMap<>();

collectionProperties.putIfAbsent(CoreDescriptor.CORE_CONFIG, "solrconfig-tlog.xml");
collectionProperties.putIfAbsent("solr.tests.maxBufferedDocs", "100000");
collectionProperties.putIfAbsent("solr.tests.ramBufferSizeMB", "100");
// use non-test classes so RandomizedRunner isn't necessary
if (random().nextBoolean()) {
collectionProperties.putIfAbsent(SolrTestCaseJ4.SYSTEM_PROPERTY_SOLR_TESTS_MERGEPOLICY, TieredMergePolicy.class.getName());
collectionProperties.putIfAbsent(SolrTestCaseJ4.SYSTEM_PROPERTY_SOLR_TESTS_USEMERGEPOLICY, "true");
collectionProperties.putIfAbsent(SolrTestCaseJ4.SYSTEM_PROPERTY_SOLR_TESTS_USEMERGEPOLICYFACTORY, "false");
} else {
collectionProperties.putIfAbsent(SolrTestCaseJ4.SYSTEM_PROPERTY_SOLR_TESTS_MERGEPOLICYFACTORY, TieredMergePolicyFactory.class.getName());
collectionProperties.putIfAbsent(SolrTestCaseJ4.SYSTEM_PROPERTY_SOLR_TESTS_USEMERGEPOLICYFACTORY, "true");
collectionProperties.putIfAbsent(SolrTestCaseJ4.SYSTEM_PROPERTY_SOLR_TESTS_USEMERGEPOLICY, "false");
}
collectionProperties.putIfAbsent("solr.tests.mergeScheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
collectionProperties.putIfAbsent("solr.directoryFactory", (persistIndex ? "solr.StandardDirectoryFactory" : "solr.RAMDirectoryFactory"));

miniCluster.createCollection(collectionName, NUM_SHARDS, REPLICATION_FACTOR, configName, null, asyncId, collectionProperties);
}

public void collectionCreateSearchDelete(MiniSolrCloudCluster miniCluster) throws Exception {

final String collectionName = "testcollection";

final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();

assertNotNull(miniCluster.getZkServer());
List<JettySolrRunner> jettys = miniCluster.getJettySolrRunners();
assertEquals(NUM_SERVERS, jettys.size());
for (JettySolrRunner jetty : jettys) {
assertTrue(jetty.isRunning());
}

// create collection
log.info("#### Creating a collection");
final String asyncId = (random().nextBoolean() ? null : "asyncId("+collectionName+".create)="+random().nextInt());
createCollection(miniCluster, collectionName, asyncId);
if (asyncId != null) {
final RequestStatusState state = AbstractFullDistribZkTestBase.getRequestStateAfterCompletion(asyncId, 330,
cloudSolrClient);
assertSame("did not see async createCollection completion", RequestStatusState.COMPLETED, state);
}

ZkStateReader zkStateReader = miniCluster.getSolrClient().getZkStateReader();
AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);

// modify/query collection
log.info("#### updating a querying collection");
cloudSolrClient.setDefaultCollection(collectionName);
SolrInputDocument doc = new SolrInputDocument();
doc.setField("id", "1");
cloudSolrClient.add(doc);
cloudSolrClient.commit();
SolrQuery query = new SolrQuery();
query.setQuery("*:*");
QueryResponse rsp = cloudSolrClient.query(query);
assertEquals(1, rsp.getResults().getNumFound());

// delete the collection we created earlier
miniCluster.deleteCollection(collectionName);
AbstractDistribZkTestBase.waitForCollectionToDisappear(collectionName, zkStateReader, true, true, 330);

// create it again
String asyncId2 = (random().nextBoolean() ? null : "asyncId("+collectionName+".create)="+random().nextInt());
createCollection(miniCluster, collectionName, asyncId2);
if (asyncId2 != null) {
final RequestStatusState state = AbstractFullDistribZkTestBase.getRequestStateAfterCompletion(asyncId2, 330,
cloudSolrClient);
assertSame("did not see async createCollection completion", RequestStatusState.COMPLETED, state);
}
AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);

// check that there's no left-over state
assertEquals(0, cloudSolrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
cloudSolrClient.add(doc);
cloudSolrClient.commit();
assertEquals(1, cloudSolrClient.query(new SolrQuery("*:*")).getResults().getNumFound());
}

public static class MockAuthenticationPlugin extends AuthenticationPlugin implements HttpClientBuilderPlugin {
public static String expectedUsername;
public static String expectedPassword;
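The body of MockAuthenticationPlugin lies outside this hunk. As rough orientation only: a plugin of this shape can work by registering an HttpRequestInterceptor that attaches credentials to every outgoing SolrJ request. The sketch below is a hypothetical illustration of that mechanism, not the plugin's actual code; HttpClientUtil.addRequestInterceptor is assumed to be the registration hook, and the class and method names are made up for the example.

import java.nio.charset.StandardCharsets;
import java.util.Base64;
import org.apache.http.HttpRequestInterceptor;
import org.apache.solr.client.solrj.impl.HttpClientUtil;

class BasicAuthInterceptorSketch {
  static void install(String username, String password) {
    // Hypothetical: attach preemptive Basic credentials to every request SolrJ sends.
    String encoded = Base64.getEncoder()
        .encodeToString((username + ":" + password).getBytes(StandardCharsets.UTF_8));
    HttpRequestInterceptor basicAuth =
        (request, context) -> request.addHeader("Authorization", "Basic " + encoded);
    HttpClientUtil.addRequestInterceptor(basicAuth);  // assumed registration hook
  }
}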

HttpClientUtil.java

@@ -221,10 +221,6 @@ public class HttpClientUtil {
logger.debug("Creating new http client, config:" + config);
}

if (params.get(PROP_SO_TIMEOUT) != null || params.get(PROP_CONNECTION_TIMEOUT) != null) {
throw new SolrException(ErrorCode.SERVER_ERROR, "The socket connect and read timeout cannot be set here and must be set");
}

cm.setMaxTotal(params.getInt(HttpClientUtil.PROP_MAX_CONNECTIONS, 10000));
cm.setDefaultMaxPerRoute(params.getInt(HttpClientUtil.PROP_MAX_CONNECTIONS_PER_HOST, 10000));
cm.setValidateAfterInactivity(Integer.getInteger(VALIDATE_AFTER_INACTIVITY, VALIDATE_AFTER_INACTIVITY_DEFAULT));

@@ -261,7 +257,7 @@
newHttpClientBuilder = newHttpClientBuilder.setKeepAliveStrategy(keepAliveStrat)
.evictIdleConnections((long) Integer.getInteger(EVICT_IDLE_CONNECTIONS, EVICT_IDLE_CONNECTIONS_DEFAULT), TimeUnit.MILLISECONDS);

HttpClientBuilder builder = setupBuilder(newHttpClientBuilder, params == null ? new ModifiableSolrParams() : params);
HttpClientBuilder builder = setupBuilder(newHttpClientBuilder, params);

HttpClient httpClient = builder.setConnectionManager(cm).build();