mirror of https://github.com/apache/lucene.git

SOLR-6895: Deprecate SolrServer classes and replace with SolrClient

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1648697 13f79535-47bb-0310-9956-ffa450edef68

parent 9327c28874
commit ef809a0f10
@@ -132,6 +132,9 @@ Upgrading from Solr 4.x
  discovery are now resolved relative to SOLR_HOME, rather than cwd. See
  SOLR-6718.

* SolrServer and associated classes have been deprecated. Applications using
  SolrJ should use the equivalent SolrClient classes instead.

Detailed Change List
----------------------
@@ -182,7 +185,7 @@ New Features
* SOLR-6485: ReplicationHandler should have an option to throttle the speed of
  replication (Varun Thacker, Noble Paul)

* SOLR-6543: Give HttpSolrServer the ability to send PUT requests (Gregory Chanan)
* SOLR-6543: Give HttpSolrClient the ability to send PUT requests (Gregory Chanan)

* SOLR-5986: Don't allow runaway queries from harming Solr cluster health or search
  performance (Anshum Gupta, Steve Rowe, Robert Muir)

@@ -208,7 +211,7 @@ New Features
* SOLR-6617: /update/json/docs path will use fully qualified node names by default
  (Noble Paul)

* SOLR-4715: Add CloudSolrServer constructors which accept a HttpClient instance.
* SOLR-4715: Add CloudSolrClient constructors which accept a HttpClient instance.
  (Hardik Upadhyay, Shawn Heisey, shalin)

* SOLR-5992: add "removeregex" as an atomic update operation

@@ -290,7 +293,7 @@ Bug Fixes
  NOTE: This is NOT fixed for the (deprecated) facet.date idiom, use facet.range
  instead. (Erick Erickson, Zaccheo Bagnati, Ronald Matamoros, Vamsee Yalargadda)

* SOLR-6457: LBHttpSolrServer: ArrayIndexOutOfBoundsException risk if counter overflows
* SOLR-6457: LBHttpSolrClient: ArrayIndexOutOfBoundsException risk if counter overflows
  (longkey via Noble Paul)

* SOLR-6499: Log warning about multiple update request handlers

@@ -376,7 +379,7 @@ Bug Fixes
Optimizations
----------------------

* SOLR-6603: LBHttpSolrServer - lazily allocate skipped-zombie-servers list.
* SOLR-6603: LBHttpSolrClient - lazily allocate skipped-zombie-servers list.
  (Christine Poerschke via shalin)

* SOLR-6554: Speed up overseer operations avoiding cluster state reads from

@@ -420,8 +423,8 @@ Other Changes
* LUCENE-5650: Tests can no longer write to CWD. Update log dir is now made relative
  to the instance dir if it is not an absolute path. (Ryan Ernst, Dawid Weiss)

* SOLR-6390: Remove unnecessary checked exception for CloudSolrServer
  constructors, improve javadocs for CloudSolrServer constructors.
* SOLR-6390: Remove unnecessary checked exception for CloudSolrClient
  constructors, improve javadocs for CloudSolrClient constructors.
  (Steve Davids via Shawn Heisey)

* LUCENE-5901: Replaced all occurrences of LUCENE_CURRENT with LATEST for luceneMatchVersion.

@@ -457,7 +460,7 @@ Other Changes
* SOLR-6597: SolrIndexConfig parameter in one of the SolrIndexSearcher constructors has been removed.
  It was just passed and never used via that constructor. (Anshum Gupta)

* SOLR-5852: Add CloudSolrServer helper method to connect to a ZK ensemble. (Varun Thacker, Furkan KAMACI,
* SOLR-5852: Add CloudSolrClient helper method to connect to a ZK ensemble. (Varun Thacker, Furkan KAMACI,
  Shawn Heisey, Mark Miller, Erick Erickson via shalin)

* SOLR-6592: Avoid waiting for the leader to see the down state if that leader is not live.

@@ -542,7 +545,7 @@ Other Changes
* SOLR-6826: fieldType capitalization is not consistent with the rest of case-sensitive field names.
  (Alexandre Rafalovitch via Erick Erickson)

* SOLR-6849: HttpSolrServer.RemoteSolrException reports the URL of the remote
* SOLR-6849: HttpSolrClient.RemoteSolrException reports the URL of the remote
  host where the exception occurred. (Alan Woodward)

* SOLR-6852: SimplePostTool no longer defaults to collection1 making core/collection/update URL

@@ -584,6 +587,9 @@ Other Changes

* Fixed a typo in various solrconfig.xml files. (sdumitriu - pull request #120)

* SOLR-6895: SolrServer classes are renamed to *SolrClient. The existing
  classes still exist, but are deprecated. (Alan Woodward, Erik Hatcher)

================== 4.10.3 ==================

Bug Fixes
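
The upgrade note and the SOLR-6895 entry above describe a straight class rename on the SolrJ side. As a rough illustration of what that migration looks like in application code (this sketch is not part of the commit; the URL and collection name are placeholders), a caller that previously built an HttpSolrServer would switch to HttpSolrClient and otherwise keep the same calls:

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class MigrationSketch {
  public static void main(String[] args) throws Exception {
    // 4.x (now deprecated): HttpSolrServer server = new HttpSolrServer("http://localhost:8983/solr/collection1");
    // 5.x replacement: same constructor arguments, same request API.
    HttpSolrClient client = new HttpSolrClient("http://localhost:8983/solr/collection1");
    try {
      QueryResponse rsp = client.query(new SolrQuery("*:*"));
      System.out.println("numFound: " + rsp.getResults().getNumFound());
    } finally {
      client.shutdown(); // same lifecycle call the deprecated class used
    }
  }
}
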
@ -19,10 +19,10 @@ package org.apache.solr.handler.dataimport;
|
|||
|
||||
import org.apache.http.client.HttpClient;
|
||||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.HttpClientUtil;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.XMLResponseParser;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
import org.apache.solr.common.SolrDocument;
|
||||
|
@ -64,7 +64,7 @@ public class SolrEntityProcessor extends EntityProcessorBase {
|
|||
public static final int TIMEOUT_SECS = 5 * 60; // 5 minutes
|
||||
public static final int ROWS_DEFAULT = 50;
|
||||
|
||||
private SolrServer solrServer = null;
|
||||
private SolrClient solrClient = null;
|
||||
private String queryString;
|
||||
private int rows = ROWS_DEFAULT;
|
||||
private String[] filterQueries;
|
||||
|
@ -100,11 +100,11 @@ public class SolrEntityProcessor extends EntityProcessorBase {
|
|||
// (wt="javabin|xml") default is javabin
|
||||
if ("xml".equals(context.getResolvedEntityAttribute(CommonParams.WT))) {
|
||||
// TODO: it doesn't matter for this impl when passing a client currently, but we should close this!
|
||||
solrServer = new HttpSolrServer(url.toExternalForm(), client, new XMLResponseParser());
|
||||
solrClient = new HttpSolrClient(url.toExternalForm(), client, new XMLResponseParser());
|
||||
LOG.info("using XMLResponseParser");
|
||||
} else {
|
||||
// TODO: it doesn't matter for this impl when passing a client currently, but we should close this!
|
||||
solrServer = new HttpSolrServer(url.toExternalForm(), client);
|
||||
solrClient = new HttpSolrClient(url.toExternalForm(), client);
|
||||
LOG.info("using BinaryResponseParser");
|
||||
}
|
||||
} catch (MalformedURLException e) {
|
||||
|
@ -184,7 +184,7 @@ public class SolrEntityProcessor extends EntityProcessorBase {
|
|||
|
||||
QueryResponse response = null;
|
||||
try {
|
||||
response = solrServer.query(solrQuery);
|
||||
response = solrClient.query(solrQuery);
|
||||
} catch (SolrServerException e) {
|
||||
if (ABORT.equals(onError)) {
|
||||
wrapAndThrow(SEVERE, e);
|
||||
|
|
|
@ -21,7 +21,7 @@ import java.util.List;
|
|||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.DirectXmlRequest;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
import org.apache.solr.common.SolrDocument;
|
||||
|
@ -67,7 +67,7 @@ public class TestContentStreamDataSource extends AbstractDataImportHandlerTestCa
|
|||
params.set("command", "full-import");
|
||||
params.set("clean", "false");
|
||||
req.setParams(params);
|
||||
HttpSolrServer solrServer = new HttpSolrServer(buildUrl(jetty.getLocalPort(), "/solr"));
|
||||
HttpSolrClient solrServer = new HttpSolrClient(buildUrl(jetty.getLocalPort(), "/solr"));
|
||||
solrServer.request(req);
|
||||
ModifiableSolrParams qparams = new ModifiableSolrParams();
|
||||
qparams.add("q", "*:*");
|
||||
|
@ -87,7 +87,7 @@ public class TestContentStreamDataSource extends AbstractDataImportHandlerTestCa
|
|||
"clean", "false", UpdateParams.COMMIT, "false",
|
||||
UpdateParams.COMMIT_WITHIN, "1000");
|
||||
req.setParams(params);
|
||||
HttpSolrServer solrServer = new HttpSolrServer(buildUrl(jetty.getLocalPort(), "/solr"));
|
||||
HttpSolrClient solrServer = new HttpSolrClient(buildUrl(jetty.getLocalPort(), "/solr"));
|
||||
solrServer.request(req);
|
||||
Thread.sleep(100);
|
||||
ModifiableSolrParams queryAll = params("q", "*");
|
||||
|
|
|
@ -27,10 +27,9 @@ import java.util.Map.Entry;
|
|||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
import org.junit.After;
|
||||
import org.junit.AfterClass;
|
||||
|
@ -281,7 +280,7 @@ public class TestSolrEntityProcessorEndToEnd extends AbstractDataImportHandlerTe
|
|||
sidl.add(sd);
|
||||
}
|
||||
|
||||
HttpSolrServer solrServer = new HttpSolrServer(getSourceUrl());
|
||||
HttpSolrClient solrServer = new HttpSolrClient(getSourceUrl());
|
||||
try {
|
||||
solrServer.setConnectionTimeout(15000);
|
||||
solrServer.setSoTimeout(30000);
|
||||
|
|
|
@ -33,8 +33,8 @@ import java.util.concurrent.TimeUnit;
|
|||
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest;
|
||||
import org.apache.solr.hadoop.MapReduceIndexerTool.Options;
|
||||
import org.slf4j.Logger;
|
||||
|
@ -91,7 +91,7 @@ class GoLive {
|
|||
public Request call() {
|
||||
Request req = new Request();
|
||||
LOG.info("Live merge " + dir.getPath() + " into " + mergeUrl);
|
||||
final HttpSolrServer server = new HttpSolrServer(mergeUrl);
|
||||
final HttpSolrClient server = new HttpSolrClient(mergeUrl);
|
||||
try {
|
||||
CoreAdminRequest.MergeIndexes mergeRequest = new CoreAdminRequest.MergeIndexes();
|
||||
mergeRequest.setCoreName(name);
|
||||
|
@ -149,7 +149,7 @@ class GoLive {
|
|||
try {
|
||||
LOG.info("Committing live merge...");
|
||||
if (options.zkHost != null) {
|
||||
CloudSolrServer server = new CloudSolrServer(options.zkHost);
|
||||
CloudSolrClient server = new CloudSolrClient(options.zkHost);
|
||||
server.setDefaultCollection(options.collection);
|
||||
server.commit();
|
||||
server.shutdown();
|
||||
|
@ -157,7 +157,7 @@ class GoLive {
|
|||
for (List<String> urls : options.shardUrls) {
|
||||
for (String url : urls) {
|
||||
// TODO: we should do these concurrently
|
||||
HttpSolrServer server = new HttpSolrServer(url);
|
||||
HttpSolrClient server = new HttpSolrClient(url);
|
||||
server.commit();
|
||||
server.shutdown();
|
||||
}
|
||||
|
|
|
@ -49,10 +49,10 @@ import org.apache.lucene.util.LuceneTestCase.Slow;
|
|||
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
|
||||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrQuery.ORDER;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
|
||||
|
@ -383,7 +383,7 @@ public class MorphlineGoLiveMiniMRTest extends AbstractFullDistribZkTestBase {
|
|||
MapReduceIndexerTool tool;
|
||||
int res;
|
||||
QueryResponse results;
|
||||
HttpSolrServer server = new HttpSolrServer(cloudJettys.get(0).url);
|
||||
HttpSolrClient server = new HttpSolrClient(cloudJettys.get(0).url);
|
||||
String[] args = new String[]{};
|
||||
|
||||
args = new String[] {
|
||||
|
@ -699,7 +699,7 @@ public class MorphlineGoLiveMiniMRTest extends AbstractFullDistribZkTestBase {
|
|||
}
|
||||
}
|
||||
|
||||
private SolrDocumentList executeSolrQuery(SolrServer collection, String queryString) throws SolrServerException {
|
||||
private SolrDocumentList executeSolrQuery(SolrClient collection, String queryString) throws SolrServerException {
|
||||
SolrQuery query = new SolrQuery(queryString).setRows(2 * RECORD_COUNT).addSort("id", ORDER.asc);
|
||||
QueryResponse response = collection.query(query);
|
||||
return response.getResults();
|
||||
|
@ -713,7 +713,7 @@ public class MorphlineGoLiveMiniMRTest extends AbstractFullDistribZkTestBase {
|
|||
Collection<Replica> replicas = slice.getReplicas();
|
||||
long found = -1;
|
||||
for (Replica replica : replicas) {
|
||||
HttpSolrServer client = new HttpSolrServer(
|
||||
HttpSolrClient client = new HttpSolrClient(
|
||||
new ZkCoreNodeProps(replica).getCoreUrl());
|
||||
SolrQuery query = new SolrQuery("*:*");
|
||||
query.set("distrib", false);
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
package org.apache.solr.morphlines.solr;
|
||||
|
||||
import org.apache.http.client.HttpClient;
|
||||
import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
|
@ -25,18 +25,18 @@ import org.slf4j.LoggerFactory;
|
|||
* ConcurrentUpdateSolrServer that propagates exceptions up to the submitter of
|
||||
* requests on blockUntilFinished()
|
||||
*/
|
||||
final class SafeConcurrentUpdateSolrServer extends ConcurrentUpdateSolrServer {
|
||||
final class SafeConcurrentUpdateSolrClient extends ConcurrentUpdateSolrClient {
|
||||
|
||||
private Throwable currentException = null;
|
||||
private final Object myLock = new Object();
|
||||
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(SafeConcurrentUpdateSolrServer.class);
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(SafeConcurrentUpdateSolrClient.class);
|
||||
|
||||
public SafeConcurrentUpdateSolrServer(String solrServerUrl, int queueSize, int threadCount) {
|
||||
public SafeConcurrentUpdateSolrClient(String solrServerUrl, int queueSize, int threadCount) {
|
||||
this(solrServerUrl, null, queueSize, threadCount);
|
||||
}
|
||||
|
||||
public SafeConcurrentUpdateSolrServer(String solrServerUrl, HttpClient client, int queueSize, int threadCount) {
|
||||
public SafeConcurrentUpdateSolrClient(String solrServerUrl, HttpClient client, int queueSize, int threadCount) {
|
||||
super(solrServerUrl, client, queueSize, threadCount);
|
||||
}
|
||||
|
|
@ -0,0 +1,123 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright ownership.
|
||||
* The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
* (the "License"); you may not use this file except in compliance with
|
||||
* the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.solr.morphlines.solr;
|
||||
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient;
|
||||
import org.apache.solr.client.solrj.response.SolrPingResponse;
|
||||
import org.apache.solr.client.solrj.response.UpdateResponse;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* A vehicle to load a list of Solr documents into a local or remote {@link org.apache.solr.client.solrj.SolrClient}.
|
||||
*/
|
||||
public class SolrClientDocumentLoader implements DocumentLoader {
|
||||
|
||||
private final SolrClient client; // proxy to local or remote solr server
|
||||
private long numLoadedDocs = 0; // number of documents loaded in the current transaction
|
||||
private final int batchSize;
|
||||
private final List<SolrInputDocument> batch = new ArrayList();
|
||||
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(SolrClientDocumentLoader.class);
|
||||
|
||||
public SolrClientDocumentLoader(SolrClient client, int batchSize) {
|
||||
if (client == null) {
|
||||
throw new IllegalArgumentException("solr server must not be null");
|
||||
}
|
||||
this.client = client;
|
||||
if (batchSize <= 0) {
|
||||
throw new IllegalArgumentException("batchSize must be a positive number: " + batchSize);
|
||||
}
|
||||
this.batchSize = batchSize;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void beginTransaction() {
|
||||
LOGGER.trace("beginTransaction");
|
||||
batch.clear();
|
||||
numLoadedDocs = 0;
|
||||
if (client instanceof SafeConcurrentUpdateSolrClient) {
|
||||
((SafeConcurrentUpdateSolrClient) client).clearException();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void load(SolrInputDocument doc) throws IOException, SolrServerException {
|
||||
LOGGER.trace("load doc: {}", doc);
|
||||
batch.add(doc);
|
||||
if (batch.size() >= batchSize) {
|
||||
loadBatch();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void commitTransaction() throws SolrServerException, IOException {
|
||||
LOGGER.trace("commitTransaction");
|
||||
if (batch.size() > 0) {
|
||||
loadBatch();
|
||||
}
|
||||
if (numLoadedDocs > 0) {
|
||||
if (client instanceof ConcurrentUpdateSolrClient) {
|
||||
((ConcurrentUpdateSolrClient) client).blockUntilFinished();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void loadBatch() throws SolrServerException, IOException {
|
||||
numLoadedDocs += batch.size();
|
||||
try {
|
||||
UpdateResponse rsp = client.add(batch);
|
||||
} finally {
|
||||
batch.clear();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public UpdateResponse rollbackTransaction() throws SolrServerException, IOException {
|
||||
LOGGER.trace("rollback");
|
||||
if (!(client instanceof CloudSolrClient)) {
|
||||
return client.rollback();
|
||||
} else {
|
||||
return new UpdateResponse();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void shutdown() {
|
||||
LOGGER.trace("shutdown");
|
||||
client.shutdown();
|
||||
}
|
||||
|
||||
@Override
|
||||
public SolrPingResponse ping() throws SolrServerException, IOException {
|
||||
LOGGER.trace("ping");
|
||||
return client.ping();
|
||||
}
|
||||
|
||||
public SolrClient getSolrClient() {
|
||||
return client;
|
||||
}
|
||||
|
||||
}
|
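
The new SolrClientDocumentLoader added above buffers documents and pushes them through whatever SolrClient it is handed, flushing whenever the batch fills and again on commit. A minimal usage sketch follows; it is not part of the commit, and the URL, id value, and batch size are illustrative assumptions:

import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.morphlines.solr.SolrClientDocumentLoader;

public class LoaderSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder URL for a locally running Solr core.
    HttpSolrClient client = new HttpSolrClient("http://localhost:8983/solr/collection1");
    SolrClientDocumentLoader loader = new SolrClientDocumentLoader(client, 100); // flush every 100 docs
    loader.beginTransaction();
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", "1"); // hypothetical field
    loader.load(doc);            // buffered until the batch fills or the transaction commits
    loader.commitTransaction();  // flushes the remaining batch
    loader.shutdown();           // delegates to client.shutdown()
  }
}
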
|
@ -16,13 +16,14 @@
|
|||
*/
|
||||
package org.apache.solr.morphlines.solr;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.io.Files;
|
||||
import com.typesafe.config.Config;
|
||||
import com.typesafe.config.ConfigFactory;
|
||||
import com.typesafe.config.ConfigRenderOptions;
|
||||
import com.typesafe.config.ConfigUtil;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.common.cloud.SolrZkClient;
|
||||
import org.apache.solr.core.SolrConfig;
|
||||
import org.apache.solr.core.SolrResourceLoader;
|
||||
|
@ -38,12 +39,9 @@ import org.slf4j.LoggerFactory;
|
|||
import org.xml.sax.InputSource;
|
||||
import org.xml.sax.SAXException;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.io.Files;
|
||||
import com.typesafe.config.Config;
|
||||
import com.typesafe.config.ConfigFactory;
|
||||
import com.typesafe.config.ConfigRenderOptions;
|
||||
import com.typesafe.config.ConfigUtil;
|
||||
import javax.xml.parsers.ParserConfigurationException;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* Set of configuration parameters that identify the location and schema of a Solr server or
|
||||
|
@ -92,21 +90,21 @@ public class SolrLocator {
|
|||
if (collectionName == null || collectionName.length() == 0) {
|
||||
throw new MorphlineCompilationException("Parameter 'zkHost' requires that you also pass parameter 'collection'", config);
|
||||
}
|
||||
CloudSolrServer cloudSolrServer = new CloudSolrServer(zkHost);
|
||||
cloudSolrServer.setDefaultCollection(collectionName);
|
||||
cloudSolrServer.connect();
|
||||
return new SolrServerDocumentLoader(cloudSolrServer, batchSize);
|
||||
CloudSolrClient cloudSolrClient = new CloudSolrClient(zkHost);
|
||||
cloudSolrClient.setDefaultCollection(collectionName);
|
||||
cloudSolrClient.connect();
|
||||
return new SolrClientDocumentLoader(cloudSolrClient, batchSize);
|
||||
} else {
|
||||
if (solrUrl == null || solrUrl.length() == 0) {
|
||||
throw new MorphlineCompilationException("Missing parameter 'solrUrl'", config);
|
||||
}
|
||||
int solrServerNumThreads = 2;
|
||||
int solrServerQueueLength = solrServerNumThreads;
|
||||
SolrServer server = new SafeConcurrentUpdateSolrServer(solrUrl, solrServerQueueLength, solrServerNumThreads);
|
||||
SolrClient server = new SafeConcurrentUpdateSolrClient(solrUrl, solrServerQueueLength, solrServerNumThreads);
|
||||
// SolrServer server = new HttpSolrServer(solrServerUrl);
|
||||
// SolrServer server = new ConcurrentUpdateSolrServer(solrServerUrl, solrServerQueueLength, solrServerNumThreads);
|
||||
// server.setParser(new XMLResponseParser()); // binary parser is used by default
|
||||
return new SolrServerDocumentLoader(server, batchSize);
|
||||
return new SolrClientDocumentLoader(server, batchSize);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -14,110 +14,19 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.solr.morphlines.solr;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrServer;
|
||||
import org.apache.solr.client.solrj.response.SolrPingResponse;
|
||||
import org.apache.solr.client.solrj.response.UpdateResponse;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
|
||||
/**
|
||||
* A vehicle to load a list of Solr documents into a local or remote {@link SolrServer}.
|
||||
* @deprecated Use {@link org.apache.solr.morphlines.solr.SolrClientDocumentLoader}
|
||||
*/
|
||||
public class SolrServerDocumentLoader implements DocumentLoader {
|
||||
@Deprecated
|
||||
public class SolrServerDocumentLoader extends SolrClientDocumentLoader {
|
||||
|
||||
private final SolrServer server; // proxy to local or remote solr server
|
||||
private long numLoadedDocs = 0; // number of documents loaded in the current transaction
|
||||
private final int batchSize;
|
||||
private final List<SolrInputDocument> batch = new ArrayList();
|
||||
|
||||
private static final Logger LOGGER = LoggerFactory.getLogger(SolrServerDocumentLoader.class);
|
||||
|
||||
public SolrServerDocumentLoader(SolrServer server, int batchSize) {
|
||||
if (server == null) {
|
||||
throw new IllegalArgumentException("solr server must not be null");
|
||||
}
|
||||
this.server = server;
|
||||
if (batchSize <= 0) {
|
||||
throw new IllegalArgumentException("batchSize must be a positive number: " + batchSize);
|
||||
}
|
||||
this.batchSize = batchSize;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void beginTransaction() {
|
||||
LOGGER.trace("beginTransaction");
|
||||
batch.clear();
|
||||
numLoadedDocs = 0;
|
||||
if (server instanceof SafeConcurrentUpdateSolrServer) {
|
||||
((SafeConcurrentUpdateSolrServer) server).clearException();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void load(SolrInputDocument doc) throws IOException, SolrServerException {
|
||||
LOGGER.trace("load doc: {}", doc);
|
||||
batch.add(doc);
|
||||
if (batch.size() >= batchSize) {
|
||||
loadBatch();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void commitTransaction() throws SolrServerException, IOException {
|
||||
LOGGER.trace("commitTransaction");
|
||||
if (batch.size() > 0) {
|
||||
loadBatch();
|
||||
}
|
||||
if (numLoadedDocs > 0) {
|
||||
if (server instanceof ConcurrentUpdateSolrServer) {
|
||||
((ConcurrentUpdateSolrServer) server).blockUntilFinished();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void loadBatch() throws SolrServerException, IOException {
|
||||
numLoadedDocs += batch.size();
|
||||
try {
|
||||
UpdateResponse rsp = server.add(batch);
|
||||
} finally {
|
||||
batch.clear();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public UpdateResponse rollbackTransaction() throws SolrServerException, IOException {
|
||||
LOGGER.trace("rollback");
|
||||
if (!(server instanceof CloudSolrServer)) {
|
||||
return server.rollback();
|
||||
} else {
|
||||
return new UpdateResponse();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void shutdown() {
|
||||
LOGGER.trace("shutdown");
|
||||
server.shutdown();
|
||||
}
|
||||
|
||||
@Override
|
||||
public SolrPingResponse ping() throws SolrServerException, IOException {
|
||||
LOGGER.trace("ping");
|
||||
return server.ping();
|
||||
}
|
||||
|
||||
public SolrServer getSolrServer() {
|
||||
return server;
|
||||
public SolrServerDocumentLoader(SolrClient client, int batchSize) {
|
||||
super(client, batchSize);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -34,9 +34,9 @@ import java.util.concurrent.atomic.AtomicInteger;
|
|||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.solr.SolrTestCaseJ4;
|
||||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.XMLResponseParser;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
import org.apache.solr.common.SolrDocument;
|
||||
|
@ -65,7 +65,7 @@ public class AbstractSolrMorphlineTestBase extends SolrTestCaseJ4 {
|
|||
private static Locale savedLocale;
|
||||
protected Collector collector;
|
||||
protected Command morphline;
|
||||
protected SolrServer solrServer;
|
||||
protected SolrClient solrClient;
|
||||
protected DocumentLoader testServer;
|
||||
|
||||
protected static final boolean TEST_WITH_EMBEDDED_SOLR_SERVER = true;
|
||||
|
@ -119,19 +119,19 @@ public class AbstractSolrMorphlineTestBase extends SolrTestCaseJ4 {
|
|||
if (EXTERNAL_SOLR_SERVER_URL != null) {
|
||||
//solrServer = new ConcurrentUpdateSolrServer(EXTERNAL_SOLR_SERVER_URL, 2, 2);
|
||||
//solrServer = new SafeConcurrentUpdateSolrServer(EXTERNAL_SOLR_SERVER_URL, 2, 2);
|
||||
solrServer = new HttpSolrServer(EXTERNAL_SOLR_SERVER_URL);
|
||||
((HttpSolrServer)solrServer).setParser(new XMLResponseParser());
|
||||
solrClient = new HttpSolrClient(EXTERNAL_SOLR_SERVER_URL);
|
||||
((HttpSolrClient) solrClient).setParser(new XMLResponseParser());
|
||||
} else {
|
||||
if (TEST_WITH_EMBEDDED_SOLR_SERVER) {
|
||||
solrServer = new EmbeddedTestSolrServer(h.getCoreContainer(), "");
|
||||
solrClient = new EmbeddedTestSolrServer(h.getCoreContainer(), "");
|
||||
} else {
|
||||
throw new RuntimeException("Not yet implemented");
|
||||
//solrServer = new TestSolrServer(getSolrServer());
|
||||
//solrServer = new TestSolrServer(getSolrClient());
|
||||
}
|
||||
}
|
||||
|
||||
int batchSize = SEQ_NUM2.incrementAndGet() % 2 == 0 ? 100 : 1; //SolrInspector.DEFAULT_SOLR_SERVER_BATCH_SIZE : 1;
|
||||
testServer = new SolrServerDocumentLoader(solrServer, batchSize);
|
||||
testServer = new SolrClientDocumentLoader(solrClient, batchSize);
|
||||
deleteAllDocuments();
|
||||
|
||||
tempDir = createTempDir().toFile().getAbsolutePath();
|
||||
|
@ -140,8 +140,8 @@ public class AbstractSolrMorphlineTestBase extends SolrTestCaseJ4 {
|
|||
@After
|
||||
public void tearDown() throws Exception {
|
||||
collector = null;
|
||||
solrServer.shutdown();
|
||||
solrServer = null;
|
||||
solrClient.shutdown();
|
||||
solrClient = null;
|
||||
super.tearDown();
|
||||
}
|
||||
|
||||
|
@ -201,8 +201,8 @@ public class AbstractSolrMorphlineTestBase extends SolrTestCaseJ4 {
|
|||
// return collector.getRecords().size();
|
||||
try {
|
||||
testServer.commitTransaction();
|
||||
solrServer.commit(false, true, true);
|
||||
QueryResponse rsp = solrServer.query(new SolrQuery(query).setRows(Integer.MAX_VALUE));
|
||||
solrClient.commit(false, true, true);
|
||||
QueryResponse rsp = solrClient.query(new SolrQuery(query).setRows(Integer.MAX_VALUE));
|
||||
LOGGER.debug("rsp: {}", rsp);
|
||||
int i = 0;
|
||||
for (SolrDocument doc : rsp.getResults()) {
|
||||
|
@ -217,7 +217,7 @@ public class AbstractSolrMorphlineTestBase extends SolrTestCaseJ4 {
|
|||
|
||||
private void deleteAllDocuments() throws SolrServerException, IOException {
|
||||
collector.reset();
|
||||
SolrServer s = solrServer;
|
||||
SolrClient s = solrClient;
|
||||
s.deleteByQuery("*:*"); // delete everything!
|
||||
s.commit();
|
||||
}
|
||||
|
@ -255,7 +255,7 @@ public class AbstractSolrMorphlineTestBase extends SolrTestCaseJ4 {
|
|||
|
||||
protected void testDocumentContent(HashMap<String, ExpectedResult> expectedResultMap)
|
||||
throws Exception {
|
||||
QueryResponse rsp = solrServer.query(new SolrQuery("*:*").setRows(Integer.MAX_VALUE));
|
||||
QueryResponse rsp = solrClient.query(new SolrQuery("*:*").setRows(Integer.MAX_VALUE));
|
||||
// Check that every expected field/values shows up in the actual query
|
||||
for (Entry<String, ExpectedResult> current : expectedResultMap.entrySet()) {
|
||||
String field = current.getKey();
|
||||
|
|
|
@ -16,13 +16,13 @@
|
|||
*/
|
||||
package org.apache.solr.morphlines.solr;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
|
||||
import org.apache.solr.client.solrj.response.UpdateResponse;
|
||||
import org.apache.solr.core.CoreContainer;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* An EmbeddedSolrServer that suppresses close and rollback requests as
|
||||
* necessary for testing
|
||||
|
|
|
@ -17,13 +17,8 @@
|
|||
|
||||
package org.apache.solr.client.solrj.embedded;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrRequest;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.StreamingResponseCallback;
|
||||
import org.apache.solr.common.SolrDocument;
|
||||
|
@ -45,8 +40,13 @@ import org.apache.solr.response.ResultContext;
|
|||
import org.apache.solr.response.SolrQueryResponse;
|
||||
import org.apache.solr.servlet.SolrRequestParsers;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
|
||||
/**
|
||||
* SolrServer that connects directly to SolrCore.
|
||||
* SolrClient that connects directly to SolrCore.
|
||||
* <p>
|
||||
* TODO -- this implementation sends the response to XML and then parses it.
|
||||
* It *should* be able to convert the response directly into a named list.
|
||||
|
@ -54,7 +54,7 @@ import org.apache.solr.servlet.SolrRequestParsers;
|
|||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
public class EmbeddedSolrServer extends SolrServer
|
||||
public class EmbeddedSolrServer extends SolrClient
|
||||
{
|
||||
protected final CoreContainer coreContainer;
|
||||
protected final String coreName;
|
||||
|
@ -65,7 +65,7 @@ public class EmbeddedSolrServer extends SolrServer
|
|||
* @deprecated use {@link #EmbeddedSolrServer(CoreContainer, String)} instead.
|
||||
*/
|
||||
@Deprecated
|
||||
public EmbeddedSolrServer( SolrCore core )
|
||||
public EmbeddedSolrServer(SolrCore core)
|
||||
{
|
||||
if ( core == null ) {
|
||||
throw new NullPointerException("SolrCore instance required");
|
||||
|
@ -88,7 +88,7 @@ public class EmbeddedSolrServer extends SolrServer
|
|||
* @param coreContainer the core container
|
||||
* @param coreName the core name
|
||||
*/
|
||||
public EmbeddedSolrServer( CoreContainer coreContainer, String coreName )
|
||||
public EmbeddedSolrServer(CoreContainer coreContainer, String coreName)
|
||||
{
|
||||
if ( coreContainer == null ) {
|
||||
throw new NullPointerException("CoreContainer instance required");
|
||||
|
|
|
@ -1,12 +1,8 @@
|
|||
package org.apache.solr.cloud;
|
||||
|
||||
import java.net.ConnectException;
|
||||
import java.net.SocketException;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.http.NoHttpResponseException;
|
||||
import org.apache.http.conn.ConnectTimeoutException;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest.RequestRecovery;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.common.SolrException.ErrorCode;
|
||||
|
@ -18,6 +14,10 @@ import org.apache.solr.core.CoreContainer;
|
|||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.net.ConnectException;
|
||||
import java.net.SocketException;
|
||||
import java.util.List;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
|
@ -114,12 +114,12 @@ public class LeaderInitiatedRecoveryThread extends Thread {
|
|||
log.info("Asking core={} coreNodeName={} on " + recoveryUrl + " to recover", coreNeedingRecovery, replicaCoreNodeName);
|
||||
}
|
||||
|
||||
HttpSolrServer server = new HttpSolrServer(recoveryUrl);
|
||||
HttpSolrClient client = new HttpSolrClient(recoveryUrl);
|
||||
try {
|
||||
server.setSoTimeout(60000);
|
||||
server.setConnectionTimeout(15000);
|
||||
client.setSoTimeout(60000);
|
||||
client.setConnectionTimeout(15000);
|
||||
try {
|
||||
server.request(recoverRequestCmd);
|
||||
client.request(recoverRequestCmd);
|
||||
|
||||
log.info("Successfully sent " + CoreAdminAction.REQUESTRECOVERY +
|
||||
" command to core={} coreNodeName={} on " + recoveryUrl, coreNeedingRecovery, replicaCoreNodeName);
|
||||
|
@ -140,7 +140,7 @@ public class LeaderInitiatedRecoveryThread extends Thread {
|
|||
}
|
||||
}
|
||||
} finally {
|
||||
server.shutdown();
|
||||
client.shutdown();
|
||||
}
|
||||
|
||||
// wait a few seconds
|
||||
|
|
|
@ -30,7 +30,7 @@ import java.util.concurrent.Callable;
|
|||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.common.cloud.ClusterState;
|
||||
|
@ -418,10 +418,10 @@ public class OverseerAutoReplicaFailoverThread implements Runnable, Closeable {
|
|||
private boolean createSolrCore(final String collection,
|
||||
final String createUrl, final String dataDir, final String ulogDir,
|
||||
final String coreNodeName, final String coreName) {
|
||||
HttpSolrServer server = null;
|
||||
HttpSolrClient server = null;
|
||||
try {
|
||||
log.debug("create url={}", createUrl);
|
||||
server = new HttpSolrServer(createUrl);
|
||||
server = new HttpSolrClient(createUrl);
|
||||
server.setConnectionTimeout(30000);
|
||||
server.setSoTimeout(60000);
|
||||
Create createCmd = new Create();
|
||||
|
|
|
@ -17,53 +17,11 @@ package org.apache.solr.cloud;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import static org.apache.solr.cloud.Assign.getNodesForNewShard;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICAPROP;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDROLE;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.BALANCESHARDUNIQUE;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.CLUSTERSTATUS;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICAPROP;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.REMOVEROLE;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.SynchronousQueue;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.solr.client.solrj.SolrResponse;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest;
|
||||
import org.apache.solr.client.solrj.request.UpdateRequest;
|
||||
|
@ -113,7 +71,48 @@ import org.apache.zookeeper.data.Stat;
|
|||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.SynchronousQueue;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.apache.solr.cloud.Assign.getNodesForNewShard;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.PROPERTY_VALUE_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICAPROP;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDROLE;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.BALANCESHARDUNIQUE;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.CLUSTERSTATUS;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETE;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICAPROP;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETESHARD;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.REMOVEROLE;
|
||||
|
||||
|
||||
public class OverseerCollectionProcessor implements Runnable, Closeable {
|
||||
|
@ -1802,18 +1801,18 @@ public class OverseerCollectionProcessor implements Runnable, Closeable {
|
|||
|
||||
|
||||
static UpdateResponse softCommit(String url) throws SolrServerException, IOException {
|
||||
HttpSolrServer server = null;
|
||||
HttpSolrClient client = null;
|
||||
try {
|
||||
server = new HttpSolrServer(url);
|
||||
server.setConnectionTimeout(30000);
|
||||
server.setSoTimeout(120000);
|
||||
client = new HttpSolrClient(url);
|
||||
client.setConnectionTimeout(30000);
|
||||
client.setSoTimeout(120000);
|
||||
UpdateRequest ureq = new UpdateRequest();
|
||||
ureq.setParams(new ModifiableSolrParams());
|
||||
ureq.setAction(AbstractUpdateRequest.ACTION.COMMIT, false, true, true);
|
||||
return ureq.process(server);
|
||||
return ureq.process(client);
|
||||
} finally {
|
||||
if (server != null) {
|
||||
server.shutdown();
|
||||
if (client != null) {
|
||||
client.shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -21,8 +21,8 @@ import org.apache.http.client.methods.HttpUriRequest;
|
|||
import org.apache.lucene.search.MatchAllDocsQuery;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer.HttpUriRequestResponse;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient.HttpUriRequestResponse;
|
||||
import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest.WaitForState;
|
||||
import org.apache.solr.client.solrj.request.UpdateRequest;
|
||||
|
@ -200,7 +200,7 @@ public class RecoveryStrategy extends Thread implements ClosableThread {
|
|||
|
||||
private void commitOnLeader(String leaderUrl) throws SolrServerException,
|
||||
IOException {
|
||||
HttpSolrServer server = new HttpSolrServer(leaderUrl);
|
||||
HttpSolrClient server = new HttpSolrClient(leaderUrl);
|
||||
try {
|
||||
server.setConnectionTimeout(30000);
|
||||
UpdateRequest ureq = new UpdateRequest();
|
||||
|
@ -594,7 +594,7 @@ public class RecoveryStrategy extends Thread implements ClosableThread {
|
|||
|
||||
private void sendPrepRecoveryCmd(String leaderBaseUrl, String leaderCoreName, Slice slice)
|
||||
throws SolrServerException, IOException, InterruptedException, ExecutionException {
|
||||
HttpSolrServer server = new HttpSolrServer(leaderBaseUrl);
|
||||
HttpSolrClient server = new HttpSolrClient(leaderBaseUrl);
|
||||
try {
|
||||
server.setConnectionTimeout(30000);
|
||||
WaitForState prepCmd = new WaitForState();
|
||||
|
|
|
@ -17,20 +17,13 @@ package org.apache.solr.cloud;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
|
||||
import org.apache.http.client.HttpClient;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest.RequestRecovery;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.common.SolrException.ErrorCode;
|
||||
import org.apache.solr.common.cloud.ZkCoreNodeProps;
|
||||
import org.apache.solr.common.cloud.ZkNodeProps;
|
||||
import org.apache.solr.common.cloud.ZkStateReader;
|
||||
import org.apache.solr.common.params.CoreAdminParams.CoreAdminAction;
|
||||
import org.apache.solr.common.params.ModifiableSolrParams;
|
||||
import org.apache.solr.common.util.NamedList;
|
||||
|
@ -49,6 +42,11 @@ import org.apache.solr.update.UpdateShardHandler;
|
|||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
|
||||
public class SyncStrategy {
|
||||
protected final Logger log = LoggerFactory.getLogger(getClass());
|
||||
|
||||
|
@ -269,18 +267,18 @@ public class SyncStrategy {
|
|||
recoverRequestCmd.setAction(CoreAdminAction.REQUESTRECOVERY);
|
||||
recoverRequestCmd.setCoreName(coreName);
|
||||
|
||||
HttpSolrServer server = new HttpSolrServer(baseUrl, client);
|
||||
HttpSolrClient client = new HttpSolrClient(baseUrl, SyncStrategy.this.client);
|
||||
try {
|
||||
server.setConnectionTimeout(30000);
|
||||
server.setSoTimeout(120000);
|
||||
server.request(recoverRequestCmd);
|
||||
client.setConnectionTimeout(30000);
|
||||
client.setSoTimeout(120000);
|
||||
client.request(recoverRequestCmd);
|
||||
} catch (Throwable t) {
|
||||
SolrException.log(log, ZkCoreNodeProps.getCoreUrl(leaderProps) + ": Could not tell a replica to recover", t);
|
||||
if (t instanceof Error) {
|
||||
throw (Error) t;
|
||||
}
|
||||
} finally {
|
||||
server.shutdown();
|
||||
client.shutdown();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
|
|
@ -17,35 +17,9 @@ package org.apache.solr.cloud;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.io.UnsupportedEncodingException;
|
||||
import java.net.InetAddress;
|
||||
import java.net.NetworkInterface;
|
||||
import java.net.URLEncoder;
|
||||
import java.net.UnknownHostException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.text.MessageFormat;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Enumeration;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest.WaitForState;
|
||||
import org.apache.solr.cloud.overseer.OverseerAction;
|
||||
import org.apache.solr.cloud.overseer.SliceMutator;
|
||||
|
@ -80,15 +54,6 @@ import org.apache.solr.core.SolrResourceLoader;
|
|||
import org.apache.solr.handler.component.ShardHandler;
|
||||
import org.apache.solr.update.UpdateLog;
|
||||
import org.apache.solr.update.UpdateShardHandler;
|
||||
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
|
||||
|
||||
import org.apache.zookeeper.CreateMode;
|
||||
import org.apache.zookeeper.KeeperException;
|
||||
import org.apache.zookeeper.KeeperException.ConnectionLossException;
|
||||
|
@ -100,6 +65,39 @@ import org.apache.zookeeper.data.Stat;
|
|||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.io.UnsupportedEncodingException;
|
||||
import java.net.InetAddress;
|
||||
import java.net.NetworkInterface;
|
||||
import java.net.URLEncoder;
|
||||
import java.net.UnknownHostException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.text.MessageFormat;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.Enumeration;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Properties;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.BASE_URL_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.CORE_NAME_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.ELECTION_NODE_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.NODE_NAME_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.REJOIN_AT_HEAD_PROP;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
|
||||
|
||||
/**
|
||||
* Handle ZooKeeper interactions.
|
||||
*
|
||||
|
@ -1638,11 +1636,11 @@ public final class ZkController {
|
|||
log.info("Replica "+myCoreNodeName+
|
||||
" NOT in leader-initiated recovery, need to wait for leader to see down state.");
|
||||
|
||||
HttpSolrServer server = null;
|
||||
server = new HttpSolrServer(leaderBaseUrl);
|
||||
HttpSolrClient client = null;
|
||||
client = new HttpSolrClient(leaderBaseUrl);
|
||||
try {
|
||||
server.setConnectionTimeout(15000);
|
||||
server.setSoTimeout(120000);
|
||||
client.setConnectionTimeout(15000);
|
||||
client.setSoTimeout(120000);
|
||||
WaitForState prepCmd = new WaitForState();
|
||||
prepCmd.setCoreName(leaderCoreName);
|
||||
prepCmd.setNodeName(getNodeName());
|
||||
|
@ -1658,7 +1656,7 @@ public final class ZkController {
|
|||
"We have been closed");
|
||||
}
|
||||
try {
|
||||
server.request(prepCmd);
|
||||
client.request(prepCmd);
|
||||
break;
|
||||
} catch (Exception e) {
|
||||
|
||||
|
@ -1692,7 +1690,7 @@ public final class ZkController {
|
|||
}
|
||||
}
|
||||
} finally {
|
||||
server.shutdown();
|
||||
client.shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -16,6 +16,43 @@
*/
package org.apache.solr.handler;

import org.apache.commons.io.IOUtils;
import org.apache.http.client.HttpClient;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.FastInputStream;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.DirectoryFactory;
import org.apache.solr.core.DirectoryFactory.DirContext;
import org.apache.solr.core.IndexDeletionPolicyWrapper;
import org.apache.solr.core.SolrCore;
import org.apache.solr.handler.ReplicationHandler.FileInfo;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.update.CommitUpdateCommand;
import org.apache.solr.util.DefaultSolrThreadFactory;
import org.apache.solr.util.FileUtils;
import org.apache.solr.util.PropertiesInputStream;
import org.apache.solr.util.PropertiesOutputStream;
import org.apache.solr.util.RefCounted;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;

@@ -54,43 +91,6 @@ import java.util.zip.Adler32;
import java.util.zip.Checksum;
import java.util.zip.InflaterInputStream;

import org.apache.commons.io.IOUtils;
import org.apache.http.client.HttpClient;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.FastInputStream;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.DirectoryFactory.DirContext;
import org.apache.solr.core.DirectoryFactory;
import org.apache.solr.core.IndexDeletionPolicyWrapper;
import org.apache.solr.core.SolrCore;
import org.apache.solr.handler.ReplicationHandler.FileInfo;
import org.apache.solr.request.LocalSolrQueryRequest;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.search.SolrIndexSearcher;
import org.apache.solr.update.CommitUpdateCommand;
import org.apache.solr.util.DefaultSolrThreadFactory;
import org.apache.solr.util.FileUtils;
import org.apache.solr.util.PropertiesInputStream;
import org.apache.solr.util.PropertiesOutputStream;
import org.apache.solr.util.RefCounted;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.apache.solr.handler.ReplicationHandler.ALIAS;
import static org.apache.solr.handler.ReplicationHandler.CHECKSUM;
import static org.apache.solr.handler.ReplicationHandler.CMD_DETAILS;
@@ -246,17 +246,17 @@ public class SnapPuller {
params.set(CommonParams.WT, "javabin");
params.set(CommonParams.QT, "/replication");
QueryRequest req = new QueryRequest(params);
HttpSolrServer server = new HttpSolrServer(masterUrl, myHttpClient); //XXX modify to use shardhandler
HttpSolrClient client = new HttpSolrClient(masterUrl, myHttpClient); //XXX modify to use shardhandler
NamedList rsp;
try {
server.setSoTimeout(60000);
server.setConnectionTimeout(15000);
client.setSoTimeout(60000);
client.setConnectionTimeout(15000);

rsp = server.request(req);
rsp = client.request(req);
} catch (SolrServerException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, e.getMessage(), e);
} finally {
server.shutdown();
client.shutdown();
}
return rsp;
}
@@ -271,11 +271,11 @@ public class SnapPuller {
params.set(CommonParams.WT, "javabin");
params.set(CommonParams.QT, "/replication");
QueryRequest req = new QueryRequest(params);
HttpSolrServer server = new HttpSolrServer(masterUrl, myHttpClient); //XXX modify to use shardhandler
HttpSolrClient client = new HttpSolrClient(masterUrl, myHttpClient); //XXX modify to use shardhandler
try {
server.setSoTimeout(60000);
server.setConnectionTimeout(15000);
NamedList response = server.request(req);
client.setSoTimeout(60000);
client.setConnectionTimeout(15000);
NamedList response = client.request(req);

List<Map<String, Object>> files = (List<Map<String,Object>>) response.get(CMD_GET_FILE_LIST);
if (files != null)

@@ -292,7 +292,7 @@ public class SnapPuller {
} catch (SolrServerException e) {
throw new IOException(e);
} finally {
server.shutdown();
client.shutdown();
}
}
@@ -1364,12 +1364,12 @@ public class SnapPuller {
NamedList response;
InputStream is = null;

HttpSolrServer s = new HttpSolrServer(masterUrl, myHttpClient, null); //XXX use shardhandler
HttpSolrClient client = new HttpSolrClient(masterUrl, myHttpClient, null); //XXX use shardhandler
try {
s.setSoTimeout(60000);
s.setConnectionTimeout(15000);
client.setSoTimeout(60000);
client.setConnectionTimeout(15000);
QueryRequest req = new QueryRequest(params);
response = s.request(req);
response = client.request(req);
is = (InputStream) response.get("stream");
if(useInternal) {
is = new InflaterInputStream(is);

@@ -1380,7 +1380,7 @@ public class SnapPuller {
IOUtils.closeQuietly(is);
throw new IOException("Could not download file '" + fileName + "'", e);
} finally {
s.shutdown();
client.shutdown();
}
}
}
@@ -1631,12 +1631,12 @@ public class SnapPuller {

NamedList response;
InputStream is = null;
HttpSolrServer s = new HttpSolrServer(masterUrl, myHttpClient, null); //XXX use shardhandler
HttpSolrClient client = new HttpSolrClient(masterUrl, myHttpClient, null); //XXX use shardhandler
try {
s.setSoTimeout(60000);
s.setConnectionTimeout(15000);
client.setSoTimeout(60000);
client.setConnectionTimeout(15000);
QueryRequest req = new QueryRequest(params);
response = s.request(req);
response = client.request(req);
is = (InputStream) response.get("stream");
if(useInternal) {
is = new InflaterInputStream(is);

@@ -1647,7 +1647,7 @@ public class SnapPuller {
IOUtils.closeQuietly(is);
throw new IOException("Could not download file '" + fileName + "'", e);
} finally {
s.shutdown();
client.shutdown();
}
}
}
@@ -1657,15 +1657,15 @@ public class SnapPuller {
params.set(COMMAND, CMD_DETAILS);
params.set("slave", false);
params.set(CommonParams.QT, "/replication");
HttpSolrServer server = new HttpSolrServer(masterUrl, myHttpClient); //XXX use shardhandler
HttpSolrClient client = new HttpSolrClient(masterUrl, myHttpClient); //XXX use shardhandler
NamedList rsp;
try {
server.setSoTimeout(60000);
server.setConnectionTimeout(15000);
client.setSoTimeout(60000);
client.setConnectionTimeout(15000);
QueryRequest request = new QueryRequest(params);
rsp = server.request(request);
rsp = client.request(request);
} finally {
server.shutdown();
client.shutdown();
}
return rsp;
}
@@ -80,7 +80,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.commons.lang.StringUtils;
import org.apache.solr.client.solrj.SolrResponse;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CoreAdminRequest;
import org.apache.solr.client.solrj.request.CoreAdminRequest.RequestSyncShard;
import org.apache.solr.cloud.DistributedQueue;

@@ -773,7 +773,7 @@ public class CollectionsHandler extends RequestHandlerBase {
ZkNodeProps leaderProps = clusterState.getLeader(collection, shard);
ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(leaderProps);

HttpSolrServer server = new HttpSolrServer(nodeProps.getBaseUrl());
HttpSolrClient server = new HttpSolrClient(nodeProps.getBaseUrl());
try {
server.setConnectionTimeout(15000);
server.setSoTimeout(60000);
@@ -19,9 +19,9 @@ package org.apache.solr.handler.component;
import org.apache.http.client.HttpClient;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrResponse;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.impl.LBHttpSolrServer;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.impl.LBHttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.util.ClientUtils;
import org.apache.solr.cloud.CloudDescriptor;

@@ -152,14 +152,14 @@ public class HttpShardHandler extends ShardHandler {
if (urls.size() <= 1) {
String url = urls.get(0);
srsp.setShardAddress(url);
SolrServer server = new HttpSolrServer(url, httpClient);
SolrClient client = new HttpSolrClient(url, httpClient);
try {
ssr.nl = server.request(req);
ssr.nl = client.request(req);
} finally {
server.shutdown();
client.shutdown();
}
} else {
LBHttpSolrServer.Rsp rsp = httpShardHandlerFactory.makeLoadBalancedRequest(req, urls);
LBHttpSolrClient.Rsp rsp = httpShardHandlerFactory.makeLoadBalancedRequest(req, urls);
ssr.nl = rsp.getResponse();
srsp.setShardAddress(rsp.getServer());
}
@@ -20,7 +20,7 @@ import org.apache.commons.lang.StringUtils;
import org.apache.http.client.HttpClient;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
import org.apache.solr.client.solrj.impl.LBHttpSolrServer;
import org.apache.solr.client.solrj.impl.LBHttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.ExecutorUtil;

@@ -64,7 +64,7 @@ public class HttpShardHandlerFactory extends ShardHandlerFactory implements org.
);

protected HttpClient defaultClient;
private LBHttpSolrServer loadbalancer;
private LBHttpSolrClient loadbalancer;
//default values:
int soTimeout = 0;
int connectionTimeout = 0;

@@ -162,8 +162,8 @@ public class HttpShardHandlerFactory extends ShardHandlerFactory implements org.
return this.commExecutor;
}

protected LBHttpSolrServer createLoadbalancer(HttpClient httpClient){
return new LBHttpSolrServer(httpClient);
protected LBHttpSolrClient createLoadbalancer(HttpClient httpClient){
return new LBHttpSolrClient(httpClient);
}

protected <T> T getParameter(NamedList initArgs, String configKey, T defaultValue) {

@@ -202,9 +202,9 @@ public class HttpShardHandlerFactory extends ShardHandlerFactory implements org.
* @param urls The list of solr server urls to load balance across
* @return The response from the request
*/
public LBHttpSolrServer.Rsp makeLoadBalancedRequest(final QueryRequest req, List<String> urls)
public LBHttpSolrClient.Rsp makeLoadBalancedRequest(final QueryRequest req, List<String> urls)
throws SolrServerException, IOException {
return loadbalancer.request(new LBHttpSolrServer.Req(req, urls));
return loadbalancer.request(new LBHttpSolrClient.Req(req, urls));
}

/**
@@ -24,9 +24,9 @@ import org.apache.lucene.analysis.util.TokenizerFactory;
import org.apache.solr.analysis.TokenizerChain;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrResponse;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.cloud.ZkSolrResourceLoader;
import org.apache.solr.common.SolrException;

@@ -331,13 +331,13 @@ public final class ManagedIndexSchema extends IndexSchema {

@Override
public Integer call() throws Exception {
HttpSolrServer solr = new HttpSolrServer(coreUrl);
HttpSolrClient solr = new HttpSolrClient(coreUrl);
int remoteVersion = -1;
try {
// eventually, this loop will get killed by the ExecutorService's timeout
while (remoteVersion == -1 || remoteVersion < expectedZkVersion) {
try {
HttpSolrServer.HttpUriRequestResponse mrr = solr.httpUriRequest(this);
HttpSolrClient.HttpUriRequestResponse mrr = solr.httpUriRequest(this);
NamedList<Object> zkversionResp = mrr.future.get();
if (zkversionResp != null)
remoteVersion = (Integer)zkversionResp.get("zkversion");

@@ -371,7 +371,7 @@ public final class ManagedIndexSchema extends IndexSchema {
}

@Override
public SolrResponse process(SolrServer server) throws SolrServerException, IOException {
public SolrResponse process(SolrClient server) throws SolrServerException, IOException {
return null;
}
}
@@ -34,7 +34,7 @@ import org.apache.http.HeaderIterator;
import org.apache.http.HttpEntity;
import org.apache.http.HttpEntityEnclosingRequest;
import org.apache.http.HttpResponse;
import org.apache.solr.client.solrj.impl.CloudSolrServer;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.Aliases;

@@ -311,7 +311,7 @@ public class SolrDispatchFilter extends BaseSolrFilter {
String coreUrl = getRemotCoreUrl(cores, corename, origCorename);
// don't proxy for internal update requests
SolrParams queryParams = SolrRequestParsers.parseQueryString(req.getQueryString());
checkStateIsValid(cores, queryParams.get(CloudSolrServer.STATE_VERSION));
checkStateIsValid(cores, queryParams.get(CloudSolrClient.STATE_VERSION));
if (coreUrl != null
&& queryParams
.get(DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM) == null) {

@@ -372,7 +372,7 @@ public class SolrDispatchFilter extends BaseSolrFilter {
if( "/select".equals( path ) || "/select/".equals( path ) ) {
solrReq = parser.parse( core, path, req );

checkStateIsValid(cores,solrReq.getParams().get(CloudSolrServer.STATE_VERSION));
checkStateIsValid(cores,solrReq.getParams().get(CloudSolrClient.STATE_VERSION));
String qt = solrReq.getParams().get( CommonParams.QT );
handler = core.getRequestHandler( qt );
if( handler == null ) {
@ -17,6 +17,24 @@ package org.apache.solr.update;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import org.apache.http.HttpResponse;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.BinaryResponseParser;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
|
||||
import org.apache.solr.client.solrj.request.UpdateRequest;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.common.SolrException.ErrorCode;
|
||||
import org.apache.solr.common.cloud.ZkCoreNodeProps;
|
||||
import org.apache.solr.common.cloud.ZkStateReader;
|
||||
import org.apache.solr.common.params.ModifiableSolrParams;
|
||||
import org.apache.solr.common.util.NamedList;
|
||||
import org.apache.solr.core.Diagnostics;
|
||||
import org.apache.solr.update.processor.DistributedUpdateProcessor.RequestReplicationTracker;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.net.ConnectException;
|
||||
|
@ -31,30 +49,12 @@ import java.util.concurrent.ExecutorCompletionService;
|
|||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Future;
|
||||
|
||||
import org.apache.http.HttpResponse;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.BinaryResponseParser;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
|
||||
import org.apache.solr.client.solrj.request.UpdateRequest;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.common.SolrException.ErrorCode;
|
||||
import org.apache.solr.common.cloud.ZkCoreNodeProps;
|
||||
import org.apache.solr.common.cloud.ZkStateReader;
|
||||
import org.apache.solr.common.params.ModifiableSolrParams;
|
||||
import org.apache.solr.common.util.NamedList;
|
||||
import org.apache.solr.core.Diagnostics;
|
||||
import org.apache.solr.update.processor.DistributedUpdateProcessor.RequestReplicationTracker;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
|
||||
public class SolrCmdDistributor {
|
||||
private static final int MAX_RETRIES_ON_FORWARD = 25;
|
||||
public static Logger log = LoggerFactory.getLogger(SolrCmdDistributor.class);
|
||||
|
||||
private StreamingSolrServers servers;
|
||||
private StreamingSolrClients clients;
|
||||
|
||||
private int retryPause = 500;
|
||||
private int maxRetriesOnForward = MAX_RETRIES_ON_FORWARD;
|
||||
|
@ -71,16 +71,16 @@ public class SolrCmdDistributor {
|
|||
}
|
||||
|
||||
public SolrCmdDistributor(UpdateShardHandler updateShardHandler) {
|
||||
this.servers = new StreamingSolrServers(updateShardHandler);
|
||||
this.clients = new StreamingSolrClients(updateShardHandler);
|
||||
this.updateExecutor = updateShardHandler.getUpdateExecutor();
|
||||
this.completionService = new ExecutorCompletionService<>(updateExecutor);
|
||||
}
|
||||
|
||||
public SolrCmdDistributor(StreamingSolrServers servers, int maxRetriesOnForward, int retryPause) {
|
||||
this.servers = servers;
|
||||
public SolrCmdDistributor(StreamingSolrClients clients, int maxRetriesOnForward, int retryPause) {
|
||||
this.clients = clients;
|
||||
this.maxRetriesOnForward = maxRetriesOnForward;
|
||||
this.retryPause = retryPause;
|
||||
this.updateExecutor = servers.getUpdateExecutor();
|
||||
this.updateExecutor = clients.getUpdateExecutor();
|
||||
completionService = new ExecutorCompletionService<>(updateExecutor);
|
||||
}
|
||||
|
||||
|
@ -88,7 +88,7 @@ public class SolrCmdDistributor {
|
|||
try {
|
||||
blockAndDoRetries();
|
||||
} finally {
|
||||
servers.shutdown();
|
||||
clients.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -96,7 +96,7 @@ public class SolrCmdDistributor {
|
|||
// NOTE: retries will be forwards to a single url
|
||||
|
||||
List<Error> errors = new ArrayList<>(this.errors);
|
||||
errors.addAll(servers.getErrors());
|
||||
errors.addAll(clients.getErrors());
|
||||
List<Error> resubmitList = new ArrayList<>();
|
||||
|
||||
for (Error err : errors) {
|
||||
|
@ -156,7 +156,7 @@ public class SolrCmdDistributor {
|
|||
}
|
||||
}
|
||||
|
||||
servers.clearErrors();
|
||||
clients.clearErrors();
|
||||
this.errors.clear();
|
||||
for (Error err : resubmitList) {
|
||||
submit(err.req, false);
|
||||
|
@ -225,7 +225,7 @@ public class SolrCmdDistributor {
|
|||
}
|
||||
|
||||
private void blockAndDoRetries() {
|
||||
servers.blockUntilFinished();
|
||||
clients.blockUntilFinished();
|
||||
|
||||
// wait for any async commits to complete
|
||||
while (pending != null && pending.size() > 0) {
|
||||
|
@ -253,14 +253,13 @@ public class SolrCmdDistributor {
|
|||
if (req.synchronous) {
|
||||
blockAndDoRetries();
|
||||
|
||||
HttpSolrServer server = new HttpSolrServer(req.node.getUrl(),
|
||||
servers.getHttpClient());
|
||||
HttpSolrClient client = new HttpSolrClient(req.node.getUrl(), clients.getHttpClient());
|
||||
try {
|
||||
server.request(req.uReq);
|
||||
client.request(req.uReq);
|
||||
} catch (Exception e) {
|
||||
throw new SolrException(ErrorCode.SERVER_ERROR, "Failed synchronous update on shard " + req.node + " update: " + req.uReq , e);
|
||||
} finally {
|
||||
server.shutdown();
|
||||
client.shutdown();
|
||||
}
|
||||
|
||||
return;
|
||||
|
@ -292,8 +291,8 @@ public class SolrCmdDistributor {
|
|||
|
||||
private void doRequest(final Req req) {
|
||||
try {
|
||||
SolrServer solrServer = servers.getSolrServer(req);
|
||||
solrServer.request(req.uReq);
|
||||
SolrClient solrClient = clients.getSolrClient(req);
|
||||
solrClient.request(req.uReq);
|
||||
} catch (Exception e) {
|
||||
SolrException.log(log, e);
|
||||
Error error = new Error();
|
||||
|
|
|
@ -17,6 +17,19 @@ package org.apache.solr.update;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import org.apache.http.HttpResponse;
|
||||
import org.apache.http.client.HttpClient;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.impl.BinaryRequestWriter;
|
||||
import org.apache.solr.client.solrj.impl.BinaryResponseParser;
|
||||
import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.update.SolrCmdDistributor.Error;
|
||||
import org.apache.solr.update.processor.DistributedUpdateProcessor;
|
||||
import org.apache.solr.update.processor.DistributingUpdateProcessorFactory;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
|
@ -26,30 +39,17 @@ import java.util.Map;
|
|||
import java.util.Set;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
|
||||
import org.apache.http.HttpResponse;
|
||||
import org.apache.http.client.HttpClient;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.impl.BinaryRequestWriter;
|
||||
import org.apache.solr.client.solrj.impl.BinaryResponseParser;
|
||||
import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrServer;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.update.SolrCmdDistributor.Error;
|
||||
import org.apache.solr.update.processor.DistributedUpdateProcessor;
|
||||
import org.apache.solr.update.processor.DistributingUpdateProcessorFactory;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
public class StreamingSolrServers {
|
||||
public static Logger log = LoggerFactory.getLogger(StreamingSolrServers.class);
|
||||
public class StreamingSolrClients {
|
||||
public static Logger log = LoggerFactory.getLogger(StreamingSolrClients.class);
|
||||
|
||||
private HttpClient httpClient;
|
||||
|
||||
private Map<String,ConcurrentUpdateSolrServer> solrServers = new HashMap<>();
|
||||
private Map<String, ConcurrentUpdateSolrClient> solrClients = new HashMap<>();
|
||||
private List<Error> errors = Collections.synchronizedList(new ArrayList<Error>());
|
||||
|
||||
private ExecutorService updateExecutor;
|
||||
|
||||
public StreamingSolrServers(UpdateShardHandler updateShardHandler) {
|
||||
public StreamingSolrClients(UpdateShardHandler updateShardHandler) {
|
||||
this.updateExecutor = updateShardHandler.getUpdateExecutor();
|
||||
|
||||
httpClient = updateShardHandler.getHttpClient();
|
||||
|
@ -63,11 +63,11 @@ public class StreamingSolrServers {
|
|||
errors.clear();
|
||||
}
|
||||
|
||||
public synchronized SolrServer getSolrServer(final SolrCmdDistributor.Req req) {
|
||||
public synchronized SolrClient getSolrClient(final SolrCmdDistributor.Req req) {
|
||||
String url = getFullUrl(req.node.getUrl());
|
||||
ConcurrentUpdateSolrServer server = solrServers.get(url);
|
||||
if (server == null) {
|
||||
server = new ConcurrentUpdateSolrServer(url, httpClient, 100, 1, updateExecutor, true) {
|
||||
ConcurrentUpdateSolrClient client = solrClients.get(url);
|
||||
if (client == null) {
|
||||
client = new ConcurrentUpdateSolrClient(url, httpClient, 100, 1, updateExecutor, true) {
|
||||
@Override
|
||||
public void handleError(Throwable ex) {
|
||||
req.trackRequestResult(null, false);
|
||||
|
@ -85,28 +85,28 @@ public class StreamingSolrServers {
|
|||
req.trackRequestResult(resp, true);
|
||||
}
|
||||
};
|
||||
server.setParser(new BinaryResponseParser());
|
||||
server.setRequestWriter(new BinaryRequestWriter());
|
||||
server.setPollQueueTime(0);
|
||||
client.setParser(new BinaryResponseParser());
|
||||
client.setRequestWriter(new BinaryRequestWriter());
|
||||
client.setPollQueueTime(0);
|
||||
Set<String> queryParams = new HashSet<>(2);
|
||||
queryParams.add(DistributedUpdateProcessor.DISTRIB_FROM);
|
||||
queryParams.add(DistributingUpdateProcessorFactory.DISTRIB_UPDATE_PARAM);
|
||||
server.setQueryParams(queryParams);
|
||||
solrServers.put(url, server);
|
||||
client.setQueryParams(queryParams);
|
||||
solrClients.put(url, client);
|
||||
}
|
||||
|
||||
return server;
|
||||
return client;
|
||||
}
|
||||
|
||||
public synchronized void blockUntilFinished() {
|
||||
for (ConcurrentUpdateSolrServer server : solrServers.values()) {
|
||||
server.blockUntilFinished();
|
||||
for (ConcurrentUpdateSolrClient client : solrClients.values()) {
|
||||
client.blockUntilFinished();
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized void shutdown() {
|
||||
for (ConcurrentUpdateSolrServer server : solrServers.values()) {
|
||||
server.shutdown();
|
||||
for (ConcurrentUpdateSolrClient client : solrClients.values()) {
|
||||
client.shutdown();
|
||||
}
|
||||
}
|
||||
|
|
@ -17,25 +17,6 @@ package org.apache.solr.util;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.io.PrintStream;
|
||||
import java.net.ConnectException;
|
||||
import java.net.SocketException;
|
||||
import java.net.URL;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Enumeration;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.TreeSet;
|
||||
import java.util.zip.ZipEntry;
|
||||
import java.util.zip.ZipInputStream;
|
||||
|
||||
import org.apache.commons.cli.CommandLine;
|
||||
import org.apache.commons.cli.GnuParser;
|
||||
import org.apache.commons.cli.HelpFormatter;
|
||||
|
@ -61,9 +42,9 @@ import org.apache.log4j.LogManager;
|
|||
import org.apache.log4j.Logger;
|
||||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.HttpClientUtil;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
import org.apache.solr.cloud.ZkController;
|
||||
import org.apache.solr.common.SolrException;
|
||||
|
@ -78,6 +59,25 @@ import org.noggit.JSONParser;
|
|||
import org.noggit.JSONWriter;
|
||||
import org.noggit.ObjectBuilder;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.io.PrintStream;
|
||||
import java.net.ConnectException;
|
||||
import java.net.SocketException;
|
||||
import java.net.URL;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Enumeration;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.TreeSet;
|
||||
import java.util.zip.ZipEntry;
|
||||
import java.util.zip.ZipInputStream;
|
||||
|
||||
/**
|
||||
* Command-line utility for working with Solr.
|
||||
*/
|
||||
|
@ -93,7 +93,7 @@ public class SolrCLI {
|
|||
}
|
||||
|
||||
/**
|
||||
* Helps build SolrCloud aware tools by initializing a CloudSolrServer
|
||||
* Helps build SolrCloud aware tools by initializing a CloudSolrClient
|
||||
* instance before running the tool.
|
||||
*/
|
||||
public static abstract class SolrCloudTool implements Tool {
|
||||
|
@ -112,16 +112,16 @@ public class SolrCLI {
|
|||
|
||||
log.debug("Connecting to Solr cluster: " + zkHost);
|
||||
int exitStatus = 0;
|
||||
CloudSolrServer cloudSolrServer = null;
|
||||
CloudSolrClient cloudSolrClient = null;
|
||||
try {
|
||||
cloudSolrServer = new CloudSolrServer(zkHost);
|
||||
cloudSolrClient = new CloudSolrClient(zkHost);
|
||||
|
||||
String collection = cli.getOptionValue("collection");
|
||||
if (collection != null)
|
||||
cloudSolrServer.setDefaultCollection(collection);
|
||||
cloudSolrClient.setDefaultCollection(collection);
|
||||
|
||||
cloudSolrServer.connect();
|
||||
exitStatus = runCloudTool(cloudSolrServer, cli);
|
||||
cloudSolrClient.connect();
|
||||
exitStatus = runCloudTool(cloudSolrClient, cli);
|
||||
} catch (Exception exc) {
|
||||
// since this is a CLI, spare the user the stacktrace
|
||||
String excMsg = exc.getMessage();
|
||||
|
@ -132,9 +132,9 @@ public class SolrCLI {
|
|||
throw exc;
|
||||
}
|
||||
} finally {
|
||||
if (cloudSolrServer != null) {
|
||||
if (cloudSolrClient != null) {
|
||||
try {
|
||||
cloudSolrServer.shutdown();
|
||||
cloudSolrClient.shutdown();
|
||||
} catch (Exception ignore) {}
|
||||
}
|
||||
}
|
||||
|
@ -145,7 +145,7 @@ public class SolrCLI {
|
|||
/**
|
||||
* Runs a SolrCloud tool with CloudSolrServer initialized
|
||||
*/
|
||||
protected abstract int runCloudTool(CloudSolrServer cloudSolrServer, CommandLine cli)
|
||||
protected abstract int runCloudTool(CloudSolrClient cloudSolrClient, CommandLine cli)
|
||||
throws Exception;
|
||||
}
|
||||
|
||||
|
@ -872,7 +872,7 @@ public class SolrCLI {
|
|||
}
|
||||
|
||||
@Override
|
||||
protected int runCloudTool(CloudSolrServer cloudSolrServer, CommandLine cli) throws Exception {
|
||||
protected int runCloudTool(CloudSolrClient cloudSolrClient, CommandLine cli) throws Exception {
|
||||
|
||||
String collection = cli.getOptionValue("collection");
|
||||
if (collection == null)
|
||||
|
@ -880,7 +880,7 @@ public class SolrCLI {
|
|||
|
||||
log.info("Running healthcheck for "+collection);
|
||||
|
||||
ZkStateReader zkStateReader = cloudSolrServer.getZkStateReader();
|
||||
ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
|
||||
|
||||
ClusterState clusterState = zkStateReader.getClusterState();
|
||||
Set<String> liveNodes = clusterState.getLiveNodes();
|
||||
|
@ -890,7 +890,7 @@ public class SolrCLI {
|
|||
|
||||
SolrQuery q = new SolrQuery("*:*");
|
||||
q.setRows(0);
|
||||
QueryResponse qr = cloudSolrServer.query(q);
|
||||
QueryResponse qr = cloudSolrClient.query(q);
|
||||
String collErr = null;
|
||||
long docCount = -1;
|
||||
try {
|
||||
|
@ -930,7 +930,7 @@ public class SolrCLI {
|
|||
replicaStatus = ZkStateReader.DOWN;
|
||||
} else {
|
||||
// query this replica directly to get doc count and assess health
|
||||
HttpSolrServer solr = new HttpSolrServer(coreUrl);
|
||||
HttpSolrClient solr = new HttpSolrClient(coreUrl);
|
||||
String solrUrl = solr.getBaseURL();
|
||||
q = new SolrQuery("*:*");
|
||||
q.setRows(0);
|
||||
|
@ -1103,9 +1103,9 @@ public class SolrCLI {
|
|||
}
|
||||
|
||||
int toolExitStatus = 0;
|
||||
CloudSolrServer cloudSolrServer = null;
|
||||
CloudSolrClient cloudSolrServer = null;
|
||||
try {
|
||||
cloudSolrServer = new CloudSolrServer(zkHost);
|
||||
cloudSolrServer = new CloudSolrClient(zkHost);
|
||||
System.out.println("Connecting to ZooKeeper at " + zkHost);
|
||||
cloudSolrServer.connect();
|
||||
toolExitStatus = runCloudTool(cloudSolrServer, cli);
|
||||
|
@ -1129,8 +1129,8 @@ public class SolrCLI {
|
|||
return toolExitStatus;
|
||||
}
|
||||
|
||||
protected int runCloudTool(CloudSolrServer cloudSolrServer, CommandLine cli) throws Exception {
|
||||
Set<String> liveNodes = cloudSolrServer.getZkStateReader().getClusterState().getLiveNodes();
|
||||
protected int runCloudTool(CloudSolrClient cloudSolrClient, CommandLine cli) throws Exception {
|
||||
Set<String> liveNodes = cloudSolrClient.getZkStateReader().getClusterState().getLiveNodes();
|
||||
if (liveNodes.isEmpty())
|
||||
throw new IllegalStateException("No live nodes found! Cannot create a collection until " +
|
||||
"there is at least 1 live node in the cluster.");
|
||||
|
@ -1183,13 +1183,13 @@ public class SolrCLI {
|
|||
}
|
||||
|
||||
// test to see if that config exists in ZK
|
||||
if (!cloudSolrServer.getZkStateReader().getZkClient().exists("/configs/"+configSetNameInZk, true)) {
|
||||
if (!cloudSolrClient.getZkStateReader().getZkClient().exists("/configs/"+configSetNameInZk, true)) {
|
||||
System.out.println("Uploading "+confDir.getAbsolutePath()+
|
||||
" for config "+configSetNameInZk+" to ZooKeeper at "+cloudSolrServer.getZkHost());
|
||||
ZkController.uploadConfigDir(cloudSolrServer.getZkStateReader().getZkClient(), confDir, configSetNameInZk);
|
||||
" for config "+configSetNameInZk+" to ZooKeeper at "+cloudSolrClient.getZkHost());
|
||||
ZkController.uploadConfigDir(cloudSolrClient.getZkStateReader().getZkClient(), confDir, configSetNameInZk);
|
||||
}
|
||||
|
||||
String baseUrl = cloudSolrServer.getZkStateReader().getBaseUrlForNodeName(firstLiveNode);
|
||||
String baseUrl = cloudSolrClient.getZkStateReader().getBaseUrlForNodeName(firstLiveNode);
|
||||
String collectionName = cli.getOptionValue("name");
|
||||
|
||||
// since creating a collection is a heavy-weight operation, check for existence first
|
||||
@@ -22,7 +22,7 @@ import java.io.IOException;

import org.apache.commons.io.FileUtils;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
import org.apache.solr.client.solrj.request.AbstractUpdateRequest.ACTION;
import org.apache.solr.client.solrj.request.QueryRequest;

@@ -137,7 +137,7 @@ public class AnalysisAfterCoreReloadTest extends SolrTestCaseJ4 {
}
}

protected SolrServer getSolrCore() {
protected SolrClient getSolrCore() {
return new EmbeddedSolrServer(h.getCore());
}

@@ -18,7 +18,7 @@ package org.apache.solr;
*/

import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.params.CommonParams;

@@ -243,7 +243,7 @@ public class TestDistributedGrouping extends BaseDistributedSearchTestCase {
setDistributedParams(params);

int which = r.nextInt(clients.size());
SolrServer client = clients.get(which);
SolrClient client = clients.get(which);
QueryResponse rsp = client.query(params);
NamedList nl = (NamedList<?>) rsp.getResponse().get("grouped");
nl = (NamedList<?>) nl.getVal(0);
@ -17,7 +17,6 @@
|
|||
|
||||
package org.apache.solr;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
@ -25,10 +24,10 @@ import java.util.Map;
|
|||
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.response.FacetField;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
import org.apache.solr.client.solrj.response.RangeFacet;
|
||||
|
@ -471,7 +470,7 @@ public class TestDistributedSearch extends BaseDistributedSearchTestCase {
|
|||
for(int numDownServers = 0; numDownServers < jettys.size()-1; numDownServers++)
|
||||
{
|
||||
List<JettySolrRunner> upJettys = new ArrayList<>(jettys);
|
||||
List<SolrServer> upClients = new ArrayList<>(clients);
|
||||
List<SolrClient> upClients = new ArrayList<>(clients);
|
||||
List<JettySolrRunner> downJettys = new ArrayList<>();
|
||||
List<String> upShards = new ArrayList<>(Arrays.asList(shardsArr));
|
||||
for(int i=0; i<numDownServers; i++)
|
||||
|
@ -554,7 +553,7 @@ public class TestDistributedSearch extends BaseDistributedSearchTestCase {
|
|||
"stats.field", tdate_a,
|
||||
"stats.field", tdate_b,
|
||||
"stats.calcdistinct", "true");
|
||||
} catch (HttpSolrServer.RemoteSolrException e) {
|
||||
} catch (HttpSolrClient.RemoteSolrException e) {
|
||||
if (e.getMessage().startsWith("java.lang.NullPointerException")) {
|
||||
fail("NullPointerException with stats request on empty index");
|
||||
} else {
|
||||
|
@ -590,7 +589,7 @@ public class TestDistributedSearch extends BaseDistributedSearchTestCase {
|
|||
}
|
||||
|
||||
protected void queryPartialResults(final List<String> upShards,
|
||||
final List<SolrServer> upClients,
|
||||
final List<SolrClient> upClients,
|
||||
Object... q) throws Exception {
|
||||
|
||||
final ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
|
@ -622,7 +621,7 @@ public class TestDistributedSearch extends BaseDistributedSearchTestCase {
|
|||
public void run() {
|
||||
for (int j = 0; j < stress; j++) {
|
||||
int which = r.nextInt(upClients.size());
|
||||
SolrServer client = upClients.get(which);
|
||||
SolrClient client = upClients.get(which);
|
||||
try {
|
||||
QueryResponse rsp = client.query(new ModifiableSolrParams(params));
|
||||
if (verifyStress) {
|
||||
|
@ -643,10 +642,10 @@ public class TestDistributedSearch extends BaseDistributedSearchTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
protected QueryResponse queryRandomUpServer(ModifiableSolrParams params, List<SolrServer> upClients) throws SolrServerException {
|
||||
protected QueryResponse queryRandomUpServer(ModifiableSolrParams params, List<SolrClient> upClients) throws SolrServerException {
|
||||
// query a random "up" server
|
||||
int which = r.nextInt(upClients.size());
|
||||
SolrServer client = upClients.get(which);
|
||||
SolrClient client = upClients.get(which);
|
||||
QueryResponse rsp = client.query(params);
|
||||
return rsp;
|
||||
}
|
||||
@@ -74,7 +74,7 @@ public class TestSolrCoreProperties extends SolrJettyTestBase {
public void testSimple() throws Exception {
SolrParams params = params("q", "*:*",
"echoParams", "all");
QueryResponse res = getSolrServer().query(params);
QueryResponse res = getSolrClient().query(params);
assertEquals(0, res.getResults().getNumFound());

NamedList echoedParams = (NamedList) res.getHeader().get("params");
@@ -6,9 +6,9 @@ import java.io.OutputStream;

import org.apache.commons.io.FileUtils;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CoreAdminRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrException;

@@ -40,8 +40,8 @@ import org.junit.BeforeClass;

public class TestTolerantSearch extends SolrJettyTestBase {

private static SolrServer collection1;
private static SolrServer collection2;
private static SolrClient collection1;
private static SolrClient collection2;
private static String shard1;
private static String shard2;
private static File solrHome;

@@ -60,8 +60,8 @@ public class TestTolerantSearch extends SolrJettyTestBase {
solrHome = createSolrHome();
createJetty(solrHome.getAbsolutePath(), null, null);
String url = jetty.getBaseUrl().toString();
collection1 = new HttpSolrServer(url);
collection2 = new HttpSolrServer(url + "/collection2");
collection1 = new HttpSolrClient(url);
collection2 = new HttpSolrClient(url + "/collection2");

String urlCollection1 = jetty.getBaseUrl().toString() + "/" + "collection1";
String urlCollection2 = jetty.getBaseUrl().toString() + "/" + "collection2";
@ -17,18 +17,13 @@ package org.apache.solr.cloud;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.CollectionAdminRequest.CreateAlias;
|
||||
import org.apache.solr.client.solrj.request.CollectionAdminRequest.DeleteAlias;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
|
@ -43,6 +38,10 @@ import org.junit.AfterClass;
|
|||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Test sync phase that occurs when Leader goes down and a new Leader is
|
||||
* elected.
|
||||
|
@ -138,38 +137,38 @@ public class AliasIntegrationTest extends AbstractFullDistribZkTestBase {
|
|||
query.set("collection", "testalias");
|
||||
JettySolrRunner jetty = jettys.get(random().nextInt(jettys.size()));
|
||||
int port = jetty.getLocalPort();
|
||||
HttpSolrServer server = new HttpSolrServer(buildUrl(port) + "/testalias");
|
||||
res = server.query(query);
|
||||
HttpSolrClient client = new HttpSolrClient(buildUrl(port) + "/testalias");
|
||||
res = client.query(query);
|
||||
assertEquals(3, res.getResults().getNumFound());
|
||||
server.shutdown();
|
||||
server = null;
|
||||
client.shutdown();
|
||||
client = null;
|
||||
|
||||
// now without collections param
|
||||
query = new SolrQuery("*:*");
|
||||
jetty = jettys.get(random().nextInt(jettys.size()));
|
||||
port = jetty.getLocalPort();
|
||||
server = new HttpSolrServer(buildUrl(port) + "/testalias");
|
||||
res = server.query(query);
|
||||
client = new HttpSolrClient(buildUrl(port) + "/testalias");
|
||||
res = client.query(query);
|
||||
assertEquals(3, res.getResults().getNumFound());
|
||||
server.shutdown();
|
||||
server = null;
|
||||
client.shutdown();
|
||||
client = null;
|
||||
|
||||
// create alias, collection2 first because it's not on every node
|
||||
createAlias("testalias", "collection2,collection1");
|
||||
|
||||
// search with new cloud client
|
||||
CloudSolrServer cloudSolrServer = new CloudSolrServer(zkServer.getZkAddress(), random().nextBoolean());
|
||||
cloudSolrServer.setParallelUpdates(random().nextBoolean());
|
||||
CloudSolrClient cloudSolrClient = new CloudSolrClient(zkServer.getZkAddress(), random().nextBoolean());
|
||||
cloudSolrClient.setParallelUpdates(random().nextBoolean());
|
||||
query = new SolrQuery("*:*");
|
||||
query.set("collection", "testalias");
|
||||
res = cloudSolrServer.query(query);
|
||||
res = cloudSolrClient.query(query);
|
||||
assertEquals(5, res.getResults().getNumFound());
|
||||
|
||||
// Try with setDefaultCollection
|
||||
query = new SolrQuery("*:*");
|
||||
cloudSolrServer.setDefaultCollection("testalias");
|
||||
res = cloudSolrServer.query(query);
|
||||
cloudSolrServer.shutdown();
|
||||
cloudSolrClient.setDefaultCollection("testalias");
|
||||
res = cloudSolrClient.query(query);
|
||||
cloudSolrClient.shutdown();
|
||||
assertEquals(5, res.getResults().getNumFound());
|
||||
|
||||
// search for alias with random non cloud client
|
||||
|
@ -177,8 +176,8 @@ public class AliasIntegrationTest extends AbstractFullDistribZkTestBase {
|
|||
query.set("collection", "testalias");
|
||||
jetty = jettys.get(random().nextInt(jettys.size()));
|
||||
port = jetty.getLocalPort();
|
||||
server = new HttpSolrServer(buildUrl(port) + "/testalias");
|
||||
res = server.query(query);
|
||||
client = new HttpSolrClient(buildUrl(port) + "/testalias");
|
||||
res = client.query(query);
|
||||
assertEquals(5, res.getResults().getNumFound());
|
||||
|
||||
|
||||
|
@ -186,11 +185,11 @@ public class AliasIntegrationTest extends AbstractFullDistribZkTestBase {
|
|||
query = new SolrQuery("*:*");
|
||||
jetty = jettys.get(random().nextInt(jettys.size()));
|
||||
port = jetty.getLocalPort();
|
||||
server = new HttpSolrServer(buildUrl(port) + "/testalias");
|
||||
res = server.query(query);
|
||||
client = new HttpSolrClient(buildUrl(port) + "/testalias");
|
||||
res = client.query(query);
|
||||
assertEquals(5, res.getResults().getNumFound());
|
||||
server.shutdown();
|
||||
server = null;
|
||||
client.shutdown();
|
||||
client = null;
|
||||
|
||||
// update alias
|
||||
createAlias("testalias", "collection2");
|
||||
|
@ -214,26 +213,26 @@ public class AliasIntegrationTest extends AbstractFullDistribZkTestBase {
|
|||
// try a std client
|
||||
// search 1 and 2, but have no collections param
|
||||
query = new SolrQuery("*:*");
|
||||
HttpSolrServer client = new HttpSolrServer(getBaseUrl((HttpSolrServer) clients.get(0)) + "/testalias");
|
||||
res = client.query(query);
|
||||
HttpSolrClient httpclient = new HttpSolrClient(getBaseUrl((HttpSolrClient) clients.get(0)) + "/testalias");
|
||||
res = httpclient.query(query);
|
||||
assertEquals(5, res.getResults().getNumFound());
|
||||
client.shutdown();
|
||||
client = null;
|
||||
httpclient.shutdown();
|
||||
httpclient = null;
|
||||
|
||||
createAlias("testalias", "collection2");
|
||||
|
||||
// a second alias
|
||||
createAlias("testalias2", "collection2");
|
||||
|
||||
client = new HttpSolrServer(getBaseUrl((HttpSolrServer) clients.get(0)) + "/testalias");
|
||||
httpclient = new HttpSolrClient(getBaseUrl((HttpSolrClient) clients.get(0)) + "/testalias");
|
||||
SolrInputDocument doc8 = getDoc(id, 11, i1, -600, tlong, 600, t1,
|
||||
"humpty dumpy4 sat on a walls");
|
||||
client.add(doc8);
|
||||
client.commit();
|
||||
httpclient.add(doc8);
|
||||
httpclient.commit();
|
||||
res = client.query(query);
|
||||
assertEquals(3, res.getResults().getNumFound());
|
||||
client.shutdown();
|
||||
client = null;
|
||||
httpclient.shutdown();
|
||||
httpclient = null;
|
||||
|
||||
createAlias("testalias", "collection2,collection1");
|
||||
|
||||
|
@ -256,8 +255,8 @@ public class AliasIntegrationTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
private void createAlias(String alias, String collections)
|
||||
throws SolrServerException, IOException {
|
||||
SolrServer server = createNewSolrServer("",
|
||||
getBaseUrl((HttpSolrServer) clients.get(0)));
|
||||
SolrClient client = createNewSolrClient("",
|
||||
getBaseUrl((HttpSolrClient) clients.get(0)));
|
||||
if (random().nextBoolean()) {
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
params.set("collections", collections);
|
||||
|
@ -265,33 +264,33 @@ public class AliasIntegrationTest extends AbstractFullDistribZkTestBase {
|
|||
params.set("action", CollectionAction.CREATEALIAS.toString());
|
||||
QueryRequest request = new QueryRequest(params);
|
||||
request.setPath("/admin/collections");
|
||||
server.request(request);
|
||||
client.request(request);
|
||||
} else {
|
||||
CreateAlias request = new CreateAlias();
|
||||
request.setAliasName(alias);
|
||||
request.setAliasedCollections(collections);
|
||||
request.process(server);
|
||||
request.process(client);
|
||||
}
|
||||
server.shutdown();
|
||||
client.shutdown();
|
||||
}
|
||||
|
||||
private void deleteAlias(String alias) throws SolrServerException,
|
||||
IOException {
|
||||
SolrServer server = createNewSolrServer("",
|
||||
getBaseUrl((HttpSolrServer) clients.get(0)));
|
||||
SolrClient client = createNewSolrClient("",
|
||||
getBaseUrl((HttpSolrClient) clients.get(0)));
|
||||
if (random().nextBoolean()) {
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
params.set("name", alias);
|
||||
params.set("action", CollectionAction.DELETEALIAS.toString());
|
||||
QueryRequest request = new QueryRequest(params);
|
||||
request.setPath("/admin/collections");
|
||||
server.request(request);
|
||||
client.request(request);
|
||||
} else {
|
||||
DeleteAlias request = new DeleteAlias();
|
||||
request.setAliasName(alias);
|
||||
request.process(server);
|
||||
request.process(client);
|
||||
}
|
||||
server.shutdown();
|
||||
client.shutdown();
|
||||
}
|
||||
|
||||
protected void indexDoc(List<CloudJettyRunner> skipServers, Object... fields) throws IOException,
|
||||
@@ -19,7 +19,7 @@ package org.apache.solr.cloud;

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.ModifiableSolrParams;

@@ -113,14 +113,14 @@ public class AsyncMigrateRouteKeyTest extends MigrateRouteKeyTest {
SolrRequest request = new QueryRequest(params);
request.setPath("/admin/collections");

String baseUrl = ((HttpSolrServer) shardToJetty.get(SHARD1).get(0).client.solrClient)
String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient)
.getBaseURL();
baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());

HttpSolrServer baseServer = null;
HttpSolrClient baseServer = null;

try {
baseServer = new HttpSolrServer(baseUrl);
baseServer = new HttpSolrClient(baseUrl);
baseServer.setConnectionTimeout(15000);
return baseServer.request(request);
} finally {
@ -17,23 +17,15 @@ package org.apache.solr.cloud;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FilenameFilter;
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.http.client.methods.HttpGet;
|
||||
import org.apache.http.impl.client.BasicResponseHandler;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.client.solrj.request.UpdateRequest;
|
||||
|
@ -46,7 +38,13 @@ import org.apache.solr.common.params.CommonParams;
|
|||
import org.apache.solr.common.params.ModifiableSolrParams;
|
||||
import org.apache.solr.common.util.NamedList;
|
||||
import org.apache.solr.handler.ReplicationHandler;
|
||||
import org.apache.solr.util.AbstractSolrTestCase;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FilenameFilter;
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
/**
|
||||
* This test simply does a bunch of basic things in solrcloud mode and asserts things
|
||||
|
@ -134,7 +132,7 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
|
|||
String nodeName = leaderProps.getStr(ZkStateReader.NODE_NAME_PROP);
|
||||
chaosMonkey.stopShardExcept(SHARD2, nodeName);
|
||||
|
||||
SolrServer client = getClient(nodeName);
|
||||
SolrClient client = getClient(nodeName);
|
||||
|
||||
index_specific(client, "id", docId + 1, t1, "what happens here?");
|
||||
|
||||
|
@ -161,17 +159,17 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
|
|||
private void testNodeWithoutCollectionForwarding() throws Exception,
|
||||
SolrServerException, IOException {
|
||||
try {
|
||||
final String baseUrl = getBaseUrl((HttpSolrServer) clients.get(0));
|
||||
HttpSolrServer server = new HttpSolrServer(baseUrl);
|
||||
server.setConnectionTimeout(30000);
|
||||
final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
|
||||
HttpSolrClient client = new HttpSolrClient(baseUrl);
|
||||
client.setConnectionTimeout(30000);
|
||||
Create createCmd = new Create();
|
||||
createCmd.setRoles("none");
|
||||
createCmd.setCoreName(ONE_NODE_COLLECTION + "core");
|
||||
createCmd.setCollection(ONE_NODE_COLLECTION);
|
||||
createCmd.setNumShards(1);
|
||||
createCmd.setDataDir(getDataDir(createTempDir(ONE_NODE_COLLECTION).toFile().getAbsolutePath()));
|
||||
server.request(createCmd);
|
||||
server.shutdown();
|
||||
client.request(createCmd);
|
||||
client.shutdown();
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
fail(e.getMessage());
|
||||
|
@ -183,8 +181,8 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
|
|||
cloudClient.getZkStateReader().getLeaderRetry(ONE_NODE_COLLECTION, SHARD1, 30000);
|
||||
|
||||
int docs = 2;
|
||||
for (SolrServer client : clients) {
|
||||
final String baseUrl = getBaseUrl((HttpSolrServer) client);
|
||||
for (SolrClient client : clients) {
|
||||
final String baseUrl = getBaseUrl((HttpSolrClient) client);
|
||||
addAndQueryDocs(baseUrl, docs);
|
||||
docs += 2;
|
||||
}
|
||||
|
@ -193,7 +191,7 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
|
|||
// 2 docs added every call
|
||||
private void addAndQueryDocs(final String baseUrl, int docs)
|
||||
throws Exception {
|
||||
HttpSolrServer qclient = new HttpSolrServer(baseUrl + "/onenodecollection" + "core");
|
||||
HttpSolrClient qclient = new HttpSolrClient(baseUrl + "/onenodecollection" + "core");
|
||||
|
||||
// it might take a moment for the proxy node to see us in their cloud state
|
||||
waitForNon403or404or503(qclient);
|
||||
|
@ -209,7 +207,7 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
|
|||
assertEquals(docs - 1, results.getResults().getNumFound());
|
||||
qclient.shutdown();
|
||||
|
||||
qclient = new HttpSolrServer(baseUrl + "/onenodecollection");
|
||||
qclient = new HttpSolrClient(baseUrl + "/onenodecollection");
|
||||
results = qclient.query(query);
|
||||
assertEquals(docs - 1, results.getResults().getNumFound());
|
||||
|
||||
|
@ -351,7 +349,7 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
|
|||
System.err.println(controlClient.query(new SolrQuery("*:*")).getResults()
|
||||
.getNumFound());
|
||||
|
||||
for (SolrServer client : clients) {
|
||||
for (SolrClient client : clients) {
|
||||
try {
|
||||
SolrQuery q = new SolrQuery("*:*");
|
||||
q.set("distrib", false);
|
||||
|
@ -411,7 +409,7 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
|
|||
checkShardConsistency(true, false);
|
||||
|
||||
// try a backup command
|
||||
final HttpSolrServer client = (HttpSolrServer) shardToJetty.get(SHARD2).get(0).client.solrClient;
|
||||
final HttpSolrClient client = (HttpSolrClient) shardToJetty.get(SHARD2).get(0).client.solrClient;
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
params.set("qt", "/replication");
|
||||
params.set("command", "backup");
|
||||
|
@ -424,7 +422,7 @@ public class BasicDistributedZk2Test extends AbstractFullDistribZkTestBase {
|
|||
checkForBackupSuccess(client, location);
|
||||
}
|
||||
|
||||
private void checkForBackupSuccess(final HttpSolrServer client, File location)
|
||||
private void checkForBackupSuccess(final HttpSolrClient client, File location)
|
||||
throws InterruptedException, IOException {
|
||||
class CheckStatus extends Thread {
|
||||
volatile String fail = null;
|
||||
|
|
|
@ -17,33 +17,16 @@ package org.apache.solr.cloud;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.CompletionService;
|
||||
import java.util.concurrent.ExecutorCompletionService;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.SynchronousQueue;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.JSONTestUtil;
|
||||
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrRequest;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.AbstractUpdateRequest;
|
||||
import org.apache.solr.client.solrj.request.ContentStreamUpdateRequest;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
|
||||
|
@ -70,6 +53,23 @@ import org.apache.solr.util.DefaultSolrThreadFactory;
|
|||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.CompletionService;
|
||||
import java.util.concurrent.ExecutorCompletionService;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.SynchronousQueue;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
|
||||
/**
|
||||
* This test simply does a bunch of basic things in solrcloud mode and asserts things
|
||||
|
@ -88,7 +88,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
String oddField="oddField_s";
|
||||
String missingField="ignore_exception__missing_but_valid_field_t";
|
||||
|
||||
private Map<String,List<SolrServer>> otherCollectionClients = new HashMap<>();
|
||||
private Map<String,List<SolrClient>> otherCollectionClients = new HashMap<>();
|
||||
|
||||
private String oneInstanceCollection = "oneInstanceCollection";
|
||||
private String oneInstanceCollection2 = "oneInstanceCollection2";
|
||||
|
@ -163,9 +163,9 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
// ask every individual replica of every shard to update+commit the same doc id
|
||||
// with an incrementing counter on each update+commit
|
||||
int foo_i_counter = 0;
|
||||
for (SolrServer server : clients) {
|
||||
for (SolrClient client : clients) {
|
||||
foo_i_counter++;
|
||||
indexDoc(server, params("commit", "true"), // SOLR-4923
|
||||
indexDoc(client, params("commit", "true"), // SOLR-4923
|
||||
sdoc(id,1, i1,100, tlong,100, "foo_i", foo_i_counter));
|
||||
// after every update+commit, check all the shards consistency
|
||||
queryAndCompareShards(params("q", "id:1", "distrib", "false",
|
||||
|
@ -341,8 +341,8 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
Thread.sleep(100);
|
||||
}
|
||||
|
||||
for (SolrServer client : clients) {
|
||||
assertEquals("commitWithin did not work on node: " + ((HttpSolrServer)client).getBaseURL(), before + 1, client.query(new SolrQuery("*:*")).getResults().getNumFound());
|
||||
for (SolrClient client : clients) {
|
||||
assertEquals("commitWithin did not work on node: " + ((HttpSolrClient)client).getBaseURL(), before + 1, client.query(new SolrQuery("*:*")).getResults().getNumFound());
|
||||
}
|
||||
|
||||
// TODO: This test currently fails because debug info is obtained only
|
||||
|
@ -381,14 +381,14 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
createCmd.setSchemaName("nonexistent_schema.xml");
|
||||
|
||||
String url = getBaseUrl(clients.get(0));
|
||||
final HttpSolrServer server = new HttpSolrServer(url);
|
||||
final HttpSolrClient client = new HttpSolrClient(url);
|
||||
try {
|
||||
server.request(createCmd);
|
||||
client.request(createCmd);
|
||||
fail("Expected SolrCore create to fail");
|
||||
} catch (Exception e) {
|
||||
|
||||
} finally {
|
||||
server.shutdown();
|
||||
client.shutdown();
|
||||
}
|
||||
|
||||
long timeout = System.currentTimeMillis() + 15000;
|
||||
|
@ -410,9 +410,9 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
for (String shard : shardToJetty.keySet()) {
|
||||
// every client should give the same numDocs for this shard
|
||||
// shffle the clients in a diff order for each shard
|
||||
List<SolrServer> solrclients = new ArrayList<>(this.clients);
|
||||
List<SolrClient> solrclients = new ArrayList<>(this.clients);
|
||||
Collections.shuffle(solrclients, random());
|
||||
for (SolrServer client : solrclients) {
|
||||
for (SolrClient client : solrclients) {
|
||||
query.set("shards", shard);
|
||||
long numDocs = client.query(query).getResults().getNumFound();
|
||||
assertTrue("numDocs < 0 for shard "+shard+" via "+client,
|
||||
|
@ -484,7 +484,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
}
|
||||
String randShards = StringUtils.join(randomShards, ",");
|
||||
query.set("shards", randShards);
|
||||
for (SolrServer client : this.clients) {
|
||||
for (SolrClient client : this.clients) {
|
||||
assertEquals("numDocs for "+randShards+" via "+client,
|
||||
randomShardCountsExpected,
|
||||
client.query(query).getResults().getNumFound());
|
||||
|
@ -496,7 +496,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
for (Long c : shardCounts.values()) {
|
||||
totalShardNumDocs += c;
|
||||
}
|
||||
for (SolrServer client : clients) {
|
||||
for (SolrClient client : clients) {
|
||||
assertEquals("sum of shard numDocs on client: " + client,
|
||||
totalShardNumDocs,
|
||||
client.query(query).getResults().getNumFound());
|
||||
|
@ -507,22 +507,22 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
}
|
||||
|
||||
private void testStopAndStartCoresInOneInstance() throws Exception {
|
||||
SolrServer client = clients.get(0);
|
||||
SolrClient client = clients.get(0);
|
||||
String url3 = getBaseUrl(client);
|
||||
final HttpSolrServer server = new HttpSolrServer(url3);
|
||||
server.setConnectionTimeout(15000);
|
||||
server.setSoTimeout(60000);
|
||||
final HttpSolrClient httpSolrClient = new HttpSolrClient(url3);
|
||||
httpSolrClient.setConnectionTimeout(15000);
|
||||
httpSolrClient.setSoTimeout(60000);
|
||||
ThreadPoolExecutor executor = new ThreadPoolExecutor(0, Integer.MAX_VALUE,
|
||||
5, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
|
||||
new DefaultSolrThreadFactory("testExecutor"));
|
||||
int cnt = 3;
|
||||
|
||||
// create the cores
|
||||
createCores(server, executor, "multiunload2", 1, cnt);
|
||||
createCores(httpSolrClient, executor, "multiunload2", 1, cnt);
|
||||
|
||||
executor.shutdown();
|
||||
executor.awaitTermination(120, TimeUnit.SECONDS);
|
||||
server.shutdown();
|
||||
httpSolrClient.shutdown();
|
||||
|
||||
ChaosMonkey.stop(cloudJettys.get(0).jetty);
|
||||
printLayout();
|
||||
|
@ -541,7 +541,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
}
|
||||
|
||||
protected void createCores(final HttpSolrServer server,
|
||||
protected void createCores(final HttpSolrClient client,
|
||||
ThreadPoolExecutor executor, final String collection, final int numShards, int cnt) {
|
||||
for (int i = 0; i < cnt; i++) {
|
||||
final int freezeI = i;
|
||||
|
@ -558,7 +558,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
String core3dataDir = createTempDir(collection).toFile().getAbsolutePath();
|
||||
createCmd.setDataDir(getDataDir(core3dataDir));
|
||||
|
||||
server.request(createCmd);
|
||||
client.request(createCmd);
|
||||
} catch (SolrServerException e) {
|
||||
throw new RuntimeException(e);
|
||||
} catch (IOException e) {
|
||||
|
@ -570,17 +570,17 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
}
|
||||
}
|
||||
|
||||
protected String getBaseUrl(SolrServer client) {
|
||||
String url2 = ((HttpSolrServer) client).getBaseURL()
|
||||
protected String getBaseUrl(SolrClient client) {
|
||||
String url2 = ((HttpSolrClient) client).getBaseURL()
|
||||
.substring(
|
||||
0,
|
||||
((HttpSolrServer) client).getBaseURL().length()
|
||||
((HttpSolrClient) client).getBaseURL().length()
|
||||
- DEFAULT_COLLECTION.length() -1);
|
||||
return url2;
|
||||
}
|
||||
|
||||
protected CollectionAdminResponse createCollection(Map<String, List<Integer>> collectionInfos,
|
||||
String collectionName, int numShards, int numReplicas, int maxShardsPerNode, SolrServer client, String createNodeSetStr) throws SolrServerException, IOException {
|
||||
String collectionName, int numShards, int numReplicas, int maxShardsPerNode, SolrClient client, String createNodeSetStr) throws SolrServerException, IOException {
|
||||
// TODO: Use CollectionAdminRequest for this test
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
params.set("action", CollectionAction.CREATE.toString());
|
||||
|
@ -603,12 +603,12 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
CollectionAdminResponse res = new CollectionAdminResponse();
|
||||
if (client == null) {
|
||||
final String baseUrl = ((HttpSolrServer) clients.get(clientIndex)).getBaseURL().substring(
|
||||
final String baseUrl = ((HttpSolrClient) clients.get(clientIndex)).getBaseURL().substring(
|
||||
0,
|
||||
((HttpSolrServer) clients.get(clientIndex)).getBaseURL().length()
|
||||
((HttpSolrClient) clients.get(clientIndex)).getBaseURL().length()
|
||||
- DEFAULT_COLLECTION.length() - 1);
|
||||
|
||||
SolrServer aClient = createNewSolrServer("", baseUrl);
|
||||
SolrClient aClient = createNewSolrClient("", baseUrl);
|
||||
res.setResponse(aClient.request(request));
|
||||
aClient.shutdown();
|
||||
} else {
|
||||
|
@ -618,7 +618,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
}
|
||||
|
||||
protected ZkCoreNodeProps getLeaderUrlFromZk(String collection, String slice) {
|
||||
ClusterState clusterState = getCommonCloudSolrServer().getZkStateReader().getClusterState();
|
||||
ClusterState clusterState = getCommonCloudSolrClient().getZkStateReader().getClusterState();
|
||||
ZkNodeProps leader = clusterState.getLeader(collection, slice);
|
||||
if (leader == null) {
|
||||
throw new RuntimeException("Could not find leader:" + collection + " " + slice);
|
||||
|
@ -647,7 +647,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
for (int i = 1; i < numLoops; i++) {
|
||||
// add doc to random client
|
||||
SolrServer updateClient = clients.get(random().nextInt(clients.size()));
|
||||
SolrClient updateClient = clients.get(random().nextInt(clients.size()));
|
||||
SolrInputDocument doc = new SolrInputDocument();
|
||||
addFields(doc, id, i, fieldA, val, fieldB, val);
|
||||
UpdateResponse ures = add(updateClient, updateParams, doc);
|
||||
|
@ -683,7 +683,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
indexDoc(sd);
|
||||
|
||||
ignoreException("version conflict");
|
||||
for (SolrServer client : clients) {
|
||||
for (SolrClient client : clients) {
|
||||
try {
|
||||
client.add(sd);
|
||||
fail();
|
||||
|
@ -700,14 +700,14 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
List<Integer> expected = new ArrayList<>();
|
||||
int val = 0;
|
||||
for (SolrServer client : clients) {
|
||||
for (SolrClient client : clients) {
|
||||
val += 10;
|
||||
client.add(sdoc("id", 1000, "val_i", map("add",val), "foo_i",val));
|
||||
expected.add(val);
|
||||
}
|
||||
|
||||
QueryRequest qr = new QueryRequest(params("qt", "/get", "id","1000"));
|
||||
for (SolrServer client : clients) {
|
||||
for (SolrClient client : clients) {
|
||||
val += 10;
|
||||
NamedList rsp = client.request(qr);
|
||||
String match = JSONTestUtil.matchObj("/val_i", rsp.get("doc"), expected);
|
||||
|
@ -718,7 +718,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
private void testNumberOfCommitsWithCommitAfterAdd()
|
||||
throws SolrServerException, IOException {
|
||||
log.info("### STARTING testNumberOfCommitsWithCommitAfterAdd");
|
||||
long startCommits = getNumCommits((HttpSolrServer) clients.get(0));
|
||||
long startCommits = getNumCommits((HttpSolrClient) clients.get(0));
|
||||
|
||||
ContentStreamUpdateRequest up = new ContentStreamUpdateRequest("/update");
|
||||
up.addFile(getFile("books_numeric_ids.csv"), "application/csv");
|
||||
|
@ -726,38 +726,38 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
up.setAction(AbstractUpdateRequest.ACTION.COMMIT, true, true);
|
||||
NamedList<Object> result = clients.get(0).request(up);
|
||||
|
||||
long endCommits = getNumCommits((HttpSolrServer) clients.get(0));
|
||||
long endCommits = getNumCommits((HttpSolrClient) clients.get(0));
|
||||
|
||||
assertEquals(startCommits + 1L, endCommits);
|
||||
}
|
||||
|
||||
private Long getNumCommits(HttpSolrServer solrServer) throws
|
||||
private Long getNumCommits(HttpSolrClient sourceClient) throws
|
||||
SolrServerException, IOException {
|
||||
HttpSolrServer server = new HttpSolrServer(solrServer.getBaseURL());
|
||||
server.setConnectionTimeout(15000);
|
||||
server.setSoTimeout(60000);
|
||||
HttpSolrClient client = new HttpSolrClient(sourceClient.getBaseURL());
|
||||
client.setConnectionTimeout(15000);
|
||||
client.setSoTimeout(60000);
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
params.set("qt", "/admin/mbeans?key=updateHandler&stats=true");
|
||||
// use generic request to avoid extra processing of queries
|
||||
QueryRequest req = new QueryRequest(params);
|
||||
NamedList<Object> resp = server.request(req);
|
||||
NamedList<Object> resp = client.request(req);
|
||||
NamedList mbeans = (NamedList) resp.get("solr-mbeans");
|
||||
NamedList uhandlerCat = (NamedList) mbeans.get("UPDATEHANDLER");
|
||||
NamedList uhandler = (NamedList) uhandlerCat.get("updateHandler");
|
||||
NamedList stats = (NamedList) uhandler.get("stats");
|
||||
Long commits = (Long) stats.get("commits");
|
||||
server.shutdown();
|
||||
client.shutdown();
|
||||
return commits;
|
||||
}
|
||||
|
||||
private void testANewCollectionInOneInstanceWithManualShardAssignement() throws Exception {
|
||||
log.info("### STARTING testANewCollectionInOneInstanceWithManualShardAssignement");
|
||||
System.clearProperty("numShards");
|
||||
List<SolrServer> collectionClients = new ArrayList<>();
|
||||
SolrServer client = clients.get(0);
|
||||
final String baseUrl = ((HttpSolrServer) client).getBaseURL().substring(
|
||||
List<SolrClient> collectionClients = new ArrayList<>();
|
||||
SolrClient client = clients.get(0);
|
||||
final String baseUrl = ((HttpSolrClient) client).getBaseURL().substring(
|
||||
0,
|
||||
((HttpSolrServer) client).getBaseURL().length()
|
||||
((HttpSolrClient) client).getBaseURL().length()
|
||||
- DEFAULT_COLLECTION.length() - 1);
|
||||
createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 1, "slice1");
|
||||
createSolrCore(oneInstanceCollection2, collectionClients, baseUrl, 2, "slice2");
|
||||
|
@ -770,16 +770,16 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
pending.remove(future);
|
||||
}
|
||||
|
||||
SolrServer client1 = collectionClients.get(0);
|
||||
SolrServer client2 = collectionClients.get(1);
|
||||
SolrServer client3 = collectionClients.get(2);
|
||||
SolrServer client4 = collectionClients.get(3);
|
||||
SolrClient client1 = collectionClients.get(0);
|
||||
SolrClient client2 = collectionClients.get(1);
|
||||
SolrClient client3 = collectionClients.get(2);
|
||||
SolrClient client4 = collectionClients.get(3);
|
||||
|
||||
|
||||
// no one should be recovering
|
||||
waitForRecoveriesToFinish(oneInstanceCollection2, getCommonCloudSolrServer().getZkStateReader(), false, true);
|
||||
waitForRecoveriesToFinish(oneInstanceCollection2, getCommonCloudSolrClient().getZkStateReader(), false, true);
|
||||
|
||||
assertAllActive(oneInstanceCollection2, getCommonCloudSolrServer().getZkStateReader());
|
||||
assertAllActive(oneInstanceCollection2, getCommonCloudSolrClient().getZkStateReader());
|
||||
|
||||
//printLayout();
|
||||
|
||||
|
@ -800,7 +800,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
query.set("collection", oneInstanceCollection2);
|
||||
query.set("distrib", true);
|
||||
long allDocs = getCommonCloudSolrServer().query(query).getResults().getNumFound();
|
||||
long allDocs = getCommonCloudSolrClient().query(query).getResults().getNumFound();
|
||||
|
||||
// System.out.println("1:" + oneDocs);
|
||||
// System.out.println("2:" + twoDocs);
|
||||
|
@ -814,7 +814,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
assertEquals(3, allDocs);
|
||||
|
||||
// we added a role of none on these creates - check for it
|
||||
ZkStateReader zkStateReader = getCommonCloudSolrServer().getZkStateReader();
|
||||
ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
|
||||
zkStateReader.updateClusterState(true);
|
||||
Map<String,Slice> slices = zkStateReader.getClusterState().getSlicesMap(oneInstanceCollection2);
|
||||
assertNotNull(slices);
|
||||
|
@ -822,19 +822,19 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
assertEquals("none", roles);
|
||||
|
||||
|
||||
ZkCoreNodeProps props = new ZkCoreNodeProps(getCommonCloudSolrServer().getZkStateReader().getClusterState().getLeader(oneInstanceCollection2, "slice1"));
|
||||
ZkCoreNodeProps props = new ZkCoreNodeProps(getCommonCloudSolrClient().getZkStateReader().getClusterState().getLeader(oneInstanceCollection2, "slice1"));
|
||||
|
||||
// now test that unloading a core gets us a new leader
|
||||
HttpSolrServer server = new HttpSolrServer(baseUrl);
|
||||
server.setConnectionTimeout(15000);
|
||||
server.setSoTimeout(60000);
|
||||
HttpSolrClient unloadClient = new HttpSolrClient(baseUrl);
|
||||
unloadClient.setConnectionTimeout(15000);
|
||||
unloadClient.setSoTimeout(60000);
|
||||
Unload unloadCmd = new Unload(true);
|
||||
unloadCmd.setCoreName(props.getCoreName());
|
||||
|
||||
String leader = props.getCoreUrl();
|
||||
|
||||
server.request(unloadCmd);
|
||||
server.shutdown();
|
||||
unloadClient.request(unloadCmd);
|
||||
unloadClient.shutdown();
|
||||
|
||||
int tries = 50;
|
||||
while (leader.equals(zkStateReader.getLeaderUrl(oneInstanceCollection2, "slice1", 10000))) {
|
||||
|
@ -844,7 +844,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
}
|
||||
}
|
||||
|
||||
for (SolrServer aClient : collectionClients) {
|
||||
for (SolrClient aClient : collectionClients) {
|
||||
aClient.shutdown();
|
||||
}
|
||||
|
||||
|
@ -852,15 +852,15 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
private void testSearchByCollectionName() throws SolrServerException {
|
||||
log.info("### STARTING testSearchByCollectionName");
|
||||
SolrServer client = clients.get(0);
|
||||
final String baseUrl = ((HttpSolrServer) client).getBaseURL().substring(
|
||||
SolrClient client = clients.get(0);
|
||||
final String baseUrl = ((HttpSolrClient) client).getBaseURL().substring(
|
||||
0,
|
||||
((HttpSolrServer) client).getBaseURL().length()
|
||||
((HttpSolrClient) client).getBaseURL().length()
|
||||
- DEFAULT_COLLECTION.length() - 1);
|
||||
|
||||
// the cores each have different names, but if we add the collection name to the url
|
||||
// we should get mapped to the right core
|
||||
SolrServer client1 = createNewSolrServer(oneInstanceCollection, baseUrl);
|
||||
SolrClient client1 = createNewSolrClient(oneInstanceCollection, baseUrl);
|
||||
SolrQuery query = new SolrQuery("*:*");
|
||||
long oneDocs = client1.query(query).getResults().getNumFound();
|
||||
assertEquals(3, oneDocs);
|
||||
|
@ -869,27 +869,27 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
private void testUpdateByCollectionName() throws SolrServerException, IOException {
|
||||
log.info("### STARTING testUpdateByCollectionName");
|
||||
SolrServer client = clients.get(0);
|
||||
final String baseUrl = ((HttpSolrServer) client).getBaseURL().substring(
|
||||
SolrClient client = clients.get(0);
|
||||
final String baseUrl = ((HttpSolrClient) client).getBaseURL().substring(
|
||||
0,
|
||||
((HttpSolrServer) client).getBaseURL().length()
|
||||
((HttpSolrClient) client).getBaseURL().length()
|
||||
- DEFAULT_COLLECTION.length() - 1);
|
||||
|
||||
// the cores each have different names, but if we add the collection name to the url
|
||||
// we should get mapped to the right core
|
||||
// test hitting an update url
|
||||
SolrServer client1 = createNewSolrServer(oneInstanceCollection, baseUrl);
|
||||
SolrClient client1 = createNewSolrClient(oneInstanceCollection, baseUrl);
|
||||
client1.commit();
|
||||
client1.shutdown();
|
||||
}
|
||||
|
||||
private void testANewCollectionInOneInstance() throws Exception {
|
||||
log.info("### STARTING testANewCollectionInOneInstance");
|
||||
List<SolrServer> collectionClients = new ArrayList<>();
|
||||
SolrServer client = clients.get(0);
|
||||
final String baseUrl = ((HttpSolrServer) client).getBaseURL().substring(
|
||||
List<SolrClient> collectionClients = new ArrayList<>();
|
||||
SolrClient client = clients.get(0);
|
||||
final String baseUrl = ((HttpSolrClient) client).getBaseURL().substring(
|
||||
0,
|
||||
((HttpSolrServer) client).getBaseURL().length()
|
||||
((HttpSolrClient) client).getBaseURL().length()
|
||||
- DEFAULT_COLLECTION.length() - 1);
|
||||
createCollection(oneInstanceCollection, collectionClients, baseUrl, 1);
|
||||
createCollection(oneInstanceCollection, collectionClients, baseUrl, 2);
|
||||
|
@ -903,13 +903,13 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
pending.remove(future);
|
||||
}
|
||||
|
||||
SolrServer client1 = collectionClients.get(0);
|
||||
SolrServer client2 = collectionClients.get(1);
|
||||
SolrServer client3 = collectionClients.get(2);
|
||||
SolrServer client4 = collectionClients.get(3);
|
||||
SolrClient client1 = collectionClients.get(0);
|
||||
SolrClient client2 = collectionClients.get(1);
|
||||
SolrClient client3 = collectionClients.get(2);
|
||||
SolrClient client4 = collectionClients.get(3);
|
||||
|
||||
waitForRecoveriesToFinish(oneInstanceCollection, getCommonCloudSolrServer().getZkStateReader(), false);
|
||||
assertAllActive(oneInstanceCollection, getCommonCloudSolrServer().getZkStateReader());
|
||||
waitForRecoveriesToFinish(oneInstanceCollection, getCommonCloudSolrClient().getZkStateReader(), false);
|
||||
assertAllActive(oneInstanceCollection, getCommonCloudSolrClient().getZkStateReader());
|
||||
|
||||
client2.add(getDoc(id, "1"));
|
||||
client3.add(getDoc(id, "2"));
|
||||
|
@ -925,7 +925,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
query.set("collection", oneInstanceCollection);
|
||||
query.set("distrib", true);
|
||||
long allDocs = getCommonCloudSolrServer().query(query).getResults().getNumFound();
|
||||
long allDocs = getCommonCloudSolrClient().query(query).getResults().getNumFound();
|
||||
|
||||
// System.out.println("1:" + oneDocs);
|
||||
// System.out.println("2:" + twoDocs);
|
||||
|
@ -934,26 +934,26 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
// System.out.println("All Docs:" + allDocs);
|
||||
|
||||
assertEquals(3, allDocs);
|
||||
for(SolrServer newCollectionClient:collectionClients) {
|
||||
for(SolrClient newCollectionClient:collectionClients) {
|
||||
newCollectionClient.shutdown();
|
||||
}
|
||||
}
|
||||
|
||||
private void createCollection(String collection,
|
||||
List<SolrServer> collectionClients, String baseUrl, int num) {
|
||||
List<SolrClient> collectionClients, String baseUrl, int num) {
|
||||
createSolrCore(collection, collectionClients, baseUrl, num, null);
|
||||
}
|
||||
|
||||
private void createSolrCore(final String collection,
|
||||
List<SolrServer> collectionClients, final String baseUrl, final int num,
|
||||
List<SolrClient> collectionClients, final String baseUrl, final int num,
|
||||
final String shardId) {
|
||||
Callable call = new Callable() {
|
||||
@Override
|
||||
public Object call() {
|
||||
HttpSolrServer server = null;
|
||||
HttpSolrClient client = null;
|
||||
try {
|
||||
server = new HttpSolrServer(baseUrl);
|
||||
server.setConnectionTimeout(15000);
|
||||
client = new HttpSolrClient(baseUrl);
|
||||
client.setConnectionTimeout(15000);
|
||||
Create createCmd = new Create();
|
||||
createCmd.setRoles("none");
|
||||
createCmd.setCoreName(collection + num);
|
||||
|
@ -971,13 +971,13 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
if (shardId != null) {
|
||||
createCmd.setShardId(shardId);
|
||||
}
|
||||
server.request(createCmd);
|
||||
client.request(createCmd);
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
//fail
|
||||
} finally {
|
||||
if (server != null) {
|
||||
server.shutdown();
|
||||
if (client != null) {
|
||||
client.shutdown();
|
||||
}
|
||||
}
|
||||
return null;
|
||||
|
@ -987,7 +987,7 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
pending.add(completionService.submit(call));
|
||||
|
||||
|
||||
collectionClients.add(createNewSolrServer(collection + num, baseUrl));
|
||||
collectionClients.add(createNewSolrClient(collection + num, baseUrl));
|
||||
}
|
||||
|
||||
private void testMultipleCollections() throws Exception {
|
||||
|
@ -1006,21 +1006,21 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
indexDoc("collection2", getDoc(id, "10000000"));
|
||||
indexDoc("collection2", getDoc(id, "10000001"));
|
||||
indexDoc("collection2", getDoc(id, "10000003"));
|
||||
getCommonCloudSolrServer().setDefaultCollection("collection2");
|
||||
getCommonCloudSolrServer().add(getDoc(id, "10000004"));
|
||||
getCommonCloudSolrServer().setDefaultCollection(null);
|
||||
getCommonCloudSolrClient().setDefaultCollection("collection2");
|
||||
getCommonCloudSolrClient().add(getDoc(id, "10000004"));
|
||||
getCommonCloudSolrClient().setDefaultCollection(null);
|
||||
|
||||
indexDoc("collection3", getDoc(id, "20000000"));
|
||||
indexDoc("collection3", getDoc(id, "20000001"));
|
||||
getCommonCloudSolrServer().setDefaultCollection("collection3");
|
||||
getCommonCloudSolrServer().add(getDoc(id, "10000005"));
|
||||
getCommonCloudSolrServer().setDefaultCollection(null);
|
||||
getCommonCloudSolrClient().setDefaultCollection("collection3");
|
||||
getCommonCloudSolrClient().add(getDoc(id, "10000005"));
|
||||
getCommonCloudSolrClient().setDefaultCollection(null);
|
||||
|
||||
otherCollectionClients.get("collection2").get(0).commit();
|
||||
otherCollectionClients.get("collection3").get(0).commit();
|
||||
|
||||
getCommonCloudSolrServer().setDefaultCollection("collection1");
|
||||
long collection1Docs = getCommonCloudSolrServer().query(new SolrQuery("*:*")).getResults()
|
||||
getCommonCloudSolrClient().setDefaultCollection("collection1");
|
||||
long collection1Docs = getCommonCloudSolrClient().query(new SolrQuery("*:*")).getResults()
|
||||
.getNumFound();
|
||||
|
||||
long collection2Docs = otherCollectionClients.get("collection2").get(0)
|
||||
|
@ -1041,19 +1041,19 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
assertEquals(collection1Docs + collection2Docs + collection3Docs, found);
|
||||
|
||||
// try to search multiple with cloud client
|
||||
found = getCommonCloudSolrServer().query(query).getResults().getNumFound();
|
||||
found = getCommonCloudSolrClient().query(query).getResults().getNumFound();
|
||||
assertEquals(collection1Docs + collection2Docs + collection3Docs, found);
|
||||
|
||||
query.set("collection", "collection2,collection3");
|
||||
found = getCommonCloudSolrServer().query(query).getResults().getNumFound();
|
||||
found = getCommonCloudSolrClient().query(query).getResults().getNumFound();
|
||||
assertEquals(collection2Docs + collection3Docs, found);
|
||||
|
||||
query.set("collection", "collection3");
|
||||
found = getCommonCloudSolrServer().query(query).getResults().getNumFound();
|
||||
found = getCommonCloudSolrClient().query(query).getResults().getNumFound();
|
||||
assertEquals(collection3Docs, found);
|
||||
|
||||
query.remove("collection");
|
||||
found = getCommonCloudSolrServer().query(query).getResults().getNumFound();
|
||||
found = getCommonCloudSolrClient().query(query).getResults().getNumFound();
|
||||
assertEquals(collection1Docs, found);
|
||||
|
||||
assertEquals(collection3Docs, collection2Docs - 1);
|
||||
|
@ -1066,49 +1066,49 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
}
|
||||
|
||||
protected void indexDoc(String collection, SolrInputDocument doc) throws IOException, SolrServerException {
|
||||
List<SolrServer> clients = otherCollectionClients.get(collection);
|
||||
List<SolrClient> clients = otherCollectionClients.get(collection);
|
||||
int which = (doc.getField(id).toString().hashCode() & 0x7fffffff) % clients.size();
|
||||
SolrServer client = clients.get(which);
|
||||
SolrClient client = clients.get(which);
|
||||
client.add(doc);
|
||||
}
|
||||
|
||||
private void createNewCollection(final String collection) throws InterruptedException {
|
||||
final List<SolrServer> collectionClients = new ArrayList<>();
|
||||
final List<SolrClient> collectionClients = new ArrayList<>();
|
||||
otherCollectionClients.put(collection, collectionClients);
|
||||
int unique = 0;
|
||||
for (final SolrServer client : clients) {
|
||||
for (final SolrClient client : clients) {
|
||||
unique++;
|
||||
final String baseUrl = ((HttpSolrServer) client).getBaseURL()
|
||||
final String baseUrl = ((HttpSolrClient) client).getBaseURL()
|
||||
.substring(
|
||||
0,
|
||||
((HttpSolrServer) client).getBaseURL().length()
|
||||
((HttpSolrClient) client).getBaseURL().length()
|
||||
- DEFAULT_COLLECTION.length() -1);
|
||||
final int frozeUnique = unique;
|
||||
Callable call = new Callable() {
|
||||
@Override
|
||||
public Object call() {
|
||||
HttpSolrServer server = null;
|
||||
HttpSolrClient client = null;
|
||||
try {
|
||||
server = new HttpSolrServer(baseUrl);
|
||||
server.setConnectionTimeout(15000);
|
||||
server.setSoTimeout(60000);
|
||||
client = new HttpSolrClient(baseUrl);
|
||||
client.setConnectionTimeout(15000);
|
||||
client.setSoTimeout(60000);
|
||||
Create createCmd = new Create();
|
||||
createCmd.setCoreName(collection);
|
||||
createCmd.setDataDir(getDataDir(createTempDir(collection).toFile().getAbsolutePath()));
|
||||
server.request(createCmd);
|
||||
client.request(createCmd);
|
||||
} catch (Exception e) {
|
||||
e.printStackTrace();
|
||||
//fails
|
||||
} finally {
|
||||
if (server != null) {
|
||||
server.shutdown();
|
||||
if (client != null) {
|
||||
client.shutdown();
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
collectionClients.add(createNewSolrServer(collection, baseUrl));
|
||||
collectionClients.add(createNewSolrClient(collection, baseUrl));
|
||||
pending.add(completionService.submit(call));
|
||||
while (pending != null && pending.size() > 0) {
|
||||
|
||||
|
@ -1119,33 +1119,33 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
}
|
||||
}
|
||||
|
||||
protected SolrServer createNewSolrServer(String collection, String baseUrl) {
|
||||
protected SolrClient createNewSolrClient(String collection, String baseUrl) {
|
||||
try {
|
||||
// setup the server...
|
||||
HttpSolrServer s = new HttpSolrServer(baseUrl + "/" + collection);
|
||||
s.setSoTimeout(120000);
|
||||
s.setDefaultMaxConnectionsPerHost(100);
|
||||
s.setMaxTotalConnections(100);
|
||||
return s;
|
||||
HttpSolrClient client = new HttpSolrClient(baseUrl + "/" + collection);
|
||||
client.setSoTimeout(120000);
|
||||
client.setDefaultMaxConnectionsPerHost(100);
|
||||
client.setMaxTotalConnections(100);
|
||||
return client;
|
||||
}
|
||||
catch (Exception ex) {
|
||||
throw new RuntimeException(ex);
|
||||
}
|
||||
}
|
||||
|
||||
volatile CloudSolrServer commondCloudSolrServer;
|
||||
protected CloudSolrServer getCommonCloudSolrServer() {
|
||||
if (commondCloudSolrServer == null) {
|
||||
volatile CloudSolrClient commondCloudSolrClient;
|
||||
protected CloudSolrClient getCommonCloudSolrClient() {
|
||||
if (commondCloudSolrClient == null) {
|
||||
synchronized(this) {
|
||||
commondCloudSolrServer = new CloudSolrServer(zkServer.getZkAddress(), random().nextBoolean());
|
||||
commondCloudSolrServer.setParallelUpdates(random().nextBoolean());
|
||||
commondCloudSolrServer.setDefaultCollection(DEFAULT_COLLECTION);
|
||||
commondCloudSolrServer.getLbServer().setConnectionTimeout(15000);
|
||||
commondCloudSolrServer.getLbServer().setSoTimeout(30000);
|
||||
commondCloudSolrServer.connect();
|
||||
commondCloudSolrClient = new CloudSolrClient(zkServer.getZkAddress(), random().nextBoolean());
|
||||
commondCloudSolrClient.setParallelUpdates(random().nextBoolean());
|
||||
commondCloudSolrClient.setDefaultCollection(DEFAULT_COLLECTION);
|
||||
commondCloudSolrClient.getLbClient().setConnectionTimeout(15000);
|
||||
commondCloudSolrClient.getLbClient().setSoTimeout(30000);
|
||||
commondCloudSolrClient.connect();
|
||||
}
|
||||
}
|
||||
return commondCloudSolrServer;
|
||||
return commondCloudSolrClient;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -1157,19 +1157,19 @@ public class BasicDistributedZkTest extends AbstractFullDistribZkTestBase {
|
|||
if (r.nextBoolean())
|
||||
params.set("collection",DEFAULT_COLLECTION);
|
||||
|
||||
QueryResponse rsp = getCommonCloudSolrServer().query(params);
|
||||
QueryResponse rsp = getCommonCloudSolrClient().query(params);
|
||||
return rsp;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void tearDown() throws Exception {
|
||||
super.tearDown();
|
||||
if (commondCloudSolrServer != null) {
|
||||
commondCloudSolrServer.shutdown();
|
||||
if (commondCloudSolrClient != null) {
|
||||
commondCloudSolrClient.shutdown();
|
||||
}
|
||||
if (otherCollectionClients != null) {
|
||||
for (List<SolrServer> clientList : otherCollectionClients.values()) {
|
||||
for (SolrServer client : clientList) {
|
||||
for (List<SolrClient> clientList : otherCollectionClients.values()) {
|
||||
for (SolrClient client : clientList) {
|
||||
client.shutdown();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,22 +17,16 @@ package org.apache.solr.cloud;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.net.ConnectException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
|
||||
import org.apache.http.client.HttpClient;
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.HttpClientUtil;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
import org.apache.solr.common.cloud.ZkStateReader;
|
||||
import org.apache.solr.core.Diagnostics;
|
||||
|
@ -44,7 +38,12 @@ import org.junit.BeforeClass;
|
|||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
|
||||
import java.net.ConnectException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
@Slow
|
||||
@SuppressSSL
|
||||
|
@ -253,7 +252,7 @@ public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase
|
|||
zkServer.run();
|
||||
}
|
||||
|
||||
CloudSolrServer client = createCloudClient("collection1");
|
||||
CloudSolrClient client = createCloudClient("collection1");
|
||||
try {
|
||||
createCollection(null, "testcollection",
|
||||
1, 1, 1, client, null, "conf1");
|
||||
|
@ -294,11 +293,11 @@ public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase
|
|||
private HttpClient httpClient = HttpClientUtil.createClient(null);
|
||||
private volatile boolean stop = false;
|
||||
int clientIndex = 0;
|
||||
private ConcurrentUpdateSolrServer suss;
|
||||
private List<SolrServer> clients;
|
||||
private ConcurrentUpdateSolrClient cusc;
|
||||
private List<SolrClient> clients;
|
||||
private AtomicInteger fails = new AtomicInteger();
|
||||
|
||||
public FullThrottleStopableIndexingThread(List<SolrServer> clients,
|
||||
public FullThrottleStopableIndexingThread(List<SolrClient> clients,
|
||||
String id, boolean doDeletes) {
|
||||
super(controlClient, cloudClient, id, doDeletes);
|
||||
setName("FullThrottleStopableIndexingThread");
|
||||
|
@ -306,12 +305,12 @@ public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase
|
|||
this.clients = clients;
|
||||
HttpClientUtil.setConnectionTimeout(httpClient, 15000);
|
||||
HttpClientUtil.setSoTimeout(httpClient, 15000);
|
||||
suss = new ConcurrentUpdateSolrServer(
|
||||
((HttpSolrServer) clients.get(0)).getBaseURL(), httpClient, 8,
|
||||
cusc = new ConcurrentUpdateSolrClient(
|
||||
((HttpSolrClient) clients.get(0)).getBaseURL(), httpClient, 8,
|
||||
2) {
|
||||
@Override
|
||||
public void handleError(Throwable ex) {
|
||||
log.warn("suss error", ex);
|
||||
log.warn("cusc error", ex);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
@ -330,7 +329,7 @@ public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase
|
|||
String delete = deletes.remove(0);
|
||||
try {
|
||||
numDeletes++;
|
||||
suss.deleteById(delete);
|
||||
cusc.deleteById(delete);
|
||||
} catch (Exception e) {
|
||||
changeUrlOnError(e);
|
||||
//System.err.println("REQUEST FAILED:");
|
||||
|
@ -350,7 +349,7 @@ public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase
|
|||
50,
|
||||
t1,
|
||||
"Saxon heptarchies that used to rip around so in old times and raise Cain. My, you ought to seen old Henry the Eight when he was in bloom. He WAS a blossom. He used to marry a new wife every day, and chop off her head next morning. And he would do it just as indifferent as if ");
|
||||
suss.add(doc);
|
||||
cusc.add(doc);
|
||||
} catch (Exception e) {
|
||||
changeUrlOnError(e);
|
||||
//System.err.println("REQUEST FAILED:");
|
||||
|
@ -373,13 +372,13 @@ public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase
|
|||
if (clientIndex > clients.size() - 1) {
|
||||
clientIndex = 0;
|
||||
}
|
||||
suss.shutdownNow();
|
||||
suss = new ConcurrentUpdateSolrServer(
|
||||
((HttpSolrServer) clients.get(clientIndex)).getBaseURL(),
|
||||
cusc.shutdownNow();
|
||||
cusc = new ConcurrentUpdateSolrClient(
|
||||
((HttpSolrClient) clients.get(clientIndex)).getBaseURL(),
|
||||
httpClient, 30, 3) {
|
||||
@Override
|
||||
public void handleError(Throwable ex) {
|
||||
log.warn("suss error", ex);
|
||||
log.warn("cusc error", ex);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
@ -388,8 +387,8 @@ public class ChaosMonkeyNothingIsSafeTest extends AbstractFullDistribZkTestBase
|
|||
@Override
|
||||
public void safeStop() {
|
||||
stop = true;
|
||||
suss.blockUntilFinished();
|
||||
suss.shutdownNow();
|
||||
cusc.blockUntilFinished();
|
||||
cusc.shutdownNow();
|
||||
httpClient.getConnectionManager().shutdown();
|
||||
}
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@ import java.util.concurrent.TimeUnit;
|
|||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
import org.apache.solr.core.Diagnostics;
|
||||
import org.apache.solr.update.SolrCmdDistributor;
|
||||
|
@ -172,7 +172,7 @@ public class ChaosMonkeySafeLeaderTest extends AbstractFullDistribZkTestBase {
|
|||
zkServer.run();
|
||||
}
|
||||
|
||||
CloudSolrServer client = createCloudClient("collection1");
|
||||
CloudSolrClient client = createCloudClient("collection1");
|
||||
try {
|
||||
createCollection(null, "testcollection",
|
||||
1, 1, 1, client, null, "conf1");
|
||||
|
|
|
@ -18,7 +18,7 @@ package org.apache.solr.cloud;
|
|||
*/
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
import org.apache.solr.common.cloud.ClusterState;
|
||||
import org.apache.solr.common.cloud.DocCollection;
|
||||
|
@ -77,7 +77,7 @@ public class ChaosMonkeyShardSplitTest extends ShardSplitTest {
|
|||
Thread indexThread = null;
|
||||
OverseerRestarter killer = null;
|
||||
Thread killerThread = null;
|
||||
final SolrServer solrServer = clients.get(0);
|
||||
final SolrClient solrClient = clients.get(0);
|
||||
|
||||
try {
|
||||
del("*:*");
|
||||
|
@ -146,8 +146,8 @@ public class ChaosMonkeyShardSplitTest extends ShardSplitTest {
|
|||
} finally {
|
||||
if (indexThread != null)
|
||||
indexThread.join();
|
||||
if (solrServer != null)
|
||||
solrServer.commit();
|
||||
if (solrClient != null)
|
||||
solrClient.commit();
|
||||
if (killer != null) {
|
||||
killer.run = false;
|
||||
if (killerThread != null) {
|
||||
|
|
|
@ -18,9 +18,9 @@ package org.apache.solr.cloud;
|
|||
*/
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
|
||||
import org.apache.solr.client.solrj.request.CollectionAdminRequest.Create;
|
||||
import org.apache.solr.client.solrj.request.CollectionAdminRequest.RequestStatus;
|
||||
|
@ -69,16 +69,16 @@ public class CollectionsAPIAsyncDistributedZkTest extends AbstractFullDistribZkT
|
|||
}
|
||||
|
||||
private void testSolrJAPICalls() throws Exception {
|
||||
SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
|
||||
SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)));
|
||||
|
||||
Create createCollectionRequest = new Create();
|
||||
createCollectionRequest.setCollectionName("testasynccollectioncreation");
|
||||
createCollectionRequest.setNumShards(1);
|
||||
createCollectionRequest.setConfigName("conf1");
|
||||
createCollectionRequest.setAsyncId("1001");
|
||||
createCollectionRequest.process(server);
|
||||
createCollectionRequest.process(client);
|
||||
|
||||
String state = getRequestStateAfterCompletion("1001", MAX_TIMEOUT_SECONDS, server);
|
||||
String state = getRequestStateAfterCompletion("1001", MAX_TIMEOUT_SECONDS, client);
|
||||
|
||||
assertEquals("CreateCollection task did not complete!", "completed", state);
|
||||
|
||||
|
@ -88,9 +88,9 @@ public class CollectionsAPIAsyncDistributedZkTest extends AbstractFullDistribZkT
|
|||
createCollectionRequest.setNumShards(1);
|
||||
createCollectionRequest.setConfigName("conf1");
|
||||
createCollectionRequest.setAsyncId("1002");
|
||||
createCollectionRequest.process(server);
|
||||
createCollectionRequest.process(client);
|
||||
|
||||
state = getRequestStateAfterCompletion("1002", MAX_TIMEOUT_SECONDS, server);
|
||||
state = getRequestStateAfterCompletion("1002", MAX_TIMEOUT_SECONDS, client);
|
||||
|
||||
assertEquals("Recreating a collection with the same name didn't fail, should have.", "failed", state);
|
||||
|
||||
|
@ -98,8 +98,8 @@ public class CollectionsAPIAsyncDistributedZkTest extends AbstractFullDistribZkT
|
|||
addReplica.setCollectionName("testasynccollectioncreation");
|
||||
addReplica.setShardName("shard1");
|
||||
addReplica.setAsyncId("1003");
|
||||
server.request(addReplica);
|
||||
state = getRequestStateAfterCompletion("1003", MAX_TIMEOUT_SECONDS, server);
|
||||
client.request(addReplica);
|
||||
state = getRequestStateAfterCompletion("1003", MAX_TIMEOUT_SECONDS, client);
|
||||
assertEquals("Add replica did not complete", "completed", state);
|
||||
|
||||
|
||||
|
@ -107,18 +107,18 @@ public class CollectionsAPIAsyncDistributedZkTest extends AbstractFullDistribZkT
|
|||
splitShardRequest.setCollectionName("testasynccollectioncreation");
|
||||
splitShardRequest.setShardName("shard1");
|
||||
splitShardRequest.setAsyncId("1004");
|
||||
splitShardRequest.process(server);
|
||||
splitShardRequest.process(client);
|
||||
|
||||
state = getRequestStateAfterCompletion("1004", MAX_TIMEOUT_SECONDS * 2, server);
|
||||
state = getRequestStateAfterCompletion("1004", MAX_TIMEOUT_SECONDS * 2, client);
|
||||
|
||||
assertEquals("Shard split did not complete. Last recorded state: " + state, "completed", state);
|
||||
}
|
||||
|
||||
private String getRequestStateAfterCompletion(String requestId, int waitForSeconds, SolrServer server)
|
||||
private String getRequestStateAfterCompletion(String requestId, int waitForSeconds, SolrClient client)
|
||||
throws IOException, SolrServerException {
|
||||
String state = null;
|
||||
while(waitForSeconds-- > 0) {
|
||||
state = getRequestState(requestId, server);
|
||||
state = getRequestState(requestId, client);
|
||||
if(state.equals("completed") || state.equals("failed"))
|
||||
return state;
|
||||
try {
|
||||
|
@ -129,10 +129,10 @@ public class CollectionsAPIAsyncDistributedZkTest extends AbstractFullDistribZkT
|
|||
return state;
|
||||
}
|
||||
|
||||
private String getRequestState(String requestId, SolrServer server) throws IOException, SolrServerException {
|
||||
private String getRequestState(String requestId, SolrClient client) throws IOException, SolrServerException {
|
||||
RequestStatus request = new RequestStatus();
|
||||
request.setRequestId(requestId);
|
||||
CollectionAdminResponse response = request.process(server);
|
||||
CollectionAdminResponse response = request.process(client);
|
||||
NamedList innerResponse = (NamedList) response.getResponse().get("status");
|
||||
return (String) innerResponse.get("state");
|
||||
}
|
||||
|
|
|
@ -17,39 +17,15 @@ package org.apache.solr.cloud;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import javax.management.MBeanServer;
|
||||
import javax.management.MBeanServerFactory;
|
||||
import javax.management.ObjectName;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.lang.management.ManagementFactory;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.CompletionService;
|
||||
import java.util.concurrent.ExecutorCompletionService;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.SynchronousQueue;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrRequest;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer.RemoteSolrException;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient.RemoteSolrException;
|
||||
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
|
||||
|
@ -84,6 +60,31 @@ import org.apache.solr.util.DefaultSolrThreadFactory;
|
|||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import javax.management.MBeanServer;
|
||||
import javax.management.MBeanServerFactory;
|
||||
import javax.management.ObjectName;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.lang.management.ManagementFactory;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Map.Entry;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.CompletionService;
|
||||
import java.util.concurrent.ExecutorCompletionService;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.SynchronousQueue;
|
||||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import static org.apache.solr.cloud.OverseerCollectionProcessor.NUM_SLICES;
|
||||
import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
|
||||
|
@ -213,7 +214,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
private void deleteCollectionRemovesStaleZkCollectionsNode() throws Exception {
|
||||
|
||||
// we can use this client because we just want base url
|
||||
final String baseUrl = getBaseUrl((HttpSolrServer) clients.get(0));
|
||||
final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
|
||||
|
||||
String collectionName = "out_of_sync_collection";
|
||||
|
||||
|
@ -230,7 +231,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
QueryRequest request = new QueryRequest(params);
|
||||
request.setPath("/admin/collections");
|
||||
try {
|
||||
NamedList<Object> resp = createNewSolrServer("", baseUrl)
|
||||
NamedList<Object> resp = createNewSolrClient("", baseUrl)
|
||||
.request(request);
|
||||
fail("Expected to fail, because collection is not in clusterstate");
|
||||
} catch (RemoteSolrException e) {
|
||||
|
@ -244,7 +245,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
}
|
||||
|
||||
private void deletePartiallyCreatedCollection() throws Exception {
|
||||
final String baseUrl = getBaseUrl((HttpSolrServer) clients.get(0));
|
||||
final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
|
||||
String collectionName = "halfdeletedcollection";
|
||||
Create createCmd = new Create();
|
||||
createCmd.setCoreName("halfdeletedcollection_shard1_replica1");
|
||||
|
@ -255,7 +256,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
if (secondConfigSet) {
|
||||
createCmd.setCollectionConfigName("conf1");
|
||||
}
|
||||
createNewSolrServer("", baseUrl).request(createCmd);
|
||||
createNewSolrClient("", baseUrl).request(createCmd);
|
||||
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
params.set("action", CollectionAction.DELETE.toString());
|
||||
|
@ -263,7 +264,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
QueryRequest request = new QueryRequest(params);
|
||||
request.setPath("/admin/collections");
|
||||
|
||||
NamedList<Object> resp = createNewSolrServer("", baseUrl).request(request);
|
||||
NamedList<Object> resp = createNewSolrClient("", baseUrl).request(request);
|
||||
|
||||
checkForMissingCollection(collectionName);
|
||||
|
||||
|
@ -277,19 +278,19 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
if (secondConfigSet) {
|
||||
params.set("collection.configName", "conf1");
|
||||
}
|
||||
resp = createNewSolrServer("", baseUrl).request(request);
|
||||
resp = createNewSolrClient("", baseUrl).request(request);
|
||||
}
|
||||
|
||||
|
||||
private void deleteCollectionWithDownNodes() throws Exception {
|
||||
String baseUrl = getBaseUrl((HttpSolrServer) clients.get(0));
|
||||
String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
|
||||
// now try to remove a collection when a couple of its nodes are down
|
||||
if (secondConfigSet) {
|
||||
createCollection(null, "halfdeletedcollection2", 3, 3, 6,
|
||||
createNewSolrServer("", baseUrl), null, "conf2");
|
||||
createNewSolrClient("", baseUrl), null, "conf2");
|
||||
} else {
|
||||
createCollection(null, "halfdeletedcollection2", 3, 3, 6,
|
||||
createNewSolrServer("", baseUrl), null);
|
||||
createNewSolrClient("", baseUrl), null);
|
||||
}
|
||||
|
||||
waitForRecoveriesToFinish("halfdeletedcollection2", false);
|
||||
|
@ -303,7 +304,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
cloudClient.getZkStateReader().getLeaderRetry("halfdeletedcollection2", "shard" + i, 30000);
|
||||
}
|
||||
|
||||
baseUrl = getBaseUrl((HttpSolrServer) clients.get(2));
|
||||
baseUrl = getBaseUrl((HttpSolrClient) clients.get(2));
|
||||
|
||||
// remove a collection
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
|
@ -312,7 +313,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
QueryRequest request = new QueryRequest(params);
|
||||
request.setPath("/admin/collections");
|
||||
|
||||
createNewSolrServer("", baseUrl).request(request);
|
||||
createNewSolrClient("", baseUrl).request(request);
|
||||
|
||||
long timeout = System.currentTimeMillis() + 10000;
|
||||
while (cloudClient.getZkStateReader().getClusterState().hasCollection("halfdeletedcollection2")) {
|
||||
|
@ -329,7 +330,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
}
|
||||
|
||||
private void testErrorHandling() throws Exception {
|
||||
final String baseUrl = getBaseUrl((HttpSolrServer) clients.get(0));
|
||||
final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
|
||||
|
||||
|
||||
// try a bad action
|
||||
|
@ -343,7 +344,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
boolean gotExp = false;
|
||||
NamedList<Object> resp = null;
|
||||
try {
|
||||
resp = createNewSolrServer("", baseUrl).request(request);
|
||||
resp = createNewSolrClient("", baseUrl).request(request);
|
||||
} catch (SolrException e) {
|
||||
gotExp = true;
|
||||
}
|
||||
|
@ -365,7 +366,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
gotExp = false;
|
||||
resp = null;
|
||||
try {
|
||||
resp = createNewSolrServer("", baseUrl).request(request);
|
||||
resp = createNewSolrClient("", baseUrl).request(request);
|
||||
} catch (SolrException e) {
|
||||
gotExp = true;
|
||||
}
|
||||
|
@ -385,7 +386,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
request.setPath("/admin/collections");
|
||||
gotExp = false;
|
||||
try {
|
||||
resp = createNewSolrServer("", baseUrl).request(request);
|
||||
resp = createNewSolrClient("", baseUrl).request(request);
|
||||
} catch (SolrException e) {
|
||||
gotExp = true;
|
||||
}
|
||||
|
@ -405,7 +406,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
gotExp = false;
|
||||
resp = null;
|
||||
try {
|
||||
resp = createNewSolrServer("", baseUrl).request(request);
|
||||
resp = createNewSolrClient("", baseUrl).request(request);
|
||||
} catch (SolrException e) {
|
||||
gotExp = true;
|
||||
}
|
||||
|
@ -426,7 +427,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
gotExp = false;
|
||||
resp = null;
|
||||
try {
|
||||
resp = createNewSolrServer("", baseUrl).request(request);
|
||||
resp = createNewSolrClient("", baseUrl).request(request);
|
||||
} catch (SolrException e) {
|
||||
gotExp = true;
|
||||
}
|
||||
|
@ -445,7 +446,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
if (secondConfigSet) {
|
||||
createCmd.setCollectionConfigName("conf1");
|
||||
}
|
||||
createNewSolrServer("", baseUrl).request(createCmd);
|
||||
createNewSolrClient("", baseUrl).request(createCmd);
|
||||
|
||||
createCmd = new Create();
|
||||
createCmd.setCoreName("halfcollection_shard1_replica1");
|
||||
|
@ -456,7 +457,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
if (secondConfigSet) {
|
||||
createCmd.setCollectionConfigName("conf1");
|
||||
}
|
||||
createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(1))).request(createCmd);
|
||||
createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(1))).request(createCmd);
|
||||
|
||||
params = new ModifiableSolrParams();
|
||||
params.set("action", CollectionAction.CREATE.toString());
|
||||
|
@ -476,7 +477,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
request = new QueryRequest(params);
|
||||
request.setPath("/admin/collections");
|
||||
gotExp = false;
|
||||
resp = createNewSolrServer("", baseUrl).request(request);
|
||||
resp = createNewSolrClient("", baseUrl).request(request);
|
||||
|
||||
SimpleOrderedMap success = (SimpleOrderedMap) resp.get("success");
|
||||
SimpleOrderedMap failure = (SimpleOrderedMap) resp.get("failure");
|
||||
|
@ -506,14 +507,14 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
createCmd.setCollectionConfigName("conf1");
|
||||
}
|
||||
|
||||
createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(1)))
|
||||
createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(1)))
|
||||
.request(createCmd);
|
||||
|
||||
// try and create a SolrCore with no collection name
|
||||
createCmd.setCollection(null);
|
||||
createCmd.setCoreName("corewithnocollection2");
|
||||
|
||||
createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(1)))
|
||||
createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(1)))
|
||||
.request(createCmd);
|
||||
|
||||
// in both cases, the collection should have default to the core name
|
||||
|
@ -524,7 +525,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
|
||||
private void testNodesUsedByCreate() throws Exception {
|
||||
// we can use this client because we just want base url
|
||||
final String baseUrl = getBaseUrl((HttpSolrServer) clients.get(0));
|
||||
final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
|
||||
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
params.set("action", CollectionAction.CREATE.toString());
|
||||
|
@ -541,7 +542,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
|
||||
QueryRequest request = new QueryRequest(params);
|
||||
request.setPath("/admin/collections");
|
||||
createNewSolrServer("", baseUrl).request(request);
|
||||
createNewSolrClient("", baseUrl).request(request);
|
||||
|
||||
List<Integer> numShardsNumReplicaList = new ArrayList<>();
|
||||
numShardsNumReplicaList.add(2);
|
||||
|
@ -572,7 +573,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
private void testCollectionsAPI() throws Exception {
|
||||
|
||||
boolean disableLegacy = random().nextBoolean();
|
||||
CloudSolrServer client1 = null;
|
||||
CloudSolrClient client1 = null;
|
||||
|
||||
if (disableLegacy) {
|
||||
log.info("legacyCloud=false");
|
||||
|
@ -592,11 +593,11 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
for (int i = 0; i < cnt; i++) {
|
||||
int numShards = TestUtil.nextInt(random(), 0, shardCount) + 1;
|
||||
int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
|
||||
int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
|
||||
int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
|
||||
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
|
||||
|
||||
|
||||
CloudSolrServer client = null;
|
||||
CloudSolrClient client = null;
|
||||
try {
|
||||
if (i == 0) {
|
||||
// Test if we can create a collection through CloudSolrServer where
|
||||
|
@ -637,7 +638,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
|
||||
String url = getUrlFromZk(collection);
|
||||
|
||||
HttpSolrServer collectionClient = new HttpSolrServer(url);
|
||||
HttpSolrClient collectionClient = new HttpSolrClient(url);
|
||||
|
||||
// poll for a second - it can take a moment before we are ready to serve
|
||||
waitForNon403or404or503(collectionClient);
|
||||
|
@ -657,7 +658,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
|
||||
String url = getUrlFromZk(collection);
|
||||
|
||||
HttpSolrServer collectionClient = new HttpSolrServer(url);
|
||||
HttpSolrClient collectionClient = new HttpSolrClient(url);
|
||||
|
||||
// poll for a second - it can take a moment before we are ready to serve
|
||||
waitForNon403or404or503(collectionClient);
|
||||
|
@ -678,7 +679,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
ChaosMonkey.causeConnectionLoss(jetty);
|
||||
}
|
||||
|
||||
ZkStateReader zkStateReader = getCommonCloudSolrServer().getZkStateReader();
|
||||
ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
|
||||
for (int j = 0; j < cnt; j++) {
|
||||
waitForRecoveriesToFinish("awholynewcollection_" + j, zkStateReader, false);
|
||||
|
||||
|
@ -704,7 +705,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
|
||||
String url = getUrlFromZk(collectionName);
|
||||
|
||||
HttpSolrServer collectionClient = new HttpSolrServer(url);
|
||||
HttpSolrClient collectionClient = new HttpSolrClient(url);
|
||||
|
||||
|
||||
// lets try and use the solrj client to index a couple documents
|
||||
|
@ -740,9 +741,9 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
request.setPath("/admin/collections");
|
||||
|
||||
// we can use this client because we just want base url
|
||||
final String baseUrl = getBaseUrl((HttpSolrServer) clients.get(0));
|
||||
final String baseUrl = getBaseUrl((HttpSolrClient) clients.get(0));
|
||||
|
||||
createNewSolrServer("", baseUrl).request(request);
|
||||
createNewSolrClient("", baseUrl).request(request);
|
||||
|
||||
// reloads make take a short while
|
||||
boolean allTimesAreCorrect = waitForReloads(collectionName, urlToTimeBefore);
|
||||
|
@ -758,7 +759,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
request = new QueryRequest(params);
|
||||
request.setPath("/admin/collections");
|
||||
|
||||
createNewSolrServer("", baseUrl).request(request);
|
||||
createNewSolrClient("", baseUrl).request(request);
|
||||
|
||||
// ensure its out of the state
|
||||
checkForMissingCollection(collectionName);
|
||||
|
@ -774,7 +775,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
|
||||
boolean exp = false;
|
||||
try {
|
||||
createNewSolrServer("", baseUrl).request(request);
|
||||
createNewSolrClient("", baseUrl).request(request);
|
||||
} catch (SolrException e) {
|
||||
exp = true;
|
||||
}
|
||||
|
@ -794,7 +795,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
}
|
||||
request = new QueryRequest(params);
|
||||
request.setPath("/admin/collections");
|
||||
createNewSolrServer("", baseUrl).request(request);
|
||||
createNewSolrClient("", baseUrl).request(request);
|
||||
|
||||
List<Integer> list = new ArrayList<>(2);
|
||||
list.add(1);
|
||||
|
@ -803,7 +804,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
|
||||
url = getUrlFromZk(collectionName);
|
||||
|
||||
collectionClient = new HttpSolrServer(url);
|
||||
collectionClient = new HttpSolrClient(url);
|
||||
|
||||
// poll for a second - it can take a moment before we are ready to serve
|
||||
waitForNon403or404or503(collectionClient);
|
||||
|
@ -815,12 +816,12 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
}
|
||||
|
||||
// test maxShardsPerNode
|
||||
int numLiveNodes = getCommonCloudSolrServer().getZkStateReader().getClusterState().getLiveNodes().size();
|
||||
int numLiveNodes = getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes().size();
|
||||
int numShards = (numLiveNodes/2) + 1;
|
||||
int replicationFactor = 2;
|
||||
int maxShardsPerNode = 1;
|
||||
collectionInfos = new HashMap<>();
|
||||
CloudSolrServer client = createCloudClient("awholynewcollection_" + cnt);
|
||||
CloudSolrClient client = createCloudClient("awholynewcollection_" + cnt);
|
||||
try {
|
||||
exp = false;
|
||||
try {
|
||||
|
@ -836,12 +837,12 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
|
||||
|
||||
// Test createNodeSet
|
||||
numLiveNodes = getCommonCloudSolrServer().getZkStateReader().getClusterState().getLiveNodes().size();
|
||||
numLiveNodes = getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes().size();
|
||||
List<String> createNodeList = new ArrayList<>();
|
||||
int numOfCreateNodes = numLiveNodes/2;
|
||||
assertFalse("createNodeSet test is pointless with only " + numLiveNodes + " nodes running", numOfCreateNodes == 0);
|
||||
int i = 0;
|
||||
for (String liveNode : getCommonCloudSolrServer().getZkStateReader().getClusterState().getLiveNodes()) {
|
||||
for (String liveNode : getCommonCloudSolrClient().getZkStateReader().getClusterState().getLiveNodes()) {
|
||||
if (i < numOfCreateNodes) {
|
||||
createNodeList.add(liveNode);
|
||||
i++;
|
||||
|
@ -888,10 +889,10 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
String collectionName = "awholynewstresscollection_" + name + "_" + i;
|
||||
int numShards = TestUtil.nextInt(random(), 0, shardCount * 2) + 1;
|
||||
int replicationFactor = TestUtil.nextInt(random(), 0, 3) + 1;
|
||||
int maxShardsPerNode = (((numShards * 2 * replicationFactor) / getCommonCloudSolrServer()
|
||||
int maxShardsPerNode = (((numShards * 2 * replicationFactor) / getCommonCloudSolrClient()
|
||||
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
|
||||
|
||||
CloudSolrServer client = null;
|
||||
CloudSolrClient client = null;
|
||||
try {
|
||||
if (i == 0) {
|
||||
client = createCloudClient(null);
|
||||
|
@ -993,7 +994,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
|
||||
private void collectStartTimes(String collectionName,
|
||||
Map<String,Long> urlToTime) throws SolrServerException, IOException {
|
||||
ClusterState clusterState = getCommonCloudSolrServer().getZkStateReader()
|
||||
ClusterState clusterState = getCommonCloudSolrClient().getZkStateReader()
|
||||
.getClusterState();
|
||||
// Map<String,DocCollection> collections = clusterState.getCollectionStates();
|
||||
if (clusterState.hasCollection(collectionName)) {
|
||||
|
@ -1008,7 +1009,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
while (shardIt.hasNext()) {
|
||||
Entry<String,Replica> shardEntry = shardIt.next();
|
||||
ZkCoreNodeProps coreProps = new ZkCoreNodeProps(shardEntry.getValue());
|
||||
HttpSolrServer server = new HttpSolrServer(coreProps.getBaseUrl());
|
||||
HttpSolrClient server = new HttpSolrClient(coreProps.getBaseUrl());
|
||||
CoreAdminResponse mcr;
|
||||
try {
|
||||
mcr = CoreAdminRequest.getStatus(coreProps.getCoreName(), server);
|
||||
|
@ -1026,7 +1027,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBa
|
|||
}
|
||||
|
||||
private String getUrlFromZk(String collection) {
|
||||
ClusterState clusterState = getCommonCloudSolrServer().getZkStateReader().getClusterState();
|
||||
ClusterState clusterState = getCommonCloudSolrClient().getZkStateReader().getClusterState();
|
||||
Map<String,Slice> slices = clusterState.getSlicesMap(collection);
|
||||
|
||||
if (slices == null) {
|
||||
|
@@ -1122,6 +1123,58 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBase

 }

+private void addReplicaTest() throws Exception {
+String collectionName = "addReplicaColl";
+CloudSolrClient client = createCloudClient(null);
+try {
+createCollection(collectionName, client, 2, 2);
+String newReplicaName = Assign.assignNode(collectionName, client.getZkStateReader().getClusterState());
+ArrayList<String> nodeList = new ArrayList<>(client.getZkStateReader().getClusterState().getLiveNodes());
+Collections.shuffle(nodeList, random());
+CollectionAdminRequest.AddReplica addReplica = new CollectionAdminRequest.AddReplica();
+addReplica.setCollectionName(collectionName);
+addReplica.setShardName("shard1");
+addReplica.setNode(nodeList.get(0));
+client.request(addReplica);
+
+long timeout = System.currentTimeMillis() + 3000;
+Replica newReplica = null;
+
+for (; System.currentTimeMillis() < timeout; ) {
+Slice slice = client.getZkStateReader().getClusterState().getSlice(collectionName, "shard1");
+newReplica = slice.getReplica(newReplicaName);
+}
+
+assertNotNull(newReplica);
+
+log.info("newReplica {},\n{} ", newReplica, client.getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)));
+
+assertEquals("Replica should be created on the right node",
+client.getZkStateReader().getBaseUrlForNodeName(nodeList.get(0)), newReplica.getStr(ZkStateReader.BASE_URL_PROP));
+
+newReplicaName = Assign.assignNode(collectionName, client.getZkStateReader().getClusterState());
+addReplica = new CollectionAdminRequest.AddReplica();
+addReplica.setCollectionName(collectionName);
+addReplica.setShardName("shard2");
+client.request(addReplica);
+
+timeout = System.currentTimeMillis() + 3000;
+newReplica = null;
+
+for (; System.currentTimeMillis() < timeout; ) {
+Slice slice = client.getZkStateReader().getClusterState().getSlice(collectionName, "shard2");
+newReplica = slice.getReplica(newReplicaName);
+}
+
+assertNotNull(newReplica);
+
+
+} finally {
+client.shutdown();
+}
+
+}
+
 @Override
 protected QueryResponse queryServer(ModifiableSolrParams params) throws SolrServerException {

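For readers following the SolrServer-to-SolrClient migration, the sketch below shows the same ADDREPLICA call issued outside the test framework. It is a minimal, hypothetical example, not code from this commit: the ZooKeeper address, collection name and shard name are placeholders, and the cluster is assumed to already contain the collection.

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class AddReplicaSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder ZooKeeper ensemble address; adjust for a real cluster.
    CloudSolrClient client = new CloudSolrClient("localhost:9983");
    try {
      // Ask the Collections API to add one more replica of shard1; with no
      // node specified, the overseer picks a live node itself.
      CollectionAdminRequest.AddReplica addReplica = new CollectionAdminRequest.AddReplica();
      addReplica.setCollectionName("addReplicaColl");
      addReplica.setShardName("shard1");
      client.request(addReplica);
    } finally {
      client.shutdown();
    }
  }
}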
@@ -1131,12 +1184,12 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBase
 if (r.nextBoolean())
 params.set("collection",DEFAULT_COLLECTION);

-QueryResponse rsp = getCommonCloudSolrServer().query(params);
+QueryResponse rsp = getCommonCloudSolrClient().query(params);
 return rsp;
 }

-protected void createCollection(String COLL_NAME, CloudSolrServer client,int replicationFactor , int numShards ) throws Exception {
-int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrServer()
+protected void createCollection(String COLL_NAME, CloudSolrClient client,int replicationFactor , int numShards ) throws Exception {
+int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrClient()
 .getZkStateReader().getClusterState().getLiveNodes().size())) + 1;

 Map<String, Object> props = makeMap(
@@ -1160,7 +1213,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBase
 }

 private void clusterPropTest() throws Exception {
-CloudSolrServer client = createCloudClient(null);
+CloudSolrClient client = createCloudClient(null);

 assertTrue("cluster property not set", setClusterProp(client, ZkStateReader.LEGACY_CLOUD, "false"));
 assertTrue("cluster property not unset ", setClusterProp(client, ZkStateReader.LEGACY_CLOUD, null));
@@ -1168,7 +1221,7 @@ public class CollectionsAPIDistributedZkTest extends AbstractFullDistribZkTestBase
 client.shutdown();
 }

-public static boolean setClusterProp(CloudSolrServer client, String name , String val) throws SolrServerException, IOException, InterruptedException {
+public static boolean setClusterProp(CloudSolrClient client, String name , String val) throws SolrServerException, IOException, InterruptedException {
 Map m = makeMap(
 "action", CollectionAction.CLUSTERPROP.toLower(),
 "name",name);
|
|
@ -21,7 +21,7 @@ import org.apache.commons.codec.binary.StringUtils;
|
|||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest;
|
||||
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
|
||||
|
@ -269,7 +269,7 @@ public class CollectionsAPISolrJTests extends AbstractFullDistribZkTestBase {
|
|||
|
||||
Replica replica1 = testCollection.getReplica("core_node1");
|
||||
|
||||
HttpSolrServer solrServer = new HttpSolrServer(replica1.getStr("base_url"));
|
||||
HttpSolrClient solrServer = new HttpSolrClient(replica1.getStr("base_url"));
|
||||
try {
|
||||
CoreAdminResponse status = CoreAdminRequest.getStatus(replica1.getStr("core"), solrServer);
|
||||
NamedList<Object> coreStatus = status.getCoreStatus(replica1.getStr("core"));
|
||||
|
|
|
@ -43,8 +43,8 @@ import org.apache.lucene.util.TestUtil;
|
|||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrRequest;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.client.solrj.request.UpdateRequest;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
|
@ -149,11 +149,11 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
for (int i = 0; i < cnt; i++) {
|
||||
int numShards = 3;
|
||||
int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrServer()
|
||||
int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrClient()
|
||||
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
|
||||
|
||||
|
||||
CloudSolrServer client = null;
|
||||
CloudSolrClient client = null;
|
||||
try {
|
||||
if (i == 0) {
|
||||
// Test if we can create a collection through CloudSolrServer where
|
||||
|
@ -193,15 +193,15 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
|
|||
List<Integer> list = entry.getValue();
|
||||
checkForCollection(collection, list, null);
|
||||
|
||||
String url = getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), collection);
|
||||
String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collection);
|
||||
|
||||
HttpSolrServer collectionClient = new HttpSolrServer(url);
|
||||
HttpSolrClient collectionClient = new HttpSolrClient(url);
|
||||
|
||||
// poll for a second - it can take a moment before we are ready to serve
|
||||
waitForNon403or404or503(collectionClient);
|
||||
collectionClient.shutdown();
|
||||
}
|
||||
ZkStateReader zkStateReader = getCommonCloudSolrServer().getZkStateReader();
|
||||
ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
|
||||
for (int j = 0; j < cnt; j++) {
|
||||
waitForRecoveriesToFinish(COLL_PREFIX + j, zkStateReader, false);
|
||||
}
|
||||
|
@ -221,9 +221,9 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
String collectionName = collectionNameList.get(random().nextInt(collectionNameList.size()));
|
||||
|
||||
String url = getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), collectionName);
|
||||
String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
|
||||
|
||||
HttpSolrServer collectionClient = new HttpSolrServer(url);
|
||||
HttpSolrClient collectionClient = new HttpSolrClient(url);
|
||||
|
||||
|
||||
// lets try and use the solrj client to index a couple documents
|
||||
|
@ -271,7 +271,7 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
|
|||
params.set("shard", "x");
|
||||
SolrRequest request = new QueryRequest(params);
|
||||
request.setPath("/admin/collections");
|
||||
createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0))).request(request);
|
||||
createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0))).request(request);
|
||||
waitForCollection(zkStateReader,collectionName,4);
|
||||
//wait for all the replicas to become active
|
||||
int attempts = 0;
|
||||
|
@ -295,11 +295,11 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
int numShards = 4;
|
||||
replicationFactor = TestUtil.nextInt(random(), 0, 3) + 2;
|
||||
int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
|
||||
int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
|
||||
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
|
||||
|
||||
|
||||
CloudSolrServer client = null;
|
||||
CloudSolrClient client = null;
|
||||
String shard_fld = "shard_s";
|
||||
try {
|
||||
client = createCloudClient(null);
|
||||
|
@ -320,10 +320,10 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
|
|||
checkForCollection(collectionName, list, null);
|
||||
|
||||
|
||||
url = getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), collectionName);
|
||||
url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
|
||||
|
||||
collectionClient.shutdown();
|
||||
collectionClient = new HttpSolrServer(url);
|
||||
collectionClient = new HttpSolrClient(url);
|
||||
|
||||
// poll for a second - it can take a moment before we are ready to serve
|
||||
waitForNon403or404or503(collectionClient);
|
||||
|
@ -331,7 +331,7 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
|
||||
collectionClient.shutdown();
|
||||
collectionClient = new HttpSolrServer(url);
|
||||
collectionClient = new HttpSolrClient(url);
|
||||
|
||||
|
||||
// lets try and use the solrj client to index a couple documents
|
||||
|
@ -358,11 +358,11 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
|
|||
String collectionName = "routeFieldColl";
|
||||
int numShards = 4;
|
||||
int replicationFactor = 2;
|
||||
int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
|
||||
int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
|
||||
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
|
||||
|
||||
HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
|
||||
CloudSolrServer client = null;
|
||||
CloudSolrClient client = null;
|
||||
String shard_fld = "shard_s";
|
||||
try {
|
||||
client = createCloudClient(null);
|
||||
|
@ -381,16 +381,16 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
|
|||
checkForCollection(collectionName, list, null);
|
||||
|
||||
|
||||
String url = getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), collectionName);
|
||||
String url = getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
|
||||
|
||||
HttpSolrServer collectionClient = new HttpSolrServer(url);
|
||||
HttpSolrClient collectionClient = new HttpSolrClient(url);
|
||||
|
||||
// poll for a second - it can take a moment before we are ready to serve
|
||||
waitForNon403or404or503(collectionClient);
|
||||
collectionClient.shutdown();
|
||||
|
||||
|
||||
collectionClient = new HttpSolrServer(url);
|
||||
collectionClient = new HttpSolrClient(url);
|
||||
|
||||
|
||||
// lets try and use the solrj client to index a couple documents
|
||||
|
@ -422,7 +422,7 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
|
|||
private void testCreateShardRepFactor() throws Exception {
|
||||
String collectionName = "testCreateShardRepFactor";
|
||||
HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
|
||||
CloudSolrServer client = null;
|
||||
CloudSolrClient client = null;
|
||||
try {
|
||||
client = createCloudClient(null);
|
||||
Map<String, Object> props = ZkNodeProps.makeMap(
|
||||
|
@ -436,7 +436,7 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
|
|||
} finally {
|
||||
if (client != null) client.shutdown();
|
||||
}
|
||||
ZkStateReader zkStateReader = getCommonCloudSolrServer().getZkStateReader();
|
||||
ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();
|
||||
waitForRecoveriesToFinish(collectionName, zkStateReader, false);
|
||||
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
|
@ -445,7 +445,7 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
|
|||
params.set("shard", "x");
|
||||
SolrRequest request = new QueryRequest(params);
|
||||
request.setPath("/admin/collections");
|
||||
createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0))).request(request);
|
||||
createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0))).request(request);
|
||||
|
||||
waitForRecoveriesToFinish(collectionName, zkStateReader, false);
|
||||
|
||||
|
@ -473,7 +473,7 @@ public class CustomCollectionTest extends AbstractFullDistribZkTestBase {
|
|||
if (r.nextBoolean())
|
||||
params.set("collection",DEFAULT_COLLECTION);
|
||||
|
||||
QueryResponse rsp = getCommonCloudSolrServer().query(params);
|
||||
QueryResponse rsp = getCommonCloudSolrClient().query(params);
|
||||
return rsp;
|
||||
}
|
||||
|
||||
|
|
|
@ -17,16 +17,10 @@ package org.apache.solr.cloud;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import static org.apache.solr.cloud.CollectionsAPIDistributedZkTest.setClusterProp;
|
||||
import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
|
||||
|
||||
import java.net.URL;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.common.cloud.DocCollection;
|
||||
import org.apache.solr.common.cloud.Replica;
|
||||
|
@ -38,6 +32,12 @@ import org.apache.solr.common.util.NamedList;
|
|||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.net.URL;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.apache.solr.cloud.CollectionsAPIDistributedZkTest.setClusterProp;
|
||||
import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
|
||||
|
||||
//@Ignore("Not currently valid see SOLR-5580")
|
||||
public class DeleteInactiveReplicaTest extends DeleteReplicaTest{
|
||||
|
||||
|
@ -57,7 +57,7 @@ public class DeleteInactiveReplicaTest extends DeleteReplicaTest{
|
|||
}
|
||||
|
||||
private void deleteInactiveReplicaTest() throws Exception {
|
||||
CloudSolrServer client = createCloudClient(null);
|
||||
CloudSolrClient client = createCloudClient(null);
|
||||
|
||||
String collectionName = "delDeadColl";
|
||||
|
||||
|
@@ -131,12 +131,12 @@ public class DeleteInactiveReplicaTest extends DeleteReplicaTest{

 Map m = makeMap("qt", "/admin/cores", "action", "status");

-SolrServer server = new HttpSolrServer(replica1.getStr(ZkStateReader.BASE_URL_PROP));
-NamedList<Object> resp = server.request(new QueryRequest(new MapSolrParams(m)));
+SolrClient queryClient = new HttpSolrClient(replica1.getStr(ZkStateReader.BASE_URL_PROP));
+NamedList<Object> resp = queryClient.request(new QueryRequest(new MapSolrParams(m)));
 assertNull("The core is up and running again",
 ((NamedList) resp.get("status")).get(replica1.getStr("core")));
-server.shutdown();
-server = null;
+queryClient.shutdown();
+queryClient = null;


 Exception exp = null;
@ -19,13 +19,12 @@ package org.apache.solr.cloud;
|
|||
|
||||
import org.apache.solr.client.solrj.SolrRequest;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.common.cloud.DocCollection;
|
||||
import org.apache.solr.common.cloud.ImplicitDocRouter;
|
||||
import org.apache.solr.common.cloud.Replica;
|
||||
import org.apache.solr.common.cloud.ZkNodeProps;
|
||||
import org.apache.solr.common.params.CollectionParams;
|
||||
import org.apache.solr.common.params.MapSolrParams;
|
||||
import org.apache.solr.common.params.SolrParams;
|
||||
import org.junit.After;
|
||||
|
@ -47,7 +46,7 @@ import static org.apache.solr.common.params.CollectionParams.CollectionAction.DE
|
|||
|
||||
@Ignore("SOLR-6347")
|
||||
public class DeleteLastCustomShardedReplicaTest extends AbstractFullDistribZkTestBase {
|
||||
private CloudSolrServer client;
|
||||
private CloudSolrClient client;
|
||||
|
||||
@BeforeClass
|
||||
public static void beforeThisClass2() throws Exception {
|
||||
|
@ -102,7 +101,7 @@ public class DeleteLastCustomShardedReplicaTest extends AbstractFullDistribZkTes
|
|||
|
||||
waitForRecoveriesToFinish(collectionName, false);
|
||||
|
||||
DocCollection testcoll = getCommonCloudSolrServer().getZkStateReader()
|
||||
DocCollection testcoll = getCommonCloudSolrClient().getZkStateReader()
|
||||
.getClusterState().getCollection(collectionName);
|
||||
Replica replica = testcoll.getSlice("a").getReplicas().iterator().next();
|
||||
|
||||
|
@ -121,7 +120,7 @@ public class DeleteLastCustomShardedReplicaTest extends AbstractFullDistribZkTes
|
|||
boolean success = false;
|
||||
DocCollection testcoll = null;
|
||||
while (System.currentTimeMillis() < endAt) {
|
||||
testcoll = getCommonCloudSolrServer().getZkStateReader()
|
||||
testcoll = getCommonCloudSolrClient().getZkStateReader()
|
||||
.getClusterState().getCollection(COLL_NAME);
|
||||
// In case of a custom sharded collection, the last replica deletion would also lead to
|
||||
// the deletion of the slice.
|
||||
|
|
|
@ -17,22 +17,10 @@ package org.apache.solr.cloud;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
|
||||
import static org.apache.solr.cloud.OverseerCollectionProcessor.NUM_SLICES;
|
||||
import static org.apache.solr.cloud.OverseerCollectionProcessor.ONLY_IF_DOWN;
|
||||
import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.solr.client.solrj.SolrRequest;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.client.solrj.response.CoreAdminResponse;
|
||||
|
@ -48,8 +36,20 @@ import org.junit.After;
|
|||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.apache.solr.cloud.OverseerCollectionProcessor.NUM_SLICES;
|
||||
import static org.apache.solr.cloud.OverseerCollectionProcessor.ONLY_IF_DOWN;
|
||||
import static org.apache.solr.common.cloud.ZkNodeProps.makeMap;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
|
||||
import static org.apache.solr.common.params.CollectionParams.CollectionAction.DELETEREPLICA;
|
||||
|
||||
public class DeleteReplicaTest extends AbstractFullDistribZkTestBase {
|
||||
private CloudSolrServer client;
|
||||
private CloudSolrClient client;
|
||||
|
||||
@BeforeClass
|
||||
public static void beforeThisClass2() throws Exception {
|
||||
|
@ -91,13 +91,13 @@ public class DeleteReplicaTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
private void deleteLiveReplicaTest() throws Exception {
|
||||
String collectionName = "delLiveColl";
|
||||
CloudSolrServer client = createCloudClient(null);
|
||||
CloudSolrClient client = createCloudClient(null);
|
||||
try {
|
||||
createCollection(collectionName, client);
|
||||
|
||||
waitForRecoveriesToFinish(collectionName, false);
|
||||
|
||||
DocCollection testcoll = getCommonCloudSolrServer().getZkStateReader()
|
||||
DocCollection testcoll = getCommonCloudSolrClient().getZkStateReader()
|
||||
.getClusterState().getCollection(collectionName);
|
||||
|
||||
Slice shard1 = null;
|
||||
|
@ -120,14 +120,14 @@ public class DeleteReplicaTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
if (replica1 == null) fail("no active replicas found");
|
||||
|
||||
HttpSolrServer replica1Server = new HttpSolrServer(replica1.getStr("base_url"));
|
||||
HttpSolrClient replica1Client = new HttpSolrClient(replica1.getStr("base_url"));
|
||||
String dataDir = null;
|
||||
try {
|
||||
CoreAdminResponse status = CoreAdminRequest.getStatus(replica1.getStr("core"), replica1Server);
|
||||
CoreAdminResponse status = CoreAdminRequest.getStatus(replica1.getStr("core"), replica1Client);
|
||||
NamedList<Object> coreStatus = status.getCoreStatus(replica1.getStr("core"));
|
||||
dataDir = (String) coreStatus.get("dataDir");
|
||||
} finally {
|
||||
replica1Server.shutdown();
|
||||
replica1Client.shutdown();
|
||||
}
|
||||
try {
|
||||
// Should not be able to delete a replica that is up if onlyIfDown=true.
|
||||
|
@ -149,7 +149,7 @@ public class DeleteReplicaTest extends AbstractFullDistribZkTestBase {
|
|||
}
|
||||
}
|
||||
|
||||
protected void tryToRemoveOnlyIfDown(String collectionName, CloudSolrServer client, Replica replica, String shard) throws IOException, SolrServerException {
|
||||
protected void tryToRemoveOnlyIfDown(String collectionName, CloudSolrClient client, Replica replica, String shard) throws IOException, SolrServerException {
|
||||
Map m = makeMap("collection", collectionName,
|
||||
"action", DELETEREPLICA.toLower(),
|
||||
"shard", shard,
|
||||
|
@ -162,7 +162,7 @@ public class DeleteReplicaTest extends AbstractFullDistribZkTestBase {
|
|||
}
|
||||
|
||||
protected void removeAndWaitForReplicaGone(String COLL_NAME,
|
||||
CloudSolrServer client, Replica replica, String shard)
|
||||
CloudSolrClient client, Replica replica, String shard)
|
||||
throws SolrServerException, IOException, InterruptedException {
|
||||
Map m = makeMap("collection", COLL_NAME, "action", DELETEREPLICA.toLower(), "shard",
|
||||
shard, "replica", replica.getName());
|
||||
|
@ -174,7 +174,7 @@ public class DeleteReplicaTest extends AbstractFullDistribZkTestBase {
|
|||
boolean success = false;
|
||||
DocCollection testcoll = null;
|
||||
while (System.currentTimeMillis() < endAt) {
|
||||
testcoll = getCommonCloudSolrServer().getZkStateReader()
|
||||
testcoll = getCommonCloudSolrClient().getZkStateReader()
|
||||
.getClusterState().getCollection(COLL_NAME);
|
||||
success = testcoll.getSlice(shard).getReplica(replica.getName()) == null;
|
||||
if (success) {
|
||||
|
@ -188,10 +188,10 @@ public class DeleteReplicaTest extends AbstractFullDistribZkTestBase {
|
|||
assertTrue("Replica not cleaned up", success);
|
||||
}
|
||||
|
||||
protected void createCollection(String COLL_NAME, CloudSolrServer client) throws Exception {
|
||||
protected void createCollection(String COLL_NAME, CloudSolrClient client) throws Exception {
|
||||
int replicationFactor = 2;
|
||||
int numShards = 2;
|
||||
int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrServer()
|
||||
int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrClient()
|
||||
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
|
||||
|
||||
Map<String, Object> props = makeMap(
|
||||
|
|
|
@ -19,7 +19,7 @@ package org.apache.solr.cloud;
|
|||
|
||||
import org.apache.solr.client.solrj.SolrRequest;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.cloud.overseer.OverseerAction;
|
||||
import org.apache.solr.common.SolrException;
|
||||
|
@ -94,7 +94,7 @@ public class DeleteShardTest extends AbstractFullDistribZkTestBase {
|
|||
try {
|
||||
deleteShard(SHARD1);
|
||||
fail("Deleting an active shard should not have succeeded");
|
||||
} catch (HttpSolrServer.RemoteSolrException e) {
|
||||
} catch (HttpSolrClient.RemoteSolrException e) {
|
||||
// expected
|
||||
}
|
||||
|
||||
|
@@ -143,15 +143,15 @@ public class DeleteShardTest extends AbstractFullDistribZkTestBase {
 SolrRequest request = new QueryRequest(params);
 request.setPath("/admin/collections");

-String baseUrl = ((HttpSolrServer) shardToJetty.get(SHARD1).get(0).client.solrClient)
+String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient)
 .getBaseURL();
 baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());

-HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
-baseServer.setConnectionTimeout(15000);
-baseServer.setSoTimeout(60000);
-baseServer.request(request);
-baseServer.shutdown();
+HttpSolrClient baseClient = new HttpSolrClient(baseUrl);
+baseClient.setConnectionTimeout(15000);
+baseClient.setSoTimeout(60000);
+baseClient.request(request);
+baseClient.shutdown();
 }

 protected void setSliceState(String slice, String state) throws SolrServerException, IOException,
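A minimal sketch of the pattern this hunk moves to: building a Collections API request and sending it through an HttpSolrClient with explicit connection and socket timeouts. This is an illustrative assumption, not code from the commit; the base URL, action, collection and shard values are placeholders.

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.params.ModifiableSolrParams;

public class CollectionsApiSketch {
  public static void main(String[] args) throws Exception {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", "DELETESHARD");      // Collections API action
    params.set("collection", "collection1");  // placeholder collection name
    params.set("shard", "shard1");            // placeholder shard id

    SolrRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");

    // Placeholder base URL of any node in the cluster.
    HttpSolrClient client = new HttpSolrClient("http://localhost:8983/solr");
    client.setConnectionTimeout(15000);
    client.setSoTimeout(60000);
    try {
      client.request(request);
    } finally {
      client.shutdown();
    }
  }
}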
@ -18,10 +18,8 @@ package org.apache.solr.cloud;
|
|||
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.apache.lucene.util.SentinelIntSet;
|
||||
import org.apache.solr.CursorPagingTest;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.request.LukeRequest;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
|
@@ -636,7 +634,7 @@ public class DistribCursorPagingTest extends AbstractFullDistribZkTestBase {
 /**
 * Given a QueryResponse returned by SolrServer.query, asserts that the
 * numFound on the doc list matches the expectation
-* @see SolrServer#query
+* @see org.apache.solr.client.solrj.SolrClient#query
 */
 private void assertNumFound(int expected, QueryResponse rsp) {
 assertEquals(expected, extractDocList(rsp).getNumFound());
@ -645,7 +643,7 @@ public class DistribCursorPagingTest extends AbstractFullDistribZkTestBase {
|
|||
/**
|
||||
* Given a QueryResponse returned by SolrServer.query, asserts that the
|
||||
* start on the doc list matches the expectation
|
||||
* @see SolrServer#query
|
||||
* @see org.apache.solr.client.solrj.SolrClient#query
|
||||
*/
|
||||
private void assertStartsAt(int expected, QueryResponse rsp) {
|
||||
assertEquals(expected, extractDocList(rsp).getStart());
|
||||
|
@ -654,7 +652,7 @@ public class DistribCursorPagingTest extends AbstractFullDistribZkTestBase {
|
|||
/**
|
||||
* Given a QueryResponse returned by SolrServer.query, asserts that the
|
||||
* "id" of the list of documents returned matches the expected list
|
||||
* @see SolrServer#query
|
||||
* @see org.apache.solr.client.solrj.SolrClient#query
|
||||
*/
|
||||
private void assertDocList(QueryResponse rsp, Object... ids) {
|
||||
SolrDocumentList docs = extractDocList(rsp);
|
||||
|
@ -669,7 +667,7 @@ public class DistribCursorPagingTest extends AbstractFullDistribZkTestBase {
|
|||
/**
|
||||
* Given a QueryResponse returned by SolrServer.query, asserts that the
|
||||
* response does include {@link #CURSOR_MARK_NEXT} key and returns it
|
||||
* @see SolrServer#query
|
||||
* @see org.apache.solr.client.solrj.SolrClient#query
|
||||
*/
|
||||
private String assertHashNextCursorMark(QueryResponse rsp) {
|
||||
String r = rsp.getNextCursorMark();
|
||||
|
|
|
@ -17,16 +17,8 @@
|
|||
package org.apache.solr.cloud;
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
import org.apache.solr.common.SolrDocument;
|
||||
import org.apache.solr.common.SolrDocumentList;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.common.SolrException.ErrorCode;
|
||||
import org.apache.solr.common.util.NamedList;
|
||||
import org.apache.solr.common.params.SolrParams;
|
||||
import org.apache.solr.common.params.ModifiableSolrParams;
|
||||
|
@ -39,8 +31,6 @@ import org.slf4j.LoggerFactory;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.HashSet;
|
||||
|
|
|
@ -17,7 +17,7 @@ package org.apache.solr.cloud;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.common.cloud.ClusterState;
|
||||
import org.apache.solr.common.cloud.DocCollection;
|
||||
|
@ -30,7 +30,7 @@ import org.junit.Before;
|
|||
import org.junit.BeforeClass;
|
||||
|
||||
public class ExternalCollectionsTest extends AbstractFullDistribZkTestBase {
|
||||
private CloudSolrServer client;
|
||||
private CloudSolrClient client;
|
||||
|
||||
@BeforeClass
|
||||
public static void beforeThisClass2() throws Exception {
|
||||
|
|
|
@ -17,18 +17,13 @@ package org.apache.solr.cloud;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase.BadApple;
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.client.solrj.request.UpdateRequest;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
|
@ -38,13 +33,17 @@ import org.apache.solr.common.SolrInputDocument;
|
|||
import org.apache.solr.common.cloud.SolrZkClient;
|
||||
import org.apache.solr.common.cloud.ZkNodeProps;
|
||||
import org.apache.solr.common.cloud.ZkStateReader;
|
||||
import org.apache.solr.common.params.ModifiableSolrParams;
|
||||
import org.apache.solr.common.params.CollectionParams.CollectionAction;
|
||||
import org.apache.solr.common.params.ModifiableSolrParams;
|
||||
import org.apache.solr.update.VersionInfo;
|
||||
import org.apache.solr.update.processor.DistributedUpdateProcessor;
|
||||
import org.apache.zookeeper.CreateMode;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Super basic testing, no shard restarting or anything.
|
||||
*/
|
||||
|
@ -130,9 +129,9 @@ public class FullSolrCloudDistribCmdsTest extends AbstractFullDistribZkTestBase
|
|||
|
||||
docId = testIndexQueryDeleteHierarchical(docId);
|
||||
|
||||
docId = testIndexingDocPerRequestWithHttpSolrServer(docId);
|
||||
docId = testIndexingDocPerRequestWithHttpSolrClient(docId);
|
||||
|
||||
testIndexingWithSuss(docId);
|
||||
testConcurrentIndexing(docId);
|
||||
|
||||
// TODO: testOptimisticUpdate(results);
|
||||
|
||||
|
@ -141,7 +140,7 @@ public class FullSolrCloudDistribCmdsTest extends AbstractFullDistribZkTestBase
|
|||
docId = testThatCantForwardToLeaderFails(docId);
|
||||
|
||||
|
||||
docId = testIndexingBatchPerRequestWithHttpSolrServer(docId);
|
||||
docId = testIndexingBatchPerRequestWithHttpSolrClient(docId);
|
||||
}
|
||||
|
||||
private long testThatCantForwardToLeaderFails(long docId) throws Exception {
|
||||
|
@ -316,7 +315,7 @@ public class FullSolrCloudDistribCmdsTest extends AbstractFullDistribZkTestBase
|
|||
}
|
||||
|
||||
|
||||
private long testIndexingDocPerRequestWithHttpSolrServer(long docId) throws Exception {
|
||||
private long testIndexingDocPerRequestWithHttpSolrClient(long docId) throws Exception {
|
||||
int docs = random().nextInt(TEST_NIGHTLY ? 4013 : 97) + 1;
|
||||
for (int i = 0; i < docs; i++) {
|
||||
UpdateRequest uReq;
|
||||
|
@ -335,7 +334,7 @@ public class FullSolrCloudDistribCmdsTest extends AbstractFullDistribZkTestBase
|
|||
return docId++;
|
||||
}
|
||||
|
||||
private long testIndexingBatchPerRequestWithHttpSolrServer(long docId) throws Exception {
|
||||
private long testIndexingBatchPerRequestWithHttpSolrClient(long docId) throws Exception {
|
||||
|
||||
// remove collection
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
|
@@ -432,25 +431,25 @@ public class FullSolrCloudDistribCmdsTest extends AbstractFullDistribZkTestBase
 return -1;
 }

-private long testIndexingWithSuss(long docId) throws Exception {
-ConcurrentUpdateSolrServer suss = new ConcurrentUpdateSolrServer(
-((HttpSolrServer) clients.get(0)).getBaseURL(), 10, 2);
+private long testConcurrentIndexing(long docId) throws Exception {
+ConcurrentUpdateSolrClient concurrentClient = new ConcurrentUpdateSolrClient(
+((HttpSolrClient) clients.get(0)).getBaseURL(), 10, 2);
 QueryResponse results = query(cloudClient);
 long beforeCount = results.getResults().getNumFound();
 int cnt = TEST_NIGHTLY ? 2933 : 313;
 try {
-suss.setConnectionTimeout(120000);
+concurrentClient.setConnectionTimeout(120000);
 for (int i = 0; i < cnt; i++) {
-index_specific(suss, id, docId++, "text_t", "some text so that it not's negligent work to parse this doc, even though it's still a pretty short doc");
+index_specific(concurrentClient, id, docId++, "text_t", "some text so that it not's negligent work to parse this doc, even though it's still a pretty short doc");
 }
-suss.blockUntilFinished();
+concurrentClient.blockUntilFinished();

 commit();

 checkShardConsistency();
 assertDocCounts(VERBOSE);
 } finally {
-suss.shutdown();
+concurrentClient.shutdown();
 }
 results = query(cloudClient);
 assertEquals(beforeCount + cnt, results.getResults().getNumFound());
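The renamed test drives bulk indexing through ConcurrentUpdateSolrClient. The sketch below shows the same usage pattern in isolation; it is a hedged illustration only, assuming a hypothetical local core URL and a small fixed document count rather than the values the test derives at runtime.

import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient;
import org.apache.solr.common.SolrInputDocument;

public class ConcurrentIndexingSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder core URL; queue size 10 and 2 background threads,
    // matching the settings used in the hunk above.
    ConcurrentUpdateSolrClient client =
        new ConcurrentUpdateSolrClient("http://localhost:8983/solr/collection1", 10, 2);
    client.setConnectionTimeout(120000);
    try {
      for (int i = 0; i < 100; i++) {
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", Integer.toString(i));
        doc.addField("text_t", "sample text for document " + i);
        client.add(doc); // buffered and flushed by the client's background threads
      }
      client.blockUntilFinished(); // wait until all queued updates have been sent
      client.commit();
    } finally {
      client.shutdown();
    }
  }
}

blockUntilFinished() is what makes the subsequent commit and count check meaningful, since adds are otherwise sent asynchronously.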
@ -497,9 +496,9 @@ public class FullSolrCloudDistribCmdsTest extends AbstractFullDistribZkTestBase
|
|||
assertEquals(1, res.getResults().getNumFound());
|
||||
}
|
||||
|
||||
private QueryResponse query(SolrServer server) throws SolrServerException {
|
||||
private QueryResponse query(SolrClient client) throws SolrServerException {
|
||||
SolrQuery query = new SolrQuery("*:*");
|
||||
return server.query(query);
|
||||
return client.query(query);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -17,22 +17,11 @@ package org.apache.solr.cloud;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.File;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.JSONTestUtil;
|
||||
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
|
||||
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.client.solrj.request.UpdateRequest;
|
||||
|
@ -52,6 +41,17 @@ import org.junit.Before;
|
|||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.File;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* Simulates HTTP partitions between a leader and replica but the replica does
|
||||
* not lose its ZooKeeper connection.
|
||||
|
@ -341,7 +341,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
|
|||
testCollectionName+"; clusterState: "+printClusterStateInfo(testCollectionName), leader);
|
||||
JettySolrRunner leaderJetty = getJettyOnPort(getReplicaPort(leader));
|
||||
|
||||
HttpSolrServer leaderSolr = getHttpSolrServer(leader, testCollectionName);
|
||||
HttpSolrClient leaderSolr = getHttpSolrClient(leader, testCollectionName);
|
||||
SolrInputDocument doc = new SolrInputDocument();
|
||||
doc.addField(id, String.valueOf(2));
|
||||
doc.addField("a_t", "hello" + 2);
|
||||
|
@ -377,7 +377,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
|
|||
leaderSolr.shutdown();
|
||||
|
||||
// if the add worked, then the doc must exist on the new leader
|
||||
HttpSolrServer newLeaderSolr = getHttpSolrServer(currentLeader, testCollectionName);
|
||||
HttpSolrClient newLeaderSolr = getHttpSolrClient(currentLeader, testCollectionName);
|
||||
try {
|
||||
assertDocExists(newLeaderSolr, testCollectionName, "2");
|
||||
} finally {
|
||||
|
@ -386,7 +386,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
} catch (SolrException exc) {
|
||||
// this is ok provided the doc doesn't exist on the current leader
|
||||
leaderSolr = getHttpSolrServer(currentLeader, testCollectionName);
|
||||
leaderSolr = getHttpSolrClient(currentLeader, testCollectionName);
|
||||
try {
|
||||
leaderSolr.add(doc); // this should work
|
||||
} finally {
|
||||
|
@@ -439,18 +439,18 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
 throws Exception {
 Replica leader =
 cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard1", 10000);
-HttpSolrServer leaderSolr = getHttpSolrServer(leader, testCollectionName);
-List<HttpSolrServer> replicas =
-new ArrayList<HttpSolrServer>(notLeaders.size());
+HttpSolrClient leaderSolr = getHttpSolrClient(leader, testCollectionName);
+List<HttpSolrClient> replicas =
+new ArrayList<HttpSolrClient>(notLeaders.size());

 for (Replica r : notLeaders) {
-replicas.add(getHttpSolrServer(r, testCollectionName));
+replicas.add(getHttpSolrClient(r, testCollectionName));
 }
 try {
 for (int d = firstDocId; d <= lastDocId; d++) {
 String docId = String.valueOf(d);
 assertDocExists(leaderSolr, testCollectionName, docId);
-for (HttpSolrServer replicaSolr : replicas) {
+for (HttpSolrClient replicaSolr : replicas) {
 assertDocExists(replicaSolr, testCollectionName, docId);
 }
 }
@@ -458,16 +458,16 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
 if (leaderSolr != null) {
 leaderSolr.shutdown();
 }
-for (HttpSolrServer replicaSolr : replicas) {
+for (HttpSolrClient replicaSolr : replicas) {
 replicaSolr.shutdown();
 }
 }
 }

-protected HttpSolrServer getHttpSolrServer(Replica replica, String coll) throws Exception {
+protected HttpSolrClient getHttpSolrClient(Replica replica, String coll) throws Exception {
 ZkCoreNodeProps zkProps = new ZkCoreNodeProps(replica);
 String url = zkProps.getBaseUrl() + "/" + coll;
-return new HttpSolrServer(url);
+return new HttpSolrClient(url);
 }

 protected void sendDoc(int docId) throws Exception {
@ -486,7 +486,7 @@ public class HttpPartitionTest extends AbstractFullDistribZkTestBase {
|
|||
* exists in the provided server, using distrib=false so it doesn't route to another replica.
|
||||
*/
|
||||
@SuppressWarnings("rawtypes")
|
||||
protected void assertDocExists(HttpSolrServer solr, String coll, String docId) throws Exception {
|
||||
protected void assertDocExists(HttpSolrClient solr, String coll, String docId) throws Exception {
|
||||
QueryRequest qr = new QueryRequest(params("qt", "/get", "id", docId, "distrib", "false"));
|
||||
NamedList rsp = solr.request(qr);
|
||||
String match = JSONTestUtil.matchObj("/id", rsp.get("doc"), new Integer(docId));
|
||||
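The HttpPartitionTest hunks above show the shape of the whole patch: helpers that built an HttpSolrServer per replica now build an HttpSolrClient, and the surrounding call sites stay the same. As a reference outside the patch, here is a minimal sketch of that per-replica client plus the distrib=false real-time-get check that assertDocExists performs; the class and method names below are illustrative, not part of the commit.

import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;

public class ReplicaClientSketch {
  // Build a client that talks directly to one replica of a collection,
  // mirroring HttpPartitionTest.getHttpSolrClient() after the rename.
  static HttpSolrClient clientFor(Replica replica, String collection) {
    ZkCoreNodeProps zkProps = new ZkCoreNodeProps(replica);
    return new HttpSolrClient(zkProps.getBaseUrl() + "/" + collection);
  }

  // Ask that one replica (distrib=false) whether it has a given document,
  // the same real-time-get check assertDocExists() makes.
  static boolean hasDoc(HttpSolrClient solr, String docId) throws Exception {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("qt", "/get");
    params.set("id", docId);
    params.set("distrib", "false");
    NamedList<Object> rsp = solr.request(new QueryRequest(params));
    return rsp.get("doc") != null;
  }
}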
@@ -110,8 +110,8 @@ public class LeaderFailoverAfterPartitionTest extends HttpPartitionTest {
// doc should be on leader and 1 replica
sendDoc(5);

assertDocExists(getHttpSolrServer(leader, testCollectionName), testCollectionName, "5");
assertDocExists(getHttpSolrServer(notLeaders.get(1), testCollectionName), testCollectionName, "5");
assertDocExists(getHttpSolrClient(leader, testCollectionName), testCollectionName, "5");
assertDocExists(getHttpSolrClient(notLeaders.get(1), testCollectionName), testCollectionName, "5");

Thread.sleep(sleepMsBeforeHealPartition);
@@ -17,17 +17,17 @@ package org.apache.solr.cloud;
 * limitations under the License.
 */

import java.io.File;
import java.util.List;

import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.junit.After;
import org.junit.Before;

import java.io.File;
import java.util.List;

public class LeaderInitiatedRecoveryOnCommitTest extends BasicDistributedZkTest {

private static final long sleepMsBeforeHealPartition = 2000L;
@@ -91,8 +91,8 @@ public class LeaderInitiatedRecoveryOnCommitTest extends BasicDistributedZkTest

// let's find the leader of shard2 and ask him to commit
Replica shard2Leader = cloudClient.getZkStateReader().getLeaderRetry(testCollectionName, "shard2");
HttpSolrServer server = new HttpSolrServer(ZkCoreNodeProps.getCoreUrl(shard2Leader.getStr("base_url"), shard2Leader.getStr("core")));
server.commit();
HttpSolrClient client = new HttpSolrClient(ZkCoreNodeProps.getCoreUrl(shard2Leader.getStr("base_url"), shard2Leader.getStr("core")));
client.commit();

Thread.sleep(sleepMsBeforeHealPartition);

@@ -133,8 +133,8 @@ public class LeaderInitiatedRecoveryOnCommitTest extends BasicDistributedZkTest
leaderProxy.close();

Replica replica = notLeaders.get(0);
HttpSolrServer server = new HttpSolrServer(ZkCoreNodeProps.getCoreUrl(replica.getStr("base_url"), replica.getStr("core")));
server.commit();
HttpSolrClient client = new HttpSolrClient(ZkCoreNodeProps.getCoreUrl(replica.getStr("base_url"), replica.getStr("core")));
client.commit();

Thread.sleep(sleepMsBeforeHealPartition);
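LeaderInitiatedRecoveryOnCommitTest above points an HttpSolrClient at a single core URL to force a commit on one node only. A self-contained sketch of that idiom follows; the base URL and core name are made-up placeholders, and in the test the core URL comes from a Replica read out of ZooKeeper.

import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.common.cloud.ZkCoreNodeProps;

public class CommitOnOneCoreSketch {
  public static void main(String[] args) throws Exception {
    // Assemble the per-core URL the same way the test does.
    String baseUrl = "http://127.0.0.1:8983/solr";   // assumed placeholder
    String coreName = "collection1_shard2_replica1"; // assumed placeholder
    HttpSolrClient client = new HttpSolrClient(ZkCoreNodeProps.getCoreUrl(baseUrl, coreName));
    try {
      client.commit(); // commits only on this core, not distributed
    } finally {
      client.shutdown(); // SolrJ 5.0-era cleanup, as used throughout the patch
    }
  }
}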
@ -20,8 +20,8 @@ package org.apache.solr.cloud;
|
|||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrRequest;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
|
@ -30,7 +30,6 @@ import org.apache.solr.common.cloud.ClusterState;
|
|||
import org.apache.solr.common.cloud.RoutingRule;
|
||||
import org.apache.solr.common.cloud.Slice;
|
||||
import org.apache.solr.common.cloud.ZkNodeProps;
|
||||
import org.apache.solr.common.params.CollectionParams;
|
||||
import org.apache.solr.common.params.ModifiableSolrParams;
|
||||
import org.apache.solr.update.DirectUpdateHandler2;
|
||||
import org.apache.zookeeper.KeeperException;
|
||||
|
@ -43,8 +42,8 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
|
||||
import static org.apache.solr.cloud.OverseerCollectionProcessor.NUM_SLICES;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
|
||||
|
||||
public class MigrateRouteKeyTest extends BasicDistributedZkTest {
|
||||
|
||||
|
@ -103,8 +102,8 @@ public class MigrateRouteKeyTest extends BasicDistributedZkTest {
|
|||
ClusterState state;Slice slice;
|
||||
boolean ruleRemoved = false;
|
||||
while (System.currentTimeMillis() - finishTime < 60000) {
|
||||
getCommonCloudSolrServer().getZkStateReader().updateClusterState(true);
|
||||
state = getCommonCloudSolrServer().getZkStateReader().getClusterState();
|
||||
getCommonCloudSolrClient().getZkStateReader().updateClusterState(true);
|
||||
state = getCommonCloudSolrClient().getZkStateReader().getClusterState();
|
||||
slice = state.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD2);
|
||||
Map<String,RoutingRule> routingRules = slice.getRoutingRules();
|
||||
if (routingRules == null || routingRules.isEmpty() || !routingRules.containsKey(splitKey)) {
|
||||
|
@ -133,20 +132,20 @@ public class MigrateRouteKeyTest extends BasicDistributedZkTest {
|
|||
SolrRequest request = new QueryRequest(params);
|
||||
request.setPath("/admin/collections");
|
||||
|
||||
String baseUrl = ((HttpSolrServer) shardToJetty.get(SHARD1).get(0).client.solrClient)
|
||||
String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient)
|
||||
.getBaseURL();
|
||||
baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
|
||||
|
||||
HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
|
||||
baseServer.setConnectionTimeout(15000);
|
||||
baseServer.setSoTimeout(60000 * 5);
|
||||
baseServer.request(request);
|
||||
baseServer.shutdown();
|
||||
HttpSolrClient baseClient = new HttpSolrClient(baseUrl);
|
||||
baseClient.setConnectionTimeout(15000);
|
||||
baseClient.setSoTimeout(60000 * 5);
|
||||
baseClient.request(request);
|
||||
baseClient.shutdown();
|
||||
}
|
||||
|
||||
private void createCollection(String targetCollection) throws Exception {
|
||||
HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
|
||||
CloudSolrServer client = null;
|
||||
CloudSolrClient client = null;
|
||||
try {
|
||||
client = createCloudClient(null);
|
||||
Map<String, Object> props = ZkNodeProps.makeMap(
|
||||
|
@ -193,8 +192,8 @@ public class MigrateRouteKeyTest extends BasicDistributedZkTest {
|
|||
Indexer indexer = new Indexer(cloudClient, splitKey, 1, 30);
|
||||
indexer.start();
|
||||
|
||||
String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), targetCollection);
|
||||
HttpSolrServer collectionClient = new HttpSolrServer(url);
|
||||
String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), targetCollection);
|
||||
HttpSolrClient collectionClient = new HttpSolrClient(url);
|
||||
|
||||
SolrQuery solrQuery = new SolrQuery("*:*");
|
||||
assertEquals("DocCount on target collection does not match", 0, collectionClient.query(solrQuery).getResults().getNumFound());
|
||||
|
@ -221,8 +220,8 @@ public class MigrateRouteKeyTest extends BasicDistributedZkTest {
|
|||
collectionClient.shutdown();
|
||||
collectionClient = null;
|
||||
|
||||
getCommonCloudSolrServer().getZkStateReader().updateClusterState(true);
|
||||
ClusterState state = getCommonCloudSolrServer().getZkStateReader().getClusterState();
|
||||
getCommonCloudSolrClient().getZkStateReader().updateClusterState(true);
|
||||
ClusterState state = getCommonCloudSolrClient().getZkStateReader().getClusterState();
|
||||
Slice slice = state.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD2);
|
||||
assertNotNull("Routing rule map is null", slice.getRoutingRules());
|
||||
assertFalse("Routing rule map is empty", slice.getRoutingRules().isEmpty());
|
||||
|
@ -234,12 +233,12 @@ public class MigrateRouteKeyTest extends BasicDistributedZkTest {
|
|||
|
||||
static class Indexer extends Thread {
|
||||
final int seconds;
|
||||
final CloudSolrServer cloudClient;
|
||||
final CloudSolrClient cloudClient;
|
||||
final String splitKey;
|
||||
int splitKeyCount = 0;
|
||||
final int bitSep;
|
||||
|
||||
public Indexer(CloudSolrServer cloudClient, String splitKey, int bitSep, int seconds) {
|
||||
public Indexer(CloudSolrClient cloudClient, String splitKey, int bitSep, int seconds) {
|
||||
this.seconds = seconds;
|
||||
this.cloudClient = cloudClient;
|
||||
this.splitKey = splitKey;
|
||||
|
|
|
@ -18,9 +18,9 @@ package org.apache.solr.cloud;
|
|||
*/
|
||||
|
||||
import org.apache.solr.client.solrj.SolrRequest;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.CollectionAdminRequest.Create;
|
||||
import org.apache.solr.client.solrj.request.CollectionAdminRequest.RequestStatus;
|
||||
import org.apache.solr.client.solrj.request.CollectionAdminRequest.SplitShard;
|
||||
|
@ -76,7 +76,7 @@ public class MultiThreadedOCPTest extends AbstractFullDistribZkTestBase {
|
|||
}
|
||||
|
||||
private void testParallelCollectionAPICalls() throws IOException, SolrServerException {
|
||||
SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
|
||||
SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)));
|
||||
|
||||
for(int i = 1 ; i <= NUM_COLLECTIONS ; i++) {
|
||||
Create createCollectionRequest = new Create();
|
||||
|
@ -84,7 +84,7 @@ public class MultiThreadedOCPTest extends AbstractFullDistribZkTestBase {
|
|||
createCollectionRequest.setNumShards(4);
|
||||
createCollectionRequest.setConfigName("conf1");
|
||||
createCollectionRequest.setAsyncId(String.valueOf(i));
|
||||
createCollectionRequest.process(server);
|
||||
createCollectionRequest.process(client);
|
||||
}
|
||||
|
||||
boolean pass = false;
|
||||
|
@ -92,7 +92,7 @@ public class MultiThreadedOCPTest extends AbstractFullDistribZkTestBase {
|
|||
while(true) {
|
||||
int numRunningTasks = 0;
|
||||
for (int i = 1; i <= NUM_COLLECTIONS; i++)
|
||||
if (getRequestState(i + "", server).equals("running"))
|
||||
if (getRequestState(i + "", client).equals("running"))
|
||||
numRunningTasks++;
|
||||
if(numRunningTasks > 1) {
|
||||
pass = true;
|
||||
|
@ -107,38 +107,38 @@ public class MultiThreadedOCPTest extends AbstractFullDistribZkTestBase {
|
|||
}
|
||||
assertTrue("More than one tasks were supposed to be running in parallel but they weren't.", pass);
|
||||
for(int i=1;i<=NUM_COLLECTIONS;i++) {
|
||||
String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, server);
|
||||
String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, client);
|
||||
assertTrue("Task " + i + " did not complete, final state: " + state,state.equals("completed"));
|
||||
}
|
||||
}
|
||||
|
||||
private void testTaskExclusivity() throws IOException, SolrServerException {
|
||||
SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
|
||||
SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)));
|
||||
Create createCollectionRequest = new Create();
|
||||
createCollectionRequest.setCollectionName("ocptest_shardsplit");
|
||||
createCollectionRequest.setNumShards(4);
|
||||
createCollectionRequest.setConfigName("conf1");
|
||||
createCollectionRequest.setAsyncId("1000");
|
||||
createCollectionRequest.process(server);
|
||||
createCollectionRequest.process(client);
|
||||
|
||||
SplitShard splitShardRequest = new SplitShard();
|
||||
splitShardRequest.setCollectionName("ocptest_shardsplit");
|
||||
splitShardRequest.setShardName(SHARD1);
|
||||
splitShardRequest.setAsyncId("1001");
|
||||
splitShardRequest.process(server);
|
||||
splitShardRequest.process(client);
|
||||
|
||||
splitShardRequest = new SplitShard();
|
||||
splitShardRequest.setCollectionName("ocptest_shardsplit");
|
||||
splitShardRequest.setShardName(SHARD2);
|
||||
splitShardRequest.setAsyncId("1002");
|
||||
splitShardRequest.process(server);
|
||||
splitShardRequest.process(client);
|
||||
|
||||
int iterations = 0;
|
||||
while(true) {
|
||||
int runningTasks = 0;
|
||||
int completedTasks = 0;
|
||||
for (int i=1001;i<=1002;i++) {
|
||||
String state = getRequestState(i, server);
|
||||
String state = getRequestState(i, client);
|
||||
if (state.equals("running"))
|
||||
runningTasks++;
|
||||
if (state.equals("completed"))
|
||||
|
@ -161,45 +161,45 @@ public class MultiThreadedOCPTest extends AbstractFullDistribZkTestBase {
|
|||
}
|
||||
}
|
||||
for (int i=1001;i<=1002;i++) {
|
||||
String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, server);
|
||||
String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, client);
|
||||
assertTrue("Task " + i + " did not complete, final state: " + state,state.equals("completed"));
|
||||
}
|
||||
}
|
||||
|
||||
private void testDeduplicationOfSubmittedTasks() throws IOException, SolrServerException {
|
||||
SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
|
||||
SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)));
|
||||
Create createCollectionRequest = new Create();
|
||||
createCollectionRequest.setCollectionName("ocptest_shardsplit2");
|
||||
createCollectionRequest.setNumShards(4);
|
||||
createCollectionRequest.setConfigName("conf1");
|
||||
createCollectionRequest.setAsyncId("3000");
|
||||
createCollectionRequest.process(server);
|
||||
createCollectionRequest.process(client);
|
||||
|
||||
SplitShard splitShardRequest = new SplitShard();
|
||||
splitShardRequest.setCollectionName("ocptest_shardsplit2");
|
||||
splitShardRequest.setShardName(SHARD1);
|
||||
splitShardRequest.setAsyncId("3001");
|
||||
splitShardRequest.process(server);
|
||||
splitShardRequest.process(client);
|
||||
|
||||
splitShardRequest = new SplitShard();
|
||||
splitShardRequest.setCollectionName("ocptest_shardsplit2");
|
||||
splitShardRequest.setShardName(SHARD2);
|
||||
splitShardRequest.setAsyncId("3002");
|
||||
splitShardRequest.process(server);
|
||||
splitShardRequest.process(client);
|
||||
|
||||
// Now submit another task with the same id. At this time, hopefully the previous 3002 should still be in the queue.
|
||||
splitShardRequest = new SplitShard();
|
||||
splitShardRequest.setCollectionName("ocptest_shardsplit2");
|
||||
splitShardRequest.setShardName(SHARD1);
|
||||
splitShardRequest.setAsyncId("3002");
|
||||
CollectionAdminResponse response = splitShardRequest.process(server);
|
||||
CollectionAdminResponse response = splitShardRequest.process(client);
|
||||
|
||||
NamedList r = response.getResponse();
|
||||
assertEquals("Duplicate request was supposed to exist but wasn't found. De-duplication of submitted task failed.",
|
||||
"Task with the same requestid already exists.", r.get("error"));
|
||||
|
||||
for (int i=3001;i<=3002;i++) {
|
||||
String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, server);
|
||||
String state = getRequestStateAfterCompletion(i + "", REQUEST_STATUS_TIMEOUT, client);
|
||||
assertTrue("Task " + i + " did not complete, final state: " + state,state.equals("completed"));
|
||||
}
|
||||
}
|
||||
|
@ -224,16 +224,16 @@ public class MultiThreadedOCPTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
try {
|
||||
|
||||
SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
|
||||
SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)));
|
||||
SplitShard splitShardRequest = new SplitShard();
|
||||
splitShardRequest.setCollectionName("collection1");
|
||||
splitShardRequest.setShardName(SHARD1);
|
||||
splitShardRequest.setAsyncId("2000");
|
||||
splitShardRequest.process(server);
|
||||
splitShardRequest.process(client);
|
||||
|
||||
String state = getRequestState("2000", server);
|
||||
String state = getRequestState("2000", client);
|
||||
while (state.equals("submitted")) {
|
||||
state = getRequestState("2000", server);
|
||||
state = getRequestState("2000", client);
|
||||
Thread.sleep(10);
|
||||
}
|
||||
assertTrue("SplitShard task [2000] was supposed to be in [running] but isn't. It is [" + state + "]", state.equals("running"));
|
||||
|
@ -246,9 +246,9 @@ public class MultiThreadedOCPTest extends AbstractFullDistribZkTestBase {
|
|||
SolrRequest request = new QueryRequest(params);
|
||||
request.setPath("/admin/collections");
|
||||
|
||||
server.request(request);
|
||||
client.request(request);
|
||||
|
||||
state = getRequestState("2000", server);
|
||||
state = getRequestState("2000", client);
|
||||
|
||||
assertTrue("After invoking OVERSEERSTATUS, SplitShard task [2000] was still supposed to be in [running] but isn't." +
|
||||
"It is [" + state + "]", state.equals("running"));
|
||||
|
@ -267,13 +267,13 @@ public class MultiThreadedOCPTest extends AbstractFullDistribZkTestBase {
|
|||
// todo - target diff servers and use cloud clients as well as non-cloud clients
|
||||
}
|
||||
|
||||
private String getRequestStateAfterCompletion(String requestId, int waitForSeconds, SolrServer server)
|
||||
private String getRequestStateAfterCompletion(String requestId, int waitForSeconds, SolrClient client)
|
||||
throws IOException, SolrServerException {
|
||||
String state = null;
|
||||
long maxWait = System.nanoTime() + TimeUnit.NANOSECONDS.convert(waitForSeconds, TimeUnit.SECONDS);
|
||||
|
||||
while (System.nanoTime() < maxWait) {
|
||||
state = getRequestState(requestId, server);
|
||||
state = getRequestState(requestId, client);
|
||||
if(state.equals("completed") || state.equals("failed"))
|
||||
return state;
|
||||
try {
|
||||
|
@ -285,14 +285,14 @@ public class MultiThreadedOCPTest extends AbstractFullDistribZkTestBase {
|
|||
return state;
|
||||
}
|
||||
|
||||
private String getRequestState(int requestId, SolrServer server) throws IOException, SolrServerException {
|
||||
return getRequestState(String.valueOf(requestId), server);
|
||||
private String getRequestState(int requestId, SolrClient client) throws IOException, SolrServerException {
|
||||
return getRequestState(String.valueOf(requestId), client);
|
||||
}
|
||||
|
||||
private String getRequestState(String requestId, SolrServer server) throws IOException, SolrServerException {
|
||||
private String getRequestState(String requestId, SolrClient client) throws IOException, SolrServerException {
|
||||
RequestStatus requestStatusRequest = new RequestStatus();
|
||||
requestStatusRequest.setRequestId(requestId);
|
||||
CollectionAdminResponse response = requestStatusRequest.process(server);
|
||||
CollectionAdminResponse response = requestStatusRequest.process(client);
|
||||
|
||||
NamedList innerResponse = (NamedList) response.getResponse().get("status");
|
||||
return (String) innerResponse.get("state");
|
||||
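MultiThreadedOCPTest above drives the async Collections API entirely through a SolrClient: it submits tasks with an asyncId and then polls REQUESTSTATUS until they finish. A compact sketch of that polling loop against the same SolrJ classes; the deadline handling and sleep interval are illustrative choices rather than values lifted from the test.

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest.RequestStatus;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.common.util.NamedList;

public class AsyncStatusSketch {
  // Poll the Collections API until an async task leaves the running/submitted
  // states, mirroring getRequestStateAfterCompletion() in the test.
  static String waitForTask(SolrClient client, String requestId, long timeoutMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    String state = "submitted";
    while (System.currentTimeMillis() < deadline) {
      RequestStatus status = new RequestStatus();
      status.setRequestId(requestId);
      CollectionAdminResponse rsp = status.process(client);
      NamedList inner = (NamedList) rsp.getResponse().get("status");
      state = (String) inner.get("state");
      if ("completed".equals(state) || "failed".equals(state)) {
        return state;
      }
      Thread.sleep(1000);
    }
    return state; // still running or submitted when the deadline expired
  }
}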
@ -36,7 +36,7 @@ import org.apache.lucene.util.LuceneTestCase;
|
|||
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
|
||||
import org.apache.solr.client.solrj.SolrRequest;
|
||||
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.cloud.overseer.OverseerAction;
|
||||
import org.apache.solr.common.cloud.SolrZkClient;
|
||||
|
@ -53,7 +53,7 @@ import org.junit.BeforeClass;
|
|||
@LuceneTestCase.Slow
|
||||
@SuppressSSL // See SOLR-5776
|
||||
public class OverseerRolesTest extends AbstractFullDistribZkTestBase{
|
||||
private CloudSolrServer client;
|
||||
private CloudSolrClient client;
|
||||
|
||||
@BeforeClass
|
||||
public static void beforeThisClass2() throws Exception {
|
||||
|
@ -228,10 +228,10 @@ public class OverseerRolesTest extends AbstractFullDistribZkTestBase{
|
|||
}
|
||||
|
||||
|
||||
protected void createCollection(String COLL_NAME, CloudSolrServer client) throws Exception {
|
||||
protected void createCollection(String COLL_NAME, CloudSolrClient client) throws Exception {
|
||||
int replicationFactor = 2;
|
||||
int numShards = 4;
|
||||
int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrServer()
|
||||
int maxShardsPerNode = ((((numShards+1) * replicationFactor) / getCommonCloudSolrClient()
|
||||
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
|
||||
|
||||
Map<String, Object> props = makeMap(
|
||||
|
|
|
@@ -19,7 +19,7 @@ package org.apache.solr.cloud;

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.common.cloud.DocRouter;
@ -18,7 +18,7 @@ package org.apache.solr.cloud;
|
|||
*/
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
|
||||
|
@ -56,17 +56,17 @@ public class RemoteQueryErrorTest extends AbstractFullDistribZkTestBase {
|
|||
checkForCollection("collection2", numShardsNumReplicaList, null);
|
||||
waitForRecoveriesToFinish("collection2", true);
|
||||
|
||||
for (SolrServer solrServer : clients) {
|
||||
for (SolrClient solrClient : clients) {
|
||||
try {
|
||||
SolrInputDocument emptyDoc = new SolrInputDocument();
|
||||
solrServer.add(emptyDoc);
|
||||
solrClient.add(emptyDoc);
|
||||
fail("Expected unique key exceptoin");
|
||||
} catch (SolrException ex) {
|
||||
assertThat(ex.getMessage(), containsString("Document is missing mandatory uniqueKey field: id"));
|
||||
} catch(Exception ex) {
|
||||
fail("Expected a SolrException to occur, instead received: " + ex.getClass());
|
||||
} finally {
|
||||
solrServer.shutdown();
|
||||
solrClient.shutdown();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,15 +17,9 @@ package org.apache.solr.cloud;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.common.cloud.ClusterState;
|
||||
import org.apache.solr.common.cloud.DocCollection;
|
||||
|
@ -35,12 +29,18 @@ import org.apache.solr.common.params.ModifiableSolrParams;
|
|||
import org.apache.solr.common.util.NamedList;
|
||||
import org.apache.zookeeper.KeeperException;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
// Collect useful operations for testing assigning properties to individual replicas
|
||||
// Could probably expand this to do something creative with getting random slices
|
||||
// and shards, but for now this will do.
|
||||
public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBase {
|
||||
|
||||
public static NamedList<Object> doPropertyAction(CloudSolrServer client, String... paramsIn) throws IOException, SolrServerException {
|
||||
public static NamedList<Object> doPropertyAction(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
|
||||
assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
for (int idx = 0; idx < paramsIn.length; idx += 2) {
|
||||
|
@ -51,7 +51,7 @@ public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBas
|
|||
return client.request(request);
|
||||
}
|
||||
|
||||
public static void verifyPropertyNotPresent(CloudSolrServer client, String collectionName, String replicaName,
|
||||
public static void verifyPropertyNotPresent(CloudSolrClient client, String collectionName, String replicaName,
|
||||
String property)
|
||||
throws KeeperException, InterruptedException {
|
||||
ClusterState clusterState = null;
|
||||
|
@ -76,7 +76,7 @@ public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBas
|
|||
// collection
|
||||
// shard
|
||||
// replica
|
||||
public static void verifyPropertyVal(CloudSolrServer client, String collectionName,
|
||||
public static void verifyPropertyVal(CloudSolrClient client, String collectionName,
|
||||
String replicaName, String property, String val)
|
||||
throws InterruptedException, KeeperException {
|
||||
Replica replica = null;
|
||||
|
@ -102,16 +102,17 @@ public abstract class ReplicaPropertiesBase extends AbstractFullDistribZkTestBas
|
|||
// Verify that
|
||||
// 1> the property is only set once in all the replicas in a slice.
|
||||
// 2> the property is balanced evenly across all the nodes hosting collection
|
||||
public static void verifyUniqueAcrossCollection(CloudSolrServer client, String collectionName,
|
||||
public static void verifyUniqueAcrossCollection(CloudSolrClient client, String collectionName,
|
||||
String property) throws KeeperException, InterruptedException {
|
||||
verifyUnique(client, collectionName, property, true);
|
||||
}
|
||||
|
||||
public static void verifyUniquePropertyWithinCollection(CloudSolrServer client, String collectionName,
|
||||
public static void verifyUniquePropertyWithinCollection(CloudSolrClient client, String collectionName,
|
||||
String property) throws KeeperException, InterruptedException {
|
||||
verifyUnique(client, collectionName, property, false);
|
||||
}
|
||||
public static void verifyUnique(CloudSolrServer client, String collectionName, String property, boolean balanced)
|
||||
|
||||
public static void verifyUnique(CloudSolrClient client, String collectionName, String property, boolean balanced)
|
||||
throws KeeperException, InterruptedException {
|
||||
|
||||
DocCollection col = null;
|
||||
|
|
|
@ -31,7 +31,7 @@ import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
|
|||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
|
||||
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.UpdateRequest;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
import org.apache.solr.common.cloud.Replica;
|
||||
|
@ -205,11 +205,11 @@ public class ReplicationFactorTest extends AbstractFullDistribZkTestBase {
|
|||
|
||||
@SuppressWarnings("rawtypes")
|
||||
protected void sendNonDirectUpdateRequestReplica(Replica replica, UpdateRequest up, int expectedRf, String collection) throws Exception {
|
||||
HttpSolrServer solrServer = null;
|
||||
HttpSolrClient solrServer = null;
|
||||
try {
|
||||
ZkCoreNodeProps zkProps = new ZkCoreNodeProps(replica);
|
||||
String url = zkProps.getBaseUrl() + "/" + collection;
|
||||
solrServer = new HttpSolrServer(url);
|
||||
solrServer = new HttpSolrClient(url);
|
||||
|
||||
NamedList resp = solrServer.request(up);
|
||||
NamedList hdr = (NamedList) resp.get("responseHeader");
|
||||
|
|
|
@@ -29,7 +29,7 @@ import org.apache.commons.lang.StringUtils;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.HttpClientUtil;
import org.apache.solr.client.solrj.impl.LBHttpSolrServer;
import org.apache.solr.client.solrj.impl.LBHttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
@@ -115,7 +115,7 @@ public class SSLMigrationTest extends AbstractFullDistribZkTestBase {
urls.add(replica.getStr(ZkStateReader.BASE_URL_PROP));
}
//Create new SolrServer to configure new HttpClient w/ SSL config
new LBHttpSolrServer(urls.toArray(new String[]{})).request(request);
new LBHttpSolrClient(urls.toArray(new String[]{})).request(request);
}

}
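SSLMigrationTest above swaps LBHttpSolrServer for LBHttpSolrClient when it pushes a request through every node's base URL. A short sketch of the load-balanced client on its own; the URLs are placeholders, and in the test they are collected from ZkStateReader instead.

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.LBHttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;

public class LbClientSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder base URLs pointing at the same collection on two nodes.
    String[] urls = {"http://127.0.0.1:8983/solr/collection1",
                     "http://127.0.0.1:7574/solr/collection1"};
    LBHttpSolrClient lbClient = new LBHttpSolrClient(urls);
    try {
      // Requests rotate across the listed URLs; unreachable ones are skipped.
      lbClient.request(new QueryRequest(new SolrQuery("*:*")));
    } finally {
      lbClient.shutdown();
    }
  }
}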
@ -18,10 +18,10 @@ package org.apache.solr.cloud;
|
|||
*/
|
||||
|
||||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.request.UpdateRequest;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
import org.apache.solr.common.SolrDocument;
|
||||
|
@ -313,7 +313,7 @@ public class ShardRoutingTest extends AbstractFullDistribZkTestBase {
|
|||
assertEquals(8, nClients);
|
||||
|
||||
int expectedVal = 0;
|
||||
for (SolrServer client : clients) {
|
||||
for (SolrClient client : clients) {
|
||||
client.add(sdoc("id", "b!doc", "foo_i", map("inc",1)));
|
||||
expectedVal++;
|
||||
|
||||
|
|
|
@ -18,12 +18,13 @@ package org.apache.solr.cloud;
|
|||
*/
|
||||
|
||||
import org.apache.http.params.CoreConnectionPNames;
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrRequest;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
import org.apache.solr.common.SolrDocument;
|
||||
|
@ -42,7 +43,6 @@ import org.junit.After;
|
|||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.MalformedURLException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
|
@ -51,10 +51,9 @@ import java.util.Map;
|
|||
import java.util.Random;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import static org.apache.solr.cloud.OverseerCollectionProcessor.NUM_SLICES;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
|
||||
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
|
||||
|
||||
@Slow
|
||||
public class ShardSplitTest extends BasicDistributedZkTest {
|
||||
|
@ -126,7 +125,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
|
|||
try {
|
||||
splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
|
||||
fail("Shard splitting with just one custom hash range should not succeed");
|
||||
} catch (HttpSolrServer.RemoteSolrException e) {
|
||||
} catch (HttpSolrClient.RemoteSolrException e) {
|
||||
log.info("Expected exception:", e);
|
||||
}
|
||||
subRanges.clear();
|
||||
|
@ -137,7 +136,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
|
|||
try {
|
||||
splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
|
||||
fail("Shard splitting with missing hashes in between given ranges should not succeed");
|
||||
} catch (HttpSolrServer.RemoteSolrException e) {
|
||||
} catch (HttpSolrClient.RemoteSolrException e) {
|
||||
log.info("Expected exception:", e);
|
||||
}
|
||||
subRanges.clear();
|
||||
|
@ -150,7 +149,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
|
|||
try {
|
||||
splitShard(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1, subRanges, null);
|
||||
fail("Shard splitting with overlapping ranges should not succeed");
|
||||
} catch (HttpSolrServer.RemoteSolrException e) {
|
||||
} catch (HttpSolrClient.RemoteSolrException e) {
|
||||
log.info("Expected exception:", e);
|
||||
}
|
||||
subRanges.clear();
|
||||
|
@ -220,7 +219,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
|
|||
log.info("Layout after split: \n");
|
||||
printLayout();
|
||||
break;
|
||||
} catch (HttpSolrServer.RemoteSolrException e) {
|
||||
} catch (HttpSolrClient.RemoteSolrException e) {
|
||||
if (e.code() != 500) {
|
||||
throw e;
|
||||
}
|
||||
|
@ -248,11 +247,11 @@ public class ShardSplitTest extends BasicDistributedZkTest {
|
|||
String collectionName = "routeFieldColl";
|
||||
int numShards = 4;
|
||||
int replicationFactor = 2;
|
||||
int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
|
||||
int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
|
||||
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
|
||||
|
||||
HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
|
||||
CloudSolrServer client = null;
|
||||
CloudSolrClient client = null;
|
||||
String shard_fld = "shard_s";
|
||||
try {
|
||||
client = createCloudClient(null);
|
||||
|
@ -272,9 +271,9 @@ public class ShardSplitTest extends BasicDistributedZkTest {
|
|||
|
||||
waitForRecoveriesToFinish(false);
|
||||
|
||||
String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), collectionName);
|
||||
String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
|
||||
|
||||
HttpSolrServer collectionClient = new HttpSolrServer(url);
|
||||
HttpSolrClient collectionClient = new HttpSolrClient(url);
|
||||
|
||||
ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
|
||||
final DocRouter router = clusterState.getCollection(collectionName).getRouter();
|
||||
|
@ -304,7 +303,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
|
|||
try {
|
||||
splitShard(collectionName, SHARD1, null, null);
|
||||
break;
|
||||
} catch (HttpSolrServer.RemoteSolrException e) {
|
||||
} catch (HttpSolrClient.RemoteSolrException e) {
|
||||
if (e.code() != 500) {
|
||||
throw e;
|
||||
}
|
||||
|
@ -327,11 +326,11 @@ public class ShardSplitTest extends BasicDistributedZkTest {
|
|||
String collectionName = "splitByRouteKeyTest";
|
||||
int numShards = 4;
|
||||
int replicationFactor = 2;
|
||||
int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrServer()
|
||||
int maxShardsPerNode = (((numShards * replicationFactor) / getCommonCloudSolrClient()
|
||||
.getZkStateReader().getClusterState().getLiveNodes().size())) + 1;
|
||||
|
||||
HashMap<String, List<Integer>> collectionInfos = new HashMap<>();
|
||||
CloudSolrServer client = null;
|
||||
CloudSolrClient client = null;
|
||||
try {
|
||||
client = createCloudClient(null);
|
||||
Map<String, Object> props = ZkNodeProps.makeMap(
|
||||
|
@ -349,9 +348,9 @@ public class ShardSplitTest extends BasicDistributedZkTest {
|
|||
|
||||
waitForRecoveriesToFinish(false);
|
||||
|
||||
String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrServer().getZkStateReader().getClusterState(), collectionName);
|
||||
String url = CustomCollectionTest.getUrlFromZk(getCommonCloudSolrClient().getZkStateReader().getClusterState(), collectionName);
|
||||
|
||||
HttpSolrServer collectionClient = new HttpSolrServer(url);
|
||||
HttpSolrClient collectionClient = new HttpSolrClient(url);
|
||||
|
||||
String splitKey = "b!";
|
||||
|
||||
|
@ -389,7 +388,7 @@ public class ShardSplitTest extends BasicDistributedZkTest {
|
|||
try {
|
||||
splitShard(collectionName, null, null, splitKey);
|
||||
break;
|
||||
} catch (HttpSolrServer.RemoteSolrException e) {
|
||||
} catch (HttpSolrClient.RemoteSolrException e) {
|
||||
if (e.code() != 500) {
|
||||
throw e;
|
||||
}
|
||||
|
@ -447,23 +446,23 @@ public class ShardSplitTest extends BasicDistributedZkTest {
|
|||
query.set("distrib", false);
|
||||
|
||||
ZkCoreNodeProps shard1_0 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_0);
|
||||
HttpSolrServer shard1_0Server = new HttpSolrServer(shard1_0.getCoreUrl());
|
||||
HttpSolrClient shard1_0Client = new HttpSolrClient(shard1_0.getCoreUrl());
|
||||
QueryResponse response;
|
||||
try {
|
||||
response = shard1_0Server.query(query);
|
||||
response = shard1_0Client.query(query);
|
||||
} finally {
|
||||
shard1_0Server.shutdown();
|
||||
shard1_0Client.shutdown();
|
||||
}
|
||||
long shard10Count = response.getResults().getNumFound();
|
||||
|
||||
ZkCoreNodeProps shard1_1 = getLeaderUrlFromZk(
|
||||
AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_1);
|
||||
HttpSolrServer shard1_1Server = new HttpSolrServer(shard1_1.getCoreUrl());
|
||||
HttpSolrClient shard1_1Client = new HttpSolrClient(shard1_1.getCoreUrl());
|
||||
QueryResponse response2;
|
||||
try {
|
||||
response2 = shard1_1Server.query(query);
|
||||
response2 = shard1_1Client.query(query);
|
||||
} finally {
|
||||
shard1_1Server.shutdown();
|
||||
shard1_1Client.shutdown();
|
||||
}
|
||||
long shard11Count = response2.getResults().getNumFound();
|
||||
|
||||
|
@ -483,12 +482,12 @@ public class ShardSplitTest extends BasicDistributedZkTest {
|
|||
int c = 0;
|
||||
for (Replica replica : slice.getReplicas()) {
|
||||
String coreUrl = new ZkCoreNodeProps(replica).getCoreUrl();
|
||||
HttpSolrServer server = new HttpSolrServer(coreUrl);
|
||||
HttpSolrClient client = new HttpSolrClient(coreUrl);
|
||||
QueryResponse response;
|
||||
try {
|
||||
response = server.query(query);
|
||||
response = client.query(query);
|
||||
} finally {
|
||||
server.shutdown();
|
||||
client.shutdown();
|
||||
}
|
||||
numFound[c++] = response.getResults().getNumFound();
|
||||
log.info("Shard: " + shard + " Replica: {} has {} docs", coreUrl, String.valueOf(response.getResults().getNumFound()));
|
||||
|
@ -522,15 +521,15 @@ public class ShardSplitTest extends BasicDistributedZkTest {
|
|||
SolrRequest request = new QueryRequest(params);
|
||||
request.setPath("/admin/collections");
|
||||
|
||||
String baseUrl = ((HttpSolrServer) shardToJetty.get(SHARD1).get(0).client.solrClient)
|
||||
String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient)
|
||||
.getBaseURL();
|
||||
baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
|
||||
|
||||
HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
|
||||
baseServer.setConnectionTimeout(30000);
|
||||
baseServer.setSoTimeout(60000 * 5);
|
||||
baseServer.request(request);
|
||||
baseServer.shutdown();
|
||||
HttpSolrClient baseClient = new HttpSolrClient(baseUrl);
|
||||
baseClient.setConnectionTimeout(30000);
|
||||
baseClient.setSoTimeout(60000 * 5);
|
||||
baseClient.request(request);
|
||||
baseClient.shutdown();
|
||||
}
|
||||
|
||||
protected void indexAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id, int n) throws Exception {
|
||||
|
@ -600,23 +599,23 @@ public class ShardSplitTest extends BasicDistributedZkTest {
|
|||
}
|
||||
|
||||
@Override
|
||||
protected SolrServer createNewSolrServer(String collection, String baseUrl) {
|
||||
HttpSolrServer server = (HttpSolrServer) super.createNewSolrServer(collection, baseUrl);
|
||||
server.setSoTimeout(5 * 60 * 1000);
|
||||
return server;
|
||||
protected SolrClient createNewSolrClient(String collection, String baseUrl) {
|
||||
HttpSolrClient client = (HttpSolrClient) super.createNewSolrClient(collection, baseUrl);
|
||||
client.setSoTimeout(5 * 60 * 1000);
|
||||
return client;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected SolrServer createNewSolrServer(int port) {
|
||||
HttpSolrServer server = (HttpSolrServer) super.createNewSolrServer(port);
|
||||
server.setSoTimeout(5 * 60 * 1000);
|
||||
return server;
|
||||
protected SolrClient createNewSolrClient(int port) {
|
||||
HttpSolrClient client = (HttpSolrClient) super.createNewSolrClient(port);
|
||||
client.setSoTimeout(5 * 60 * 1000);
|
||||
return client;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected CloudSolrServer createCloudClient(String defaultCollection) {
|
||||
CloudSolrServer client = super.createCloudClient(defaultCollection);
|
||||
client.getLbServer().getHttpClient().getParams().setParameter(CoreConnectionPNames.SO_TIMEOUT, 5 * 60 * 1000);
|
||||
protected CloudSolrClient createCloudClient(String defaultCollection) {
|
||||
CloudSolrClient client = super.createCloudClient(defaultCollection);
|
||||
client.getLbClient().getHttpClient().getParams().setParameter(CoreConnectionPNames.SO_TIMEOUT, 5 * 60 * 1000);
|
||||
return client;
|
||||
}
|
||||
}
|
||||
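The ShardSplitTest changes above also illustrate how the tests stretch client timeouts around slow admin calls, both on HttpSolrClient directly and on the load-balanced client inside CloudSolrClient. A sketch of those two knobs; the URL and zkHost are placeholders, while the five-minute socket timeout matches what the test sets.

import org.apache.http.params.CoreConnectionPNames;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;

public class TimeoutSketch {
  public static void main(String[] args) throws Exception {
    // Direct HTTP client: connect and socket read timeouts, in milliseconds.
    HttpSolrClient http = new HttpSolrClient("http://127.0.0.1:8983/solr"); // placeholder URL
    http.setConnectionTimeout(30000);
    http.setSoTimeout(5 * 60 * 1000);

    // Cloud client: the underlying load-balanced HttpClient takes the same
    // socket timeout through its HttpParams, as createCloudClient() does above.
    CloudSolrClient cloud = new CloudSolrClient("127.0.0.1:9983"); // placeholder zkHost
    cloud.getLbClient().getHttpClient().getParams()
        .setParameter(CoreConnectionPNames.SO_TIMEOUT, 5 * 60 * 1000);
  }
}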
@@ -142,7 +142,7 @@ public class SharedFSAutoReplicaFailoverTest extends AbstractFullDistribZkTestBa
createCollectionRequest.setConfigName("conf1");
createCollectionRequest.setRouterField("myOwnField");
createCollectionRequest.setAutoAddReplicas(false);
CollectionAdminResponse response2 = createCollectionRequest.process(getCommonCloudSolrServer());
CollectionAdminResponse response2 = createCollectionRequest.process(getCommonCloudSolrClient());

assertEquals(0, response2.getStatus());
assertTrue(response2.isSuccess());
@ -17,13 +17,10 @@ package org.apache.solr.cloud;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.common.cloud.SolrZkClient;
|
||||
import org.apache.solr.common.cloud.ZkStateReader;
|
||||
import org.apache.solr.common.params.ModifiableSolrParams;
|
||||
import org.apache.solr.common.params.SolrParams;
|
||||
import org.apache.solr.common.util.NamedList;
|
||||
|
||||
public class SimpleCollectionCreateDeleteTest extends AbstractFullDistribZkTestBase {
|
||||
|
|
|
@ -17,18 +17,11 @@ package org.apache.solr.cloud;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase.Slow;
|
||||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrRequest;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.client.solrj.request.UpdateRequest;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
|
@ -44,6 +37,13 @@ import org.junit.AfterClass;
|
|||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
* Test sync phase that occurs when Leader goes down and a new Leader is
|
||||
* elected.
|
||||
|
@ -128,16 +128,16 @@ public class SyncSliceTest extends AbstractFullDistribZkTestBase {
|
|||
SolrRequest request = new QueryRequest(params);
|
||||
request.setPath("/admin/collections");
|
||||
|
||||
String baseUrl = ((HttpSolrServer) shardToJetty.get("shard1").get(2).client.solrClient)
|
||||
String baseUrl = ((HttpSolrClient) shardToJetty.get("shard1").get(2).client.solrClient)
|
||||
.getBaseURL();
|
||||
baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
|
||||
|
||||
HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
|
||||
HttpSolrClient baseClient = new HttpSolrClient(baseUrl);
|
||||
// we only set the connect timeout, not so timeout
|
||||
baseServer.setConnectionTimeout(30000);
|
||||
baseServer.request(request);
|
||||
baseServer.shutdown();
|
||||
baseServer = null;
|
||||
baseClient.setConnectionTimeout(30000);
|
||||
baseClient.request(request);
|
||||
baseClient.shutdown();
|
||||
baseClient = null;
|
||||
|
||||
waitForThingsToLevelOut(15);
|
||||
|
||||
|
|
|
@ -21,7 +21,7 @@ package org.apache.solr.cloud;
|
|||
import com.google.common.collect.Lists;
|
||||
import org.apache.solr.client.solrj.SolrRequest;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.CloudSolrClient;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
|
@ -64,7 +64,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
|
|||
|
||||
@Override
|
||||
public void doTest() throws Exception {
|
||||
CloudSolrServer client = createCloudClient(null);
|
||||
CloudSolrClient client = createCloudClient(null);
|
||||
try {
|
||||
createCollection(null, COLLECTION_NAME, 2, 2, 2, client, null, "conf1");
|
||||
createCollection(null, COLLECTION_NAME1, 1, 1, 1, client, null, "conf1");
|
||||
|
@ -89,7 +89,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
|
|||
}
|
||||
|
||||
private void clusterStatusWithCollectionAndShard() throws IOException, SolrServerException {
|
||||
CloudSolrServer client = createCloudClient(null);
|
||||
CloudSolrClient client = createCloudClient(null);
|
||||
try {
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
|
||||
|
@ -119,7 +119,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
|
|||
|
||||
|
||||
private void listCollection() throws IOException, SolrServerException {
|
||||
CloudSolrServer client = createCloudClient(null);
|
||||
CloudSolrClient client = createCloudClient(null);
|
||||
try {
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
params.set("action", CollectionParams.CollectionAction.LIST.toString());
|
||||
|
@ -141,7 +141,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
|
|||
}
|
||||
|
||||
private void clusterStatusNoCollection() throws Exception {
|
||||
CloudSolrServer client = createCloudClient(null);
|
||||
CloudSolrClient client = createCloudClient(null);
|
||||
try {
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
|
||||
|
@ -167,7 +167,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
|
|||
}
|
||||
|
||||
private void clusterStatusWithCollection() throws IOException, SolrServerException {
|
||||
CloudSolrServer client = createCloudClient(null);
|
||||
CloudSolrClient client = createCloudClient(null);
|
||||
try {
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
|
||||
|
@ -189,7 +189,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
|
|||
}
|
||||
|
||||
private void clusterStatusWithRouteKey() throws IOException, SolrServerException {
|
||||
CloudSolrServer client = createCloudClient(DEFAULT_COLLECTION);
|
||||
CloudSolrClient client = createCloudClient(DEFAULT_COLLECTION);
|
||||
try {
|
||||
SolrInputDocument doc = new SolrInputDocument();
|
||||
doc.addField("id", "a!123"); // goes to shard2. see ShardRoutingTest for details
|
||||
|
@ -222,7 +222,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
|
|||
}
|
||||
|
||||
private void clusterStatusAliasTest() throws Exception {
|
||||
CloudSolrServer client = createCloudClient(null);
|
||||
CloudSolrClient client = createCloudClient(null);
|
||||
try {
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
params.set("action", CollectionParams.CollectionAction.CREATEALIAS.toString());
|
||||
|
@ -259,7 +259,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
|
|||
}
|
||||
|
||||
private void clusterStatusRolesTest() throws Exception {
|
||||
CloudSolrServer client = createCloudClient(null);
|
||||
CloudSolrClient client = createCloudClient(null);
|
||||
try {
|
||||
client.connect();
|
||||
Replica replica = client.getZkStateReader().getLeaderRetry(DEFAULT_COLLECTION, SHARD1);
|
||||
|
@ -293,7 +293,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
|
|||
}
|
||||
|
||||
private void replicaPropTest() throws Exception {
|
||||
CloudSolrServer client = createCloudClient(null);
|
||||
CloudSolrClient client = createCloudClient(null);
|
||||
try {
|
||||
client.connect();
|
||||
Map<String, Slice> slices = client.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlicesMap();
|
||||
|
@ -577,7 +577,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
|
|||
|
||||
|
||||
// Expects the map will have keys, but blank values.
|
||||
private Map<String, String> getProps(CloudSolrServer client, String collectionName, String replicaName, String... props)
|
||||
private Map<String, String> getProps(CloudSolrClient client, String collectionName, String replicaName, String... props)
|
||||
throws KeeperException, InterruptedException {
|
||||
|
||||
client.getZkStateReader().updateClusterState(true);
|
||||
|
@ -592,7 +592,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
|
|||
}
|
||||
return propMap;
|
||||
}
|
||||
private void missingParamsError(CloudSolrServer client, ModifiableSolrParams origParams)
|
||||
private void missingParamsError(CloudSolrClient client, ModifiableSolrParams origParams)
|
||||
throws IOException, SolrServerException {
|
||||
|
||||
SolrRequest request;
|
||||
|
|
|
@ -17,13 +17,11 @@ package org.apache.solr.cloud;
|
|||
* limitations under the License.
*/

import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.ShardParams;
import org.apache.solr.common.util.StrUtils;
import org.junit.BeforeClass;

@ -116,9 +114,9 @@ public class TestDistribDocBasedVersion extends AbstractFullDistribZkTestBase {
log.info("### STARTING doTestHardFail");

// use a leader so we test both forwarding and non-forwarding logic
ss = shardToLeaderJetty.get(bucket1).client.solrClient;
solrClient = shardToLeaderJetty.get(bucket1).client.solrClient;

// ss = cloudClient; CloudSolrServer doesn't currently support propagating error codes
// solrClient = cloudClient; CloudSolrServer doesn't currently support propagating error codes

doTestHardFail("p!doc1");
doTestHardFail("q!doc1");
@ -139,7 +137,7 @@ public class TestDistribDocBasedVersion extends AbstractFullDistribZkTestBase {
log.info("### STARTING doTestDocVersions");
assertEquals(2, cloudClient.getZkStateReader().getClusterState().getCollection(DEFAULT_COLLECTION).getSlices().size());

ss = cloudClient;
solrClient = cloudClient;

vadd("b!doc1", 10);
vadd("c!doc2", 11);
@ -183,7 +181,7 @@ public class TestDistribDocBasedVersion extends AbstractFullDistribZkTestBase {
// now test with a non-smart client
//
// use a leader so we test both forwarding and non-forwarding logic
ss = shardToLeaderJetty.get(bucket1).client.solrClient;
solrClient = shardToLeaderJetty.get(bucket1).client.solrClient;

vadd("b!doc5", 10);
vadd("c!doc6", 11);
@ -237,7 +235,7 @@ public class TestDistribDocBasedVersion extends AbstractFullDistribZkTestBase {

}

SolrServer ss;
SolrClient solrClient;

void vdelete(String id, long version, String... params) throws Exception {
UpdateRequest req = new UpdateRequest();
@ -246,7 +244,7 @@ public class TestDistribDocBasedVersion extends AbstractFullDistribZkTestBase {
for (int i=0; i<params.length; i+=2) {
req.setParam( params[i], params[i+1]);
}
ss.request(req);
solrClient.request(req);
// req.process(cloudClient);
}

@ -256,7 +254,7 @@ public class TestDistribDocBasedVersion extends AbstractFullDistribZkTestBase {
for (int i=0; i<params.length; i+=2) {
req.setParam( params[i], params[i+1]);
}
ss.request(req);
solrClient.request(req);
}

void vaddFail(String id, long version, int errCode, String... params) throws Exception {
@ -315,7 +313,7 @@ public class TestDistribDocBasedVersion extends AbstractFullDistribZkTestBase {
expectedIds.put(strs.get(i), Long.valueOf(verS.get(i)));
}

ss.query(params("qt","/get", "ids",ids));
solrClient.query(params("qt", "/get", "ids", ids));

QueryResponse rsp = cloudClient.query(params("qt","/get", "ids",ids));
Map<String, Object> obtainedIds = new HashMap<>();
@ -327,7 +325,7 @@ public class TestDistribDocBasedVersion extends AbstractFullDistribZkTestBase {
}

void doRTG(String ids) throws Exception {
ss.query(params("qt","/get", "ids",ids));
solrClient.query(params("qt", "/get", "ids", ids));

Set<String> expectedIds = new HashSet<>( StrUtils.splitSmart(ids, ",", true) );
@ -17,18 +17,13 @@ package org.apache.solr.cloud;
* limitations under the License.
*/

import java.io.File;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.SuppressSysoutChecks;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CloudSolrServer;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrInputDocument;
@ -52,7 +47,11 @@ import org.junit.rules.TestRule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
import java.io.File;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
* Test of the MiniSolrCloudCluster functionality. Keep in mind,
@ -118,11 +117,11 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase {
assertTrue(startedServer.isRunning());
assertEquals(NUM_SERVERS, miniCluster.getJettySolrRunners().size());

CloudSolrServer cloudSolrServer = null;
CloudSolrClient cloudSolrClient = null;
SolrZkClient zkClient = null;
try {
cloudSolrServer = new CloudSolrServer(miniCluster.getZkServer().getZkAddress(), true);
cloudSolrServer.connect();
cloudSolrClient = new CloudSolrClient(miniCluster.getZkServer().getZkAddress(), true);
cloudSolrClient.connect();
zkClient = new SolrZkClient(miniCluster.getZkServer().getZkAddress(),
AbstractZkTestCase.TIMEOUT, 45000, null);

@ -131,20 +130,20 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase {
String configName = "solrCloudCollectionConfig";
System.setProperty("solr.tests.mergePolicy", "org.apache.lucene.index.TieredMergePolicy");
uploadConfigToZk(SolrTestCaseJ4.TEST_HOME() + File.separator + "collection1" + File.separator + "conf", configName);
createCollection(cloudSolrServer, collectionName, NUM_SHARDS, REPLICATION_FACTOR, configName);
createCollection(cloudSolrClient, collectionName, NUM_SHARDS, REPLICATION_FACTOR, configName);

// modify/query collection
cloudSolrServer.setDefaultCollection(collectionName);
cloudSolrClient.setDefaultCollection(collectionName);
SolrInputDocument doc = new SolrInputDocument();
doc.setField("id", "1");

ZkStateReader zkStateReader = new ZkStateReader(zkClient);
waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
cloudSolrServer.add(doc);
cloudSolrServer.commit();
cloudSolrClient.add(doc);
cloudSolrClient.commit();
SolrQuery query = new SolrQuery();
query.setQuery("*:*");
QueryResponse rsp = cloudSolrServer.query(query);
QueryResponse rsp = cloudSolrClient.query(query);
assertEquals(1, rsp.getResults().getNumFound());

// remove a server not hosting any replicas
@ -173,8 +172,8 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase {
}
}
} finally {
if (cloudSolrServer != null) {
cloudSolrServer.shutdown();
if (cloudSolrClient != null) {
cloudSolrClient.shutdown();
}
if (zkClient != null) {
zkClient.close();
@ -217,7 +216,7 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase {
zkClient.makePath(ZkController.CONFIGS_ZKNODE + "/" + configName + "/" + nameInZk, file, false, true);
}

protected NamedList<Object> createCollection(CloudSolrServer server, String name, int numShards,
protected NamedList<Object> createCollection(CloudSolrClient client, String name, int numShards,
int replicationFactor, String configName) throws Exception {
ModifiableSolrParams modParams = new ModifiableSolrParams();
modParams.set(CoreAdminParams.ACTION, CollectionAction.CREATE.name());
@ -227,7 +226,7 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase {
modParams.set("collection.configName", configName);
QueryRequest request = new QueryRequest(modParams);
request.setPath("/admin/collections");
return server.request(request);
return client.request(request);
}

protected void waitForRecoveriesToFinish(String collection,
@ -18,7 +18,7 @@ package org.apache.solr.cloud;

import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.params.ModifiableSolrParams;
@ -39,7 +39,7 @@ public class TestModifyConfFiles extends AbstractFullDistribZkTestBase {
@Override
public void doTest() throws Exception {
int which = r.nextInt(clients.size());
HttpSolrServer client = (HttpSolrServer) clients.get(which);
HttpSolrClient client = (HttpSolrClient) clients.get(which);

ModifiableSolrParams params = new ModifiableSolrParams();
params.set("op", "write");
@ -16,19 +16,10 @@ package org.apache.solr.cloud;
* limitations under the License.
*/

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.CloudSolrServer;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkStateReader;
@ -38,6 +29,12 @@ import org.apache.solr.common.util.NamedList;
import org.apache.zookeeper.KeeperException;
import org.junit.Before;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TestRebalanceLeaders extends AbstractFullDistribZkTestBase {

@ -65,7 +62,7 @@ public class TestRebalanceLeaders extends AbstractFullDistribZkTestBase {

@Override
public void doTest() throws Exception {
CloudSolrServer client = createCloudClient(null);
CloudSolrClient client = createCloudClient(null);
reps = random().nextInt(9) + 1; // make sure and do at least one.
try {
// Mix up a bunch of different combinations of shards and replicas in order to exercise boundary cases.
@ -247,11 +244,11 @@ public class TestRebalanceLeaders extends AbstractFullDistribZkTestBase {
return true;
}

byte[] getZkData(CloudSolrServer server, String path) {
byte[] getZkData(CloudSolrClient client, String path) {
org.apache.zookeeper.data.Stat stat = new org.apache.zookeeper.data.Stat();
long start = System.currentTimeMillis();
try {
byte[] data = server.getZkStateReader().getZkClient().getData(path, null, stat, true);
byte[] data = client.getZkStateReader().getZkClient().getData(path, null, stat, true);
if (data != null) {
return data;
}
@ -26,7 +26,7 @@ import java.util.Map;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.CloudSolrServer;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.ClusterState;
@ -58,7 +58,7 @@ public class TestReplicaProperties extends ReplicaPropertiesBase {

@Override
public void doTest() throws Exception {
CloudSolrServer client = createCloudClient(null);
CloudSolrClient client = createCloudClient(null);
try {
// Mix up a bunch of different combinations of shards and replicas in order to exercise boundary cases.
// shards, replicationfactor, maxreplicaspernode
@ -81,7 +81,7 @@ public class TestReplicaProperties extends ReplicaPropertiesBase {
}

private void listCollection() throws IOException, SolrServerException {
CloudSolrServer client = createCloudClient(null);
CloudSolrClient client = createCloudClient(null);
try {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("action", CollectionParams.CollectionAction.LIST.toString());
@ -101,7 +101,7 @@ public class TestReplicaProperties extends ReplicaPropertiesBase {

private void clusterAssignPropertyTest() throws Exception {
CloudSolrServer client = createCloudClient(null);
CloudSolrClient client = createCloudClient(null);
try {
client.connect();
try {
@ -204,7 +204,7 @@ public class TestReplicaProperties extends ReplicaPropertiesBase {
}
}

private void verifyLeaderAssignment(CloudSolrServer client, String collectionName)
private void verifyLeaderAssignment(CloudSolrClient client, String collectionName)
throws InterruptedException, KeeperException {
String lastFailMsg = "";
for (int idx = 0; idx < 300; ++idx) { // Keep trying while Overseer writes the ZK state for up to 30 seconds.
@ -239,7 +239,7 @@ public class TestReplicaProperties extends ReplicaPropertiesBase {
fail(lastFailMsg);
}

private void addProperty(CloudSolrServer client, String... paramsIn) throws IOException, SolrServerException {
private void addProperty(CloudSolrClient client, String... paramsIn) throws IOException, SolrServerException {
assertTrue("paramsIn must be an even multiple of 2, it is: " + paramsIn.length, (paramsIn.length % 2) == 0);
ModifiableSolrParams params = new ModifiableSolrParams();
for (int idx = 0; idx < paramsIn.length; idx += 2) {
@ -19,7 +19,7 @@ package org.apache.solr.cloud;

import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.ModifiableSolrParams;
@ -214,12 +214,12 @@ public class TestRequestStatusCollectionAPI extends BasicDistributedZkTest {
SolrRequest request = new QueryRequest(params);
request.setPath("/admin/collections");

String baseUrl = ((HttpSolrServer) shardToJetty.get(SHARD1).get(0).client.solrClient)
String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.solrClient)
.getBaseURL();
baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());

HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
baseServer.setConnectionTimeout(15000);
return baseServer.request(request);
HttpSolrClient baseClient = new HttpSolrClient(baseUrl);
baseClient.setConnectionTimeout(15000);
return baseClient.request(request);
}
}
@ -18,7 +18,7 @@ package org.apache.solr.cloud;
*/

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.params.ShardParams;
@ -48,7 +48,7 @@ public class TestShortCircuitedRequests extends AbstractFullDistribZkTestBase {
// query shard3 directly with _route_=a! so that we trigger the short circuited request path
Replica shard3 = cloudClient.getZkStateReader().getClusterState().getLeader(DEFAULT_COLLECTION, "shard3");
String nodeName = shard3.getNodeName();
SolrServer shard3Client = getClient(nodeName);
SolrClient shard3Client = getClient(nodeName);
QueryResponse response = shard3Client.query(new SolrQuery("*:*").add(ShardParams._ROUTE_, "a!").add(ShardParams.SHARDS_INFO, "true"));

assertEquals("Could not find doc", 1, response.getResults().getNumFound());
@ -17,19 +17,12 @@ package org.apache.solr.cloud;
* limitations under the License.
*/

import java.io.File;
import java.io.IOException;
import java.util.Random;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CoreAdminRequest.Create;
import org.apache.solr.client.solrj.request.CoreAdminRequest.Unload;
import org.apache.solr.common.SolrInputDocument;
@ -41,6 +34,13 @@ import org.apache.solr.util.DefaultSolrThreadFactory;
import org.junit.Before;
import org.junit.BeforeClass;

import java.io.File;
import java.io.IOException;
import java.util.Random;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

/**
* This test simply does a bunch of basic things in solrcloud mode and asserts things
* work as expected.
@ -92,12 +92,12 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
createCmd.setDataDir(getDataDir(coreDataDir));
createCmd.setNumShards(2);

SolrServer client = clients.get(0);
SolrClient client = clients.get(0);
String url1 = getBaseUrl(client);
HttpSolrServer server = new HttpSolrServer(url1);
server.setConnectionTimeout(15000);
server.setSoTimeout(60000);
server.request(createCmd);
HttpSolrClient adminClient = new HttpSolrClient(url1);
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(60000);
adminClient.request(createCmd);

createCmd = new Create();
createCmd.setCoreName("test_unload_shard_and_collection_2");
@ -106,7 +106,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
coreDataDir = createTempDir().toFile().getAbsolutePath();
createCmd.setDataDir(getDataDir(coreDataDir));

server.request(createCmd);
adminClient.request(createCmd);

// does not mean they are active and up yet :*
waitForRecoveriesToFinish(collection, false);
@ -114,10 +114,10 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
// now unload one of the two
Unload unloadCmd = new Unload(false);
unloadCmd.setCoreName("test_unload_shard_and_collection_2");
server.request(unloadCmd);
adminClient.request(unloadCmd);

// there should be only one shard
int slices = getCommonCloudSolrServer().getZkStateReader().getClusterState().getSlices(collection).size();
int slices = getCommonCloudSolrClient().getZkStateReader().getClusterState().getSlices(collection).size();
long timeoutAt = System.currentTimeMillis() + 45000;
while (slices != 1) {
if (System.currentTimeMillis() > timeoutAt) {
@ -126,20 +126,20 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
}

Thread.sleep(1000);
slices = getCommonCloudSolrServer().getZkStateReader().getClusterState().getSlices(collection).size();
slices = getCommonCloudSolrClient().getZkStateReader().getClusterState().getSlices(collection).size();
}

// now unload one of the other
unloadCmd = new Unload(false);
unloadCmd.setCoreName("test_unload_shard_and_collection_1");
server.request(unloadCmd);
server.shutdown();
server = null;
adminClient.request(unloadCmd);
adminClient.shutdown();
adminClient = null;

//printLayout();
// the collection should be gone
timeoutAt = System.currentTimeMillis() + 30000;
while (getCommonCloudSolrServer().getZkStateReader().getClusterState().hasCollection(collection)) {
while (getCommonCloudSolrClient().getZkStateReader().getClusterState().hasCollection(collection)) {
if (System.currentTimeMillis() > timeoutAt) {
printLayout();
fail("Still found collection");
@ -157,11 +157,11 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
File tmpDir = createTempDir().toFile();

// create a new collection collection
SolrServer client = clients.get(0);
SolrClient client = clients.get(0);
String url1 = getBaseUrl(client);
HttpSolrServer server = new HttpSolrServer(url1);
server.setConnectionTimeout(15000);
server.setSoTimeout(60000);
HttpSolrClient adminClient = new HttpSolrClient(url1);
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(60000);

Create createCmd = new Create();
createCmd.setCoreName("unloadcollection1");
@ -169,11 +169,11 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
createCmd.setNumShards(1);
String core1DataDir = tmpDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection1" + "_1n";
createCmd.setDataDir(getDataDir(core1DataDir));
server.request(createCmd);
server.shutdown();
server = null;
adminClient.request(createCmd);
adminClient.shutdown();
adminClient = null;

ZkStateReader zkStateReader = getCommonCloudSolrServer().getZkStateReader();
ZkStateReader zkStateReader = getCommonCloudSolrClient().getZkStateReader();

zkStateReader.updateClusterState(true);

@ -182,16 +182,16 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {

client = clients.get(1);
String url2 = getBaseUrl(client);
server = new HttpSolrServer(url2);
adminClient = new HttpSolrClient(url2);

createCmd = new Create();
createCmd.setCoreName("unloadcollection2");
createCmd.setCollection("unloadcollection");
String core2dataDir = tmpDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection1" + "_2n";
createCmd.setDataDir(getDataDir(core2dataDir));
server.request(createCmd);
server.shutdown();
server = null;
adminClient.request(createCmd);
adminClient.shutdown();
adminClient = null;

zkStateReader.updateClusterState(true);
slices = zkStateReader.getClusterState().getCollection("unloadcollection").getSlices().size();
@ -202,9 +202,9 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
ZkCoreNodeProps leaderProps = getLeaderUrlFromZk("unloadcollection", "shard1");

Random random = random();
HttpSolrServer collectionClient;
HttpSolrClient collectionClient;
if (random.nextBoolean()) {
collectionClient = new HttpSolrServer(leaderProps.getCoreUrl());
collectionClient = new HttpSolrClient(leaderProps.getCoreUrl());
// lets try and use the solrj client to index and retrieve a couple
// documents
SolrInputDocument doc1 = getDoc(id, 6, i1, -600, tlong, 600, t1,
@ -224,16 +224,16 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
// create another replica for our collection
client = clients.get(2);
String url3 = getBaseUrl(client);
server = new HttpSolrServer(url3);
adminClient = new HttpSolrClient(url3);

createCmd = new Create();
createCmd.setCoreName("unloadcollection3");
createCmd.setCollection("unloadcollection");
String core3dataDir = tmpDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection" + "_3n";
createCmd.setDataDir(getDataDir(core3dataDir));
server.request(createCmd);
server.shutdown();
server = null;
adminClient.request(createCmd);
adminClient.shutdown();
adminClient = null;

waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);
@ -241,7 +241,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
// so that we start with some versions when we reload...
DirectUpdateHandler2.commitOnClose = false;

HttpSolrServer addClient = new HttpSolrServer(url3 + "/unloadcollection3");
HttpSolrClient addClient = new HttpSolrClient(url3 + "/unloadcollection3");
addClient.setConnectionTimeout(30000);

// add a few docs
@ -257,7 +257,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
//collectionClient.commit();

// unload the leader
collectionClient = new HttpSolrServer(leaderProps.getBaseUrl());
collectionClient = new HttpSolrClient(leaderProps.getBaseUrl());
collectionClient.setConnectionTimeout(15000);
collectionClient.setSoTimeout(30000);

@ -283,7 +283,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
// ensure there is a leader
zkStateReader.getLeaderRetry("unloadcollection", "shard1", 15000);

addClient = new HttpSolrServer(url2 + "/unloadcollection2");
addClient = new HttpSolrClient(url2 + "/unloadcollection2");
addClient.setConnectionTimeout(30000);
addClient.setSoTimeout(90000);

@ -300,24 +300,24 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
// create another replica for our collection
client = clients.get(3);
String url4 = getBaseUrl(client);
server = new HttpSolrServer(url4);
server.setConnectionTimeout(15000);
server.setSoTimeout(30000);
adminClient = new HttpSolrClient(url4);
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(30000);

createCmd = new Create();
createCmd.setCoreName("unloadcollection4");
createCmd.setCollection("unloadcollection");
String core4dataDir = tmpDir.getAbsolutePath() + File.separator + System.currentTimeMillis() + "unloadcollection" + "_4n";
createCmd.setDataDir(getDataDir(core4dataDir));
server.request(createCmd);
server.shutdown();
server = null;
adminClient.request(createCmd);
adminClient.shutdown();
adminClient = null;

waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);

// unload the leader again
leaderProps = getLeaderUrlFromZk("unloadcollection", "shard1");
collectionClient = new HttpSolrServer(leaderProps.getBaseUrl());
collectionClient = new HttpSolrClient(leaderProps.getBaseUrl());
collectionClient.setConnectionTimeout(15000);
collectionClient.setSoTimeout(30000);

@ -343,64 +343,64 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
DirectUpdateHandler2.commitOnClose = true;

// bring the downed leader back as replica
server = new HttpSolrServer(leaderProps.getBaseUrl());
server.setConnectionTimeout(15000);
server.setSoTimeout(30000);
adminClient = new HttpSolrClient(leaderProps.getBaseUrl());
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(30000);

createCmd = new Create();
createCmd.setCoreName(leaderProps.getCoreName());
createCmd.setCollection("unloadcollection");
createCmd.setDataDir(getDataDir(core1DataDir));
server.request(createCmd);
server.shutdown();
server = null;
adminClient.request(createCmd);
adminClient.shutdown();
adminClient = null;

waitForRecoveriesToFinish("unloadcollection", zkStateReader, false);

server = new HttpSolrServer(url2 + "/unloadcollection");
server.setConnectionTimeout(15000);
server.setSoTimeout(30000);
server.commit();
adminClient = new HttpSolrClient(url2 + "/unloadcollection");
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(30000);
adminClient.commit();
SolrQuery q = new SolrQuery("*:*");
q.set("distrib", false);
long found1 = server.query(q).getResults().getNumFound();
server.shutdown();
server = new HttpSolrServer(url3 + "/unloadcollection");
server.setConnectionTimeout(15000);
server.setSoTimeout(30000);
server.commit();
long found1 = adminClient.query(q).getResults().getNumFound();
adminClient.shutdown();
adminClient = new HttpSolrClient(url3 + "/unloadcollection");
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(30000);
adminClient.commit();
q = new SolrQuery("*:*");
q.set("distrib", false);
long found3 = server.query(q).getResults().getNumFound();
server.shutdown();
server = new HttpSolrServer(url4 + "/unloadcollection");
server.setConnectionTimeout(15000);
server.setSoTimeout(30000);
server.commit();
long found3 = adminClient.query(q).getResults().getNumFound();
adminClient.shutdown();
adminClient = new HttpSolrClient(url4 + "/unloadcollection");
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(30000);
adminClient.commit();
q = new SolrQuery("*:*");
q.set("distrib", false);
long found4 = server.query(q).getResults().getNumFound();
long found4 = adminClient.query(q).getResults().getNumFound();

// all 3 shards should now have the same number of docs
assertEquals(found1, found3);
assertEquals(found3, found4);
server.shutdown();
adminClient.shutdown();

}

private void testUnloadLotsOfCores() throws Exception {
SolrServer client = clients.get(2);
SolrClient client = clients.get(2);
String url3 = getBaseUrl(client);
final HttpSolrServer server = new HttpSolrServer(url3);
server.setConnectionTimeout(15000);
server.setSoTimeout(60000);
final HttpSolrClient adminClient = new HttpSolrClient(url3);
adminClient.setConnectionTimeout(15000);
adminClient.setSoTimeout(60000);
ThreadPoolExecutor executor = new ThreadPoolExecutor(0, Integer.MAX_VALUE,
5, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
new DefaultSolrThreadFactory("testExecutor"));
int cnt = atLeast(3);

// create the cores
createCores(server, executor, "multiunload", 2, cnt);
createCores(adminClient, executor, "multiunload", 2, cnt);

executor.shutdown();
executor.awaitTermination(120, TimeUnit.SECONDS);
@ -415,7 +415,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
Unload unloadCmd = new Unload(true);
unloadCmd.setCoreName("multiunload" + freezeJ);
try {
server.request(unloadCmd);
adminClient.request(unloadCmd);
} catch (SolrServerException e) {
throw new RuntimeException(e);
} catch (IOException e) {
@ -427,7 +427,7 @@ public class UnloadDistributedZkTest extends BasicDistributedZkTest {
}
executor.shutdown();
executor.awaitTermination(120, TimeUnit.SECONDS);
server.shutdown();
adminClient.shutdown();
}

@ -17,11 +17,8 @@

package org.apache.solr.cloud.hdfs;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.store.NRTCachingDirectory;
@ -29,7 +26,7 @@ import org.apache.lucene.util.LuceneTestCase.Nightly;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.CloudSolrServer;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.cloud.BasicDistributedZkTest;
import org.apache.solr.cloud.StopableIndexingThread;
import org.apache.solr.core.CoreContainer;
@ -44,8 +41,10 @@ import org.apache.solr.util.RefCounted;
import org.junit.AfterClass;
import org.junit.BeforeClass;

import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

@Slow
@Nightly
@ -95,13 +94,13 @@ public class HdfsWriteToMultipleCollectionsTest extends BasicDistributedZkTest {
for (int i = 0; i < cnt; i++) {
waitForRecoveriesToFinish(ACOLLECTION + i, false);
}
List<CloudSolrServer> cloudServers = new ArrayList<>();
List<CloudSolrClient> cloudClients = new ArrayList<>();
List<StopableIndexingThread> threads = new ArrayList<>();
for (int i = 0; i < cnt; i++) {
CloudSolrServer server = new CloudSolrServer(zkServer.getZkAddress());
server.setDefaultCollection(ACOLLECTION + i);
cloudServers.add(server);
StopableIndexingThread indexThread = new StopableIndexingThread(null, server, "1", true, docCount);
CloudSolrClient client = new CloudSolrClient(zkServer.getZkAddress());
client.setDefaultCollection(ACOLLECTION + i);
cloudClients.add(client);
StopableIndexingThread indexThread = new StopableIndexingThread(null, client, "1", true, docCount);
threads.add(indexThread);
indexThread.start();
}
@ -113,13 +112,13 @@ public class HdfsWriteToMultipleCollectionsTest extends BasicDistributedZkTest {
}

long collectionsCount = 0;
for (CloudSolrServer server : cloudServers) {
server.commit();
collectionsCount += server.query(new SolrQuery("*:*")).getResults().getNumFound();
for (CloudSolrClient client : cloudClients) {
client.commit();
collectionsCount += client.query(new SolrQuery("*:*")).getResults().getNumFound();
}

for (CloudSolrServer server : cloudServers) {
server.shutdown();
for (CloudSolrClient client : cloudClients) {
client.shutdown();
}

assertEquals(addCnt, collectionsCount);
@ -33,9 +33,9 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.cloud.BasicDistributedZkTest;
import org.apache.solr.cloud.ChaosMonkey;
@ -162,8 +162,8 @@ public class StressHdfsTest extends BasicDistributedZkTest {
List<String> dataDirs = new ArrayList<>();

int i = 0;
for (SolrServer client : clients) {
HttpSolrServer c = new HttpSolrServer(getBaseUrl(client) + "/" + DELETE_DATA_DIR_COLLECTION);
for (SolrClient client : clients) {
HttpSolrClient c = new HttpSolrClient(getBaseUrl(client) + "/" + DELETE_DATA_DIR_COLLECTION);
try {
int docCnt = random().nextInt(1000) + 1;
for (int j = 0; j < docCnt; j++) {
@ -17,6 +17,21 @@

package org.apache.solr.core;

import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
@ -29,21 +44,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.common.SolrInputDocument;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

/**
* Incorporate the open/close stress tests into unit tests.
*/
@ -69,8 +69,8 @@ public class OpenCloseCoreStressTest extends SolrTestCaseJ4 {

File solrHomeDirectory;

List<HttpSolrServer> indexingServers = new ArrayList<>(indexingThreads);
List<HttpSolrServer> queryServers = new ArrayList<>(queryThreads);
List<HttpSolrClient> indexingClients = new ArrayList<>(indexingThreads);
List<HttpSolrClient> queryingClients = new ArrayList<>(queryThreads);

static String savedFactory;

@ -93,14 +93,14 @@ public class OpenCloseCoreStressTest extends SolrTestCaseJ4 {
@After
public void tearDownServer() throws Exception {
if (jetty != null) jetty.stop();
for(SolrServer server:indexingServers) {
server.shutdown();
for(SolrClient client: indexingClients) {
client.shutdown();
}
for(SolrServer server:queryServers) {
server.shutdown();
for(SolrClient client: queryingClients) {
client.shutdown();
}
indexingServers.clear();
queryServers.clear();
indexingClients.clear();
queryingClients.clear();
}

@Test
@ -145,25 +145,25 @@ public class OpenCloseCoreStressTest extends SolrTestCaseJ4 {
}

private void getServers() throws Exception {
private void buildClients() throws Exception {
jetty.start();
url = buildUrl(jetty.getLocalPort(), "/solr/");

// Mostly to keep annoying logging messages from being sent out all the time.

for (int idx = 0; idx < indexingThreads; ++idx) {
HttpSolrServer server = new HttpSolrServer(url);
server.setDefaultMaxConnectionsPerHost(25);
server.setConnectionTimeout(30000);
server.setSoTimeout(60000);
indexingServers.add(server);
HttpSolrClient client = new HttpSolrClient(url);
client.setDefaultMaxConnectionsPerHost(25);
client.setConnectionTimeout(30000);
client.setSoTimeout(60000);
indexingClients.add(client);
}
for (int idx = 0; idx < queryThreads; ++idx) {
HttpSolrServer server = new HttpSolrServer(url);
server.setDefaultMaxConnectionsPerHost(25);
server.setConnectionTimeout(30000);
server.setSoTimeout(30000);
queryServers.add(server);
HttpSolrClient client = new HttpSolrClient(url);
client.setDefaultMaxConnectionsPerHost(25);
client.setConnectionTimeout(30000);
client.setSoTimeout(30000);
queryingClients.add(client);
}

}
@ -173,7 +173,7 @@ public class OpenCloseCoreStressTest extends SolrTestCaseJ4 {
makeCores(solrHomeDirectory, oldStyle);

//MUST start the server after the cores are made.
getServers();
buildClients();

try {

@ -187,9 +187,9 @@ public class OpenCloseCoreStressTest extends SolrTestCaseJ4 {
log.info(String.format(Locale.ROOT, "\n\n\n\n\nStarting a %,d second cycle, seconds left: %,d. Seconds run so far: %,d.",
cycleSeconds, secondsRemaining, secondsRun));

Indexer idxer = new Indexer(this, url, indexingServers, indexingThreads, cycleSeconds, random());
Indexer idxer = new Indexer(this, url, indexingClients, indexingThreads, cycleSeconds, random());

Queries queries = new Queries(this, url, queryServers, queryThreads, random());
Queries queries = new Queries(this, url, queryingClients, queryThreads, random());

idxer.waitOnThreads();

@ -197,12 +197,12 @@ public class OpenCloseCoreStressTest extends SolrTestCaseJ4 {

secondsRemaining = Math.max(secondsRemaining - resetInterval, 0);

checkResults(queryServers.get(0), queries, idxer);
checkResults(queryingClients.get(0), queries, idxer);

secondsRun += cycleSeconds;

if (secondsRemaining > 0) {
deleteAllDocuments(queryServers.get(0), queries);
deleteAllDocuments(queryingClients.get(0), queries);
}
} while (secondsRemaining > 0);

@ -251,14 +251,14 @@ public class OpenCloseCoreStressTest extends SolrTestCaseJ4 {
}

void deleteAllDocuments(HttpSolrServer server, Queries queries) {
void deleteAllDocuments(HttpSolrClient client, Queries queries) {
log.info("Deleting data from last cycle, this may take a few minutes.");

for (String core : coreNames) {
try {
server.setBaseURL(url + core);
server.deleteByQuery("*:*");
server.optimize(true, true); // should be close to a no-op.
client.setBaseURL(url + core);
client.deleteByQuery("*:*");
client.optimize(true, true); // should be close to a no-op.
} catch (Exception e) {
e.printStackTrace();
}
@ -269,7 +269,7 @@ public class OpenCloseCoreStressTest extends SolrTestCaseJ4 {
long foundDocs = 0;
for (String core : coreNames) {
try {
long found = queries.getCount(server, core);
long found = queries.getCount(client, core);
assertEquals("Cores should be empty", found, 0L);
foundDocs += found;
} catch (Exception e) {
@ -287,21 +287,21 @@ public class OpenCloseCoreStressTest extends SolrTestCaseJ4 {
}
}

private void checkResults(HttpSolrServer server, Queries queries, Indexer idxer) throws InterruptedException {
private void checkResults(HttpSolrClient client, Queries queries, Indexer idxer) throws InterruptedException {
log.info("Checking if indexes have all the documents they should...");
long totalDocsFound = 0;
for (Map.Entry<String, Long> ent : coreCounts.entrySet()) {
server.setBaseURL(url + ent.getKey());
client.setBaseURL(url + ent.getKey());
for (int idx = 0; idx < 3; ++idx) {
try {
server.commit(true, true);
client.commit(true, true);
break; // retry loop
} catch (Exception e) {
log.warn("Exception when committing core " + ent.getKey() + " " + e.getMessage());
Thread.sleep(100L);
}
}
long numFound = queries.getCount(server, ent.getKey());
long numFound = queries.getCount(client, ent.getKey());
totalDocsFound += numFound;
assertEquals(String.format(Locale.ROOT, "Core %s bad!", ent.getKey()), (long) ent.getValue(), numFound);
}
@ -341,14 +341,14 @@ class Indexer {

ArrayList<OneIndexer> _threads = new ArrayList<>();

public Indexer(OpenCloseCoreStressTest OCCST, String url, List<HttpSolrServer> servers, int numThreads, int secondsToRun, Random random) {
public Indexer(OpenCloseCoreStressTest OCCST, String url, List<HttpSolrClient> clients, int numThreads, int secondsToRun, Random random) {
stopTime = System.currentTimeMillis() + (secondsToRun * 1000);
nextTime = System.currentTimeMillis() + 60000;
docsThisCycle.set(0);
qTimesAccum.set(0);
updateCounts.set(0);
for (int idx = 0; idx < numThreads; ++idx) {
OneIndexer one = new OneIndexer(OCCST, url, servers.get(idx), random.nextLong());
OneIndexer one = new OneIndexer(OCCST, url, clients.get(idx), random.nextLong());
_threads.add(one);
one.start();
}
@ -385,13 +385,13 @@ class Indexer {

class OneIndexer extends Thread {
private final OpenCloseCoreStressTest OCCST;
private final HttpSolrServer server;
private final HttpSolrClient client;
private final String baseUrl;
private final Random random;

OneIndexer(OpenCloseCoreStressTest OCCST, String url, HttpSolrServer server, long seed) {
OneIndexer(OpenCloseCoreStressTest OCCST, String url, HttpSolrClient client, long seed) {
this.OCCST = OCCST;
this.server = server;
this.client = client;
this.baseUrl = url;
this.random = new Random(seed);
}
@ -414,8 +414,8 @@ class OneIndexer extends Thread {
update.add(doc);

try {
server.setBaseURL(baseUrl + core);
UpdateResponse response = server.add(doc, OpenCloseCoreStressTest.COMMIT_WITHIN);
client.setBaseURL(baseUrl + core);
UpdateResponse response = client.add(doc, OpenCloseCoreStressTest.COMMIT_WITHIN);
if (response.getStatus() != 0) {
SolrTestCaseJ4.log.warn("Failed to index a document to core " + core + " with status " + response.getStatus());
} else {
@ -451,10 +451,10 @@ class Queries {
static AtomicInteger _errors = new AtomicInteger(0);
String baseUrl;

public Queries(OpenCloseCoreStressTest OCCST, String url, List<HttpSolrServer> servers, int numThreads, Random random) {
public Queries(OpenCloseCoreStressTest OCCST, String url, List<HttpSolrClient> clients, int numThreads, Random random) {
baseUrl = url;
for (int idx = 0; idx < numThreads; ++idx) {
Thread one = new OneQuery(OCCST, url, servers.get(idx), random.nextLong());
Thread one = new OneQuery(OCCST, url, clients.get(idx), random.nextLong());
_threads.add(one);
one.start();
}
@ -472,14 +472,14 @@ class Queries {
}
}

public long getCount(HttpSolrServer server, String core) {
public long getCount(HttpSolrClient client, String core) {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("qt", "/select");
params.set("q", "*:*");
long numFound = 0;
server.setBaseURL(baseUrl + core);
client.setBaseURL(baseUrl + core);
try {
QueryResponse response = server.query(params);
QueryResponse response = client.query(params);
numFound = response.getResults().getNumFound();
} catch (Exception e) {
e.printStackTrace();
@ -490,13 +490,13 @@ class Queries {

class OneQuery extends Thread {
OpenCloseCoreStressTest OCCST;
private final HttpSolrServer server;
private final HttpSolrClient client;
private final String baseUrl;
private final Random random;

OneQuery(OpenCloseCoreStressTest OCCST, String url, HttpSolrServer server, long seed) {
OneQuery(OpenCloseCoreStressTest OCCST, String url, HttpSolrClient client, long seed) {
this.OCCST = OCCST;
this.server = server;
this.client = client;
this.baseUrl = url;
this.random = new Random(seed);
}
@ -514,8 +514,8 @@ class OneQuery extends Thread {
try {
// sleep between 250ms and 10000 ms
Thread.sleep(100L); // Let's not go crazy here.
server.setBaseURL(baseUrl + core);
QueryResponse response = server.query(params);
client.setBaseURL(baseUrl + core);
QueryResponse response = client.query(params);

if (response.getStatus() != 0) {
SolrTestCaseJ4.log.warn("Failed to query core " + core + " with status " + response.getStatus());
@ -18,6 +18,17 @@ package org.apache.solr.core;
*/

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.handler.TestBlobHandler;
import org.apache.solr.util.RESTfulServerProvider;
import org.apache.solr.util.RestTestHarness;
import org.apache.solr.util.SimplePostTool;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
@ -28,27 +39,16 @@ import java.util.Map;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;

import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.handler.TestBlobHandler;
import org.apache.solr.util.RESTfulServerProvider;
import org.apache.solr.util.RestTestHarness;
import org.apache.solr.util.SimplePostTool;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class TestDynamicLoading extends AbstractFullDistribZkTestBase {
static final Logger log = LoggerFactory.getLogger(TestDynamicLoading.class);
private List<RestTestHarness> restTestHarnesses = new ArrayList<>();

private void setupHarnesses() {
for (final SolrServer client : clients) {
for (final SolrClient client : clients) {
RestTestHarness harness = new RestTestHarness(new RESTfulServerProvider() {
@Override
public String getBaseURL() {
return ((HttpSolrServer)client).getBaseURL();
return ((HttpSolrClient)client).getBaseURL();
}
});
restTestHarnesses.add(harness);
@ -85,10 +85,10 @@ public class TestDynamicLoading extends AbstractFullDistribZkTestBase {
assertNotNull(map = (Map) map.get("error"));
assertEquals(".system collection not available", map.get("msg"));

HttpSolrServer server = (HttpSolrServer) clients.get(random().nextInt(clients.size()));
String baseURL = server.getBaseURL();
HttpSolrClient randomClient = (HttpSolrClient) clients.get(random().nextInt(clients.size()));
String baseURL = randomClient.getBaseURL();
baseURL = baseURL.substring(0, baseURL.lastIndexOf('/'));
TestBlobHandler.createSysColl(new HttpSolrServer(baseURL,server.getHttpClient()));
TestBlobHandler.createSysColl(new HttpSolrClient(baseURL,randomClient.getHttpClient()));
map = TestSolrConfigHandler.getRespMap("/test1?wt=json", client);

assertNotNull(map = (Map) map.get("error"));
|
|
|
@ -35,7 +35,7 @@ import java.util.concurrent.TimeUnit;
import com.google.common.collect.ImmutableList;
import org.apache.commons.io.FileUtils;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.impl.CloudSolrServer;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.handler.TestSolrConfigHandlerConcurrent;
import org.apache.solr.util.RestTestBase;
@ -85,7 +85,7 @@ public class TestSolrConfigHandler extends RestTestBase {
jetty.stop();
jetty = null;
}
server = null;
client = null;
restTestHarness = null;
}

@ -153,7 +153,7 @@ public class TestSolrConfigHandler extends RestTestBase {
}

public static void reqhandlertests(RestTestHarness writeHarness,String testServerBaseUrl, CloudSolrServer cloudSolrServer) throws Exception {
public static void reqhandlertests(RestTestHarness writeHarness,String testServerBaseUrl, CloudSolrClient cloudSolrServer) throws Exception {
String payload = "{\n" +
"'create-requesthandler' : { 'name' : '/x', 'class': 'org.apache.solr.handler.DumpRequestHandler' , 'startup' : 'lazy'}\n" +
"}";
@ -204,7 +204,7 @@ public class TestSolrConfigHandler extends RestTestBase {
public static void testForResponseElement(RestTestHarness harness,
String testServerBaseUrl,
String uri,
CloudSolrServer cloudSolrServer,List<String> jsonPath,
CloudSolrClient cloudSolrServer,List<String> jsonPath,
String expected,
long maxTimeoutSeconds ) throws Exception {

@ -17,14 +17,6 @@ package org.apache.solr.handler;
* limitations under the License.
*/

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
@ -32,10 +24,10 @@ import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.util.EntityUtils;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.CloudSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
@ -48,20 +40,28 @@ import org.apache.solr.util.SimplePostTool;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import static org.apache.solr.core.ConfigOverlay.getObjectByPath;

public class TestBlobHandler extends AbstractFullDistribZkTestBase {
static final Logger log = LoggerFactory.getLogger(TestBlobHandler.class);

private void doBlobHandlerTest() throws Exception {
SolrServer server = createNewSolrServer("", getBaseUrl((HttpSolrServer) clients.get(0)));
SolrClient client = createNewSolrClient("", getBaseUrl((HttpSolrClient) clients.get(0)));

CollectionAdminResponse response1;
CollectionAdminRequest.Create createCollectionRequest = new CollectionAdminRequest.Create();
createCollectionRequest.setCollectionName(".system");
createCollectionRequest.setNumShards(1);
createCollectionRequest.setReplicationFactor(2);
response1 = createCollectionRequest.process(server);
response1 = createCollectionRequest.process(client);
assertEquals(0, response1.getStatus());
assertTrue(response1.isSuccess());
DocCollection sysColl = cloudClient.getZkStateReader().getClusterState().getCollection(".system");
@ -96,13 +96,13 @@ public class TestBlobHandler extends AbstractFullDistribZkTestBase {

}

public static void createSysColl(SolrServer server) throws SolrServerException, IOException {
public static void createSysColl(SolrClient client) throws SolrServerException, IOException {
CollectionAdminResponse response1;
CollectionAdminRequest.Create createCollectionRequest = new CollectionAdminRequest.Create();
createCollectionRequest.setCollectionName(".system");
createCollectionRequest.setNumShards(1);
createCollectionRequest.setReplicationFactor(2);
response1 = createCollectionRequest.process(server);
response1 = createCollectionRequest.process(client);
assertEquals(0, response1.getStatus());
assertTrue(response1.isSuccess());
}
@ -117,7 +117,7 @@ public class TestBlobHandler extends AbstractFullDistribZkTestBase {
DirectUpdateHandler2.commitOnClose = true;
}

public static void postAndCheck(CloudSolrServer cloudClient, String baseUrl, ByteBuffer bytes, int count) throws Exception {
public static void postAndCheck(CloudSolrClient cloudClient, String baseUrl, ByteBuffer bytes, int count) throws Exception {
postData(cloudClient, baseUrl, bytes);
String url;
Map map;
@ -144,7 +144,7 @@ public class TestBlobHandler extends AbstractFullDistribZkTestBase {

private void compareInputAndOutput(String url, byte[] bytarr) throws IOException {

HttpClient httpClient = cloudClient.getLbServer().getHttpClient();
HttpClient httpClient = cloudClient.getLbClient().getHttpClient();

HttpGet httpGet = new HttpGet(url);
HttpResponse entity = httpClient.execute(httpGet);
@ -160,7 +160,7 @@ public class TestBlobHandler extends AbstractFullDistribZkTestBase {

}

public static String postData(CloudSolrServer cloudClient, String baseUrl, ByteBuffer bytarr) throws IOException {
public static String postData(CloudSolrClient cloudClient, String baseUrl, ByteBuffer bytarr) throws IOException {
HttpPost httpPost = null;
HttpEntity entity;
String response;
@ -168,7 +168,7 @@ public class TestBlobHandler extends AbstractFullDistribZkTestBase {
httpPost = new HttpPost(baseUrl+"/.system/blob/test");
httpPost.setHeader("Content-Type","application/octet-stream");
httpPost.setEntity(new ByteArrayEntity(bytarr.array(), bytarr.arrayOffset(), bytarr.limit()));
entity = cloudClient.getLbServer().getHttpClient().execute(httpPost).getEntity();
entity = cloudClient.getLbClient().getHttpClient().execute(httpPost).getEntity();
return EntityUtils.toString(entity, StandardCharsets.UTF_8);
} finally {
httpPost.releaseConnection();
|
|
@@ -29,8 +29,8 @@ import java.util.concurrent.TimeUnit;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.util.EntityUtils;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.common.cloud.DocCollection;
@@ -59,11 +59,11 @@ public class TestConfigReload extends AbstractFullDistribZkTestBase {
private List<RestTestHarness> restTestHarnesses = new ArrayList<>();

private void setupHarnesses() {
for (final SolrServer client : clients) {
for (final SolrClient client : clients) {
RestTestHarness harness = new RestTestHarness(new RESTfulServerProvider() {
@Override
public String getBaseURL() {
return ((HttpSolrServer)client).getBaseURL();
return ((HttpSolrClient)client).getBaseURL();
}
});
restTestHarnesses.add(harness);
@@ -128,7 +128,7 @@ public class TestConfigReload extends AbstractFullDistribZkTestBase {
HttpGet get = new HttpGet(uri) ;
HttpEntity entity = null;
try {
entity = cloudClient.getLbServer().getHttpClient().execute(get).getEntity();
entity = cloudClient.getLbClient().getHttpClient().execute(get).getEntity();
String response = EntityUtils.toString(entity, StandardCharsets.UTF_8);
return (Map) ObjectBuilder.getVal(new JSONParser(new StringReader(response)));
} finally {

@ -16,26 +16,6 @@
|
|||
*/
|
||||
package org.apache.solr.handler;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.InputStreamReader;
|
||||
import java.io.OutputStreamWriter;
|
||||
import java.io.Writer;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Date;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.FSDirectory;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
|
@ -44,10 +24,10 @@ import org.apache.lucene.util.TestUtil;
|
|||
import org.apache.solr.BaseDistributedSearchTestCase;
|
||||
import org.apache.solr.SolrTestCaseJ4;
|
||||
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.client.solrj.request.UpdateRequest;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
|
@ -71,6 +51,26 @@ import org.junit.Before;
|
|||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.FileOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.InputStreamReader;
|
||||
import java.io.OutputStreamWriter;
|
||||
import java.io.Writer;
|
||||
import java.net.MalformedURLException;
|
||||
import java.net.URL;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Date;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* Test for ReplicationHandler
|
||||
*
|
||||
|
@ -87,7 +87,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
+ File.separator;
|
||||
|
||||
JettySolrRunner masterJetty, slaveJetty, repeaterJetty;
|
||||
SolrServer masterClient, slaveClient, repeaterClient;
|
||||
SolrClient masterClient, slaveClient, repeaterClient;
|
||||
SolrInstance master = null, slave = null, repeater = null;
|
||||
|
||||
static String context = "/solr";
|
||||
|
@ -110,12 +110,12 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
master = new SolrInstance(createTempDir("solr-instance").toFile(), "master", null);
|
||||
master.setUp();
|
||||
masterJetty = createJetty(master);
|
||||
masterClient = createNewSolrServer(masterJetty.getLocalPort());
|
||||
masterClient = createNewSolrClient(masterJetty.getLocalPort());
|
||||
|
||||
slave = new SolrInstance(createTempDir("solr-instance").toFile(), "slave", masterJetty.getLocalPort());
|
||||
slave.setUp();
|
||||
slaveJetty = createJetty(slave);
|
||||
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
|
||||
slaveClient = createNewSolrClient(slaveJetty.getLocalPort());
|
||||
}
|
||||
|
||||
public void clearIndexWithReplication() throws Exception {
|
||||
|
@ -150,22 +150,22 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
return jetty;
|
||||
}
|
||||
|
||||
private static SolrServer createNewSolrServer(int port) {
|
||||
private static SolrClient createNewSolrClient(int port) {
|
||||
try {
|
||||
// setup the server...
|
||||
HttpSolrServer s = new HttpSolrServer(buildUrl(port));
|
||||
s.setConnectionTimeout(15000);
|
||||
s.setSoTimeout(60000);
|
||||
s.setDefaultMaxConnectionsPerHost(100);
|
||||
s.setMaxTotalConnections(100);
|
||||
return s;
|
||||
// setup the client...
|
||||
HttpSolrClient client = new HttpSolrClient(buildUrl(port));
|
||||
client.setConnectionTimeout(15000);
|
||||
client.setSoTimeout(60000);
|
||||
client.setDefaultMaxConnectionsPerHost(100);
|
||||
client.setMaxTotalConnections(100);
|
||||
return client;
|
||||
}
|
||||
catch (Exception ex) {
|
||||
throw new RuntimeException(ex);
|
||||
}
|
||||
}
|
||||
|
||||
int index(SolrServer s, Object... fields) throws Exception {
|
||||
int index(SolrClient s, Object... fields) throws Exception {
|
||||
SolrInputDocument doc = new SolrInputDocument();
|
||||
for (int i = 0; i < fields.length; i += 2) {
|
||||
doc.addField((String) (fields[i]), fields[i + 1]);
|
||||
|
@ -173,7 +173,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
return s.add(doc).getStatus();
|
||||
}
|
||||
|
||||
NamedList query(String query, SolrServer s) throws SolrServerException {
|
||||
NamedList query(String query, SolrClient s) throws SolrServerException {
|
||||
NamedList res = new SimpleOrderedMap();
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
|
||||
|
@ -188,15 +188,15 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
}
|
||||
|
||||
/** will sleep up to 30 seconds, looking for expectedDocCount */
|
||||
private NamedList rQuery(int expectedDocCount, String query, SolrServer server) throws Exception {
|
||||
private NamedList rQuery(int expectedDocCount, String query, SolrClient client) throws Exception {
|
||||
int timeSlept = 0;
|
||||
NamedList res = query(query, server);
|
||||
NamedList res = query(query, client);
|
||||
while (expectedDocCount != numFound(res)
|
||||
&& timeSlept < 30000) {
|
||||
log.info("Waiting for " + expectedDocCount + " docs");
|
||||
timeSlept += 100;
|
||||
Thread.sleep(100);
|
||||
res = query(query, server);
|
||||
res = query(query, client);
|
||||
}
|
||||
log.info("Waited for {}ms and found {} docs", timeSlept, numFound(res));
|
||||
return res;
|
||||
|
@ -206,7 +206,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
return ((SolrDocumentList) res.get("response")).getNumFound();
|
||||
}
|
||||
|
||||
private NamedList<Object> getDetails(SolrServer s) throws Exception {
|
||||
private NamedList<Object> getDetails(SolrClient s) throws Exception {
|
||||
|
||||
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
|
@ -227,7 +227,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
return details;
|
||||
}
|
||||
|
||||
private NamedList<Object> getCommits(SolrServer s) throws Exception {
|
||||
private NamedList<Object> getCommits(SolrClient s) throws Exception {
|
||||
|
||||
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
|
@ -244,7 +244,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
return res;
|
||||
}
|
||||
|
||||
private NamedList<Object> getIndexVersion(SolrServer s) throws Exception {
|
||||
private NamedList<Object> getIndexVersion(SolrClient s) throws Exception {
|
||||
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
params.set("command","indexversion");
|
||||
|
@ -260,7 +260,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
return res;
|
||||
}
|
||||
|
||||
private NamedList<Object> reloadCore(SolrServer s, String core) throws Exception {
|
||||
private NamedList<Object> reloadCore(SolrClient s, String core) throws Exception {
|
||||
|
||||
ModifiableSolrParams params = new ModifiableSolrParams();
|
||||
params.set("action","reload");
|
||||
|
@ -314,12 +314,12 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
|
||||
SolrInstance repeater = null;
|
||||
JettySolrRunner repeaterJetty = null;
|
||||
SolrServer repeaterClient = null;
|
||||
SolrClient repeaterClient = null;
|
||||
try {
|
||||
repeater = new SolrInstance(createTempDir("solr-instance").toFile(), "repeater", masterJetty.getLocalPort());
|
||||
repeater.setUp();
|
||||
repeaterJetty = createJetty(repeater);
|
||||
repeaterClient = createNewSolrServer(repeaterJetty.getLocalPort());
|
||||
repeaterClient = createNewSolrClient(repeaterJetty.getLocalPort());
|
||||
|
||||
|
||||
NamedList<Object> details = getDetails(repeaterClient);
|
||||
|
@ -530,7 +530,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
|
||||
masterJetty = createJetty(master);
|
||||
masterClient.shutdown();
|
||||
masterClient = createNewSolrServer(masterJetty.getLocalPort());
|
||||
masterClient = createNewSolrClient(masterJetty.getLocalPort());
|
||||
|
||||
slave.setTestPort(masterJetty.getLocalPort());
|
||||
slave.copyConfigFile(slave.getSolrConfigFile(), "solrconfig.xml");
|
||||
|
@ -549,7 +549,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
|
||||
slaveJetty = createJetty(slave);
|
||||
slaveClient.shutdown();
|
||||
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
|
||||
slaveClient = createNewSolrClient(slaveJetty.getLocalPort());
|
||||
|
||||
//add a doc with new field and commit on master to trigger snappull from slave.
|
||||
index(masterClient, "id", "2000", "name", "name = " + 2000, "newname", "newname = " + 2000);
|
||||
|
@ -626,7 +626,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
slaveJetty.stop();
|
||||
slaveJetty = createJetty(slave);
|
||||
slaveClient.shutdown();
|
||||
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
|
||||
slaveClient = createNewSolrClient(slaveJetty.getLocalPort());
|
||||
|
||||
masterClient.deleteByQuery("*:*");
|
||||
slaveClient.deleteByQuery("*:*");
|
||||
|
@ -762,14 +762,14 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
slaveJetty.stop();
|
||||
slaveJetty = createJetty(slave);
|
||||
slaveClient.shutdown();
|
||||
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
|
||||
slaveClient = createNewSolrClient(slaveJetty.getLocalPort());
|
||||
|
||||
master.copyConfigFile(CONF_DIR + "solrconfig-master3.xml",
|
||||
"solrconfig.xml");
|
||||
masterJetty.stop();
|
||||
masterJetty = createJetty(master);
|
||||
masterClient.shutdown();
|
||||
masterClient = createNewSolrServer(masterJetty.getLocalPort());
|
||||
masterClient = createNewSolrClient(masterJetty.getLocalPort());
|
||||
|
||||
masterClient.deleteByQuery("*:*");
|
||||
slaveClient.deleteByQuery("*:*");
|
||||
|
@ -884,7 +884,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
slaveJetty.stop();
|
||||
slaveJetty = createJetty(slave);
|
||||
slaveClient.shutdown();
|
||||
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
|
||||
slaveClient = createNewSolrClient(slaveJetty.getLocalPort());
|
||||
|
||||
try {
|
||||
repeater = new SolrInstance(createTempDir("solr-instance").toFile(), "repeater", null);
|
||||
|
@ -895,7 +895,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
if (repeaterClient != null) {
|
||||
repeaterClient.shutdown();
|
||||
}
|
||||
repeaterClient = createNewSolrServer(repeaterJetty.getLocalPort());
|
||||
repeaterClient = createNewSolrClient(repeaterJetty.getLocalPort());
|
||||
|
||||
for (int i = 0; i < 3; i++)
|
||||
index(masterClient, "id", i, "name", "name = " + i);
|
||||
|
@ -948,7 +948,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
|
||||
}
|
||||
|
||||
private void assertVersions(SolrServer client1, SolrServer client2) throws Exception {
|
||||
private void assertVersions(SolrClient client1, SolrClient client2) throws Exception {
|
||||
NamedList<Object> details = getDetails(client1);
|
||||
ArrayList<NamedList<Object>> commits = (ArrayList<NamedList<Object>>) details.get("commits");
|
||||
Long maxVersionClient1 = getVersion(client1);
|
||||
|
@ -975,7 +975,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
assertEquals(maxVersionClient2, version);
|
||||
}
|
||||
|
||||
private Long getVersion(SolrServer client) throws Exception {
|
||||
private Long getVersion(SolrClient client) throws Exception {
|
||||
NamedList<Object> details;
|
||||
ArrayList<NamedList<Object>> commits;
|
||||
details = getDetails(client);
|
||||
|
@ -1025,7 +1025,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
|
||||
masterJetty = createJetty(master);
|
||||
masterClient.shutdown();
|
||||
masterClient = createNewSolrServer(masterJetty.getLocalPort());
|
||||
masterClient = createNewSolrClient(masterJetty.getLocalPort());
|
||||
|
||||
for (int i = 0; i < nDocs; i++)
|
||||
index(masterClient, "id", i, "name", "name = " + i);
|
||||
|
@ -1043,7 +1043,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
//start slave
|
||||
slaveJetty = createJetty(slave);
|
||||
slaveClient.shutdown();
|
||||
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
|
||||
slaveClient = createNewSolrClient(slaveJetty.getLocalPort());
|
||||
|
||||
//get docs from slave and check if number is equal to master
|
||||
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
|
||||
|
@ -1077,7 +1077,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
|
||||
masterJetty = createJetty(master);
|
||||
masterClient.shutdown();
|
||||
masterClient = createNewSolrServer(masterJetty.getLocalPort());
|
||||
masterClient = createNewSolrClient(masterJetty.getLocalPort());
|
||||
|
||||
for (int i = 0; i < nDocs; i++)
|
||||
index(masterClient, "id", i, "name", "name = " + i);
|
||||
|
@ -1090,7 +1090,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
masterJetty.stop();
|
||||
masterJetty.start(true);
|
||||
|
||||
// masterClient = createNewSolrServer(masterJetty.getLocalPort());
|
||||
// masterClient = createNewSolrClient(masterJetty.getLocalPort());
|
||||
|
||||
NamedList masterQueryRsp = rQuery(nDocs, "*:*", masterClient);
|
||||
SolrDocumentList masterQueryResult = (SolrDocumentList) masterQueryRsp
|
||||
|
@ -1103,7 +1103,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
// start slave
|
||||
slaveJetty = createJetty(slave);
|
||||
slaveClient.shutdown();
|
||||
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
|
||||
slaveClient = createNewSolrClient(slaveJetty.getLocalPort());
|
||||
|
||||
// get docs from slave and check if number is equal to master
|
||||
NamedList slaveQueryRsp = rQuery(nDocs, "*:*", slaveClient);
|
||||
|
@ -1137,7 +1137,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
|
||||
masterJetty = createJetty(master);
|
||||
masterClient.shutdown();
|
||||
masterClient = createNewSolrServer(masterJetty.getLocalPort());
|
||||
masterClient = createNewSolrClient(masterJetty.getLocalPort());
|
||||
|
||||
masterClient.deleteByQuery("*:*");
|
||||
for (int i = 0; i < docs; i++)
|
||||
|
@ -1155,7 +1155,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
//start slave
|
||||
slaveJetty = createJetty(slave);
|
||||
slaveClient.shutdown();
|
||||
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
|
||||
slaveClient = createNewSolrClient(slaveJetty.getLocalPort());
|
||||
|
||||
//get docs from slave and check if number is equal to master
|
||||
NamedList slaveQueryRsp = rQuery(docs, "*:*", slaveClient);
|
||||
|
@ -1236,7 +1236,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
|
||||
masterJetty = createJetty(master);
|
||||
masterClient.shutdown();
|
||||
masterClient = createNewSolrServer(masterJetty.getLocalPort());
|
||||
masterClient = createNewSolrClient(masterJetty.getLocalPort());
|
||||
|
||||
slave.setTestPort(masterJetty.getLocalPort());
|
||||
slave.copyConfigFile(slave.getSolrConfigFile(), "solrconfig.xml");
|
||||
|
@ -1244,7 +1244,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
slaveJetty.stop();
|
||||
slaveJetty = createJetty(slave);
|
||||
slaveClient.shutdown();
|
||||
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
|
||||
slaveClient = createNewSolrClient(slaveJetty.getLocalPort());
|
||||
|
||||
slaveClient.deleteByQuery("*:*");
|
||||
slaveClient.commit();
|
||||
|
@ -1297,7 +1297,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
useFactory(null);
|
||||
masterJetty = createJetty(master);
|
||||
masterClient.shutdown();
|
||||
masterClient = createNewSolrServer(masterJetty.getLocalPort());
|
||||
masterClient = createNewSolrClient(masterJetty.getLocalPort());
|
||||
|
||||
//index docs
|
||||
final int totalDocs = TestUtil.nextInt(random(), 50, 100);
|
||||
|
@ -1323,14 +1323,14 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
//Start again and replicate the data
|
||||
useFactory(null);
|
||||
masterJetty = createJetty(master);
|
||||
masterClient = createNewSolrServer(masterJetty.getLocalPort());
|
||||
masterClient = createNewSolrClient(masterJetty.getLocalPort());
|
||||
|
||||
//start slave
|
||||
slave.setTestPort(masterJetty.getLocalPort());
|
||||
slave.copyConfigFile(CONF_DIR + "solrconfig-slave1.xml", "solrconfig.xml");
|
||||
slaveJetty = createJetty(slave);
|
||||
slaveClient.shutdown();
|
||||
slaveClient = createNewSolrServer(slaveJetty.getLocalPort());
|
||||
slaveClient = createNewSolrClient(slaveJetty.getLocalPort());
|
||||
|
||||
long startTime = System.nanoTime();
|
||||
|
||||
|
@ -1359,9 +1359,9 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
|
||||
private class AddExtraDocs implements Runnable {
|
||||
|
||||
SolrServer masterClient;
|
||||
SolrClient masterClient;
|
||||
int startId;
|
||||
public AddExtraDocs(SolrServer masterClient, int startId) {
|
||||
public AddExtraDocs(SolrClient masterClient, int startId) {
|
||||
this.masterClient = masterClient;
|
||||
this.startId = startId;
|
||||
}
|
||||
|
@ -1404,7 +1404,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
out.close();
|
||||
}
|
||||
|
||||
private UpdateResponse emptyUpdate(SolrServer client, String... params)
|
||||
private UpdateResponse emptyUpdate(SolrClient client, String... params)
|
||||
throws SolrServerException, IOException {
|
||||
|
||||
UpdateRequest req = new UpdateRequest();
|
||||
|
@ -1417,12 +1417,12 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
|
|||
* time for collection is after the specified "min". Will loop for
|
||||
* at most "timeout" milliseconds before throwing an assertion failure.
|
||||
*
|
||||
* @param client The SolrServer to poll
|
||||
* @param client The SolrClient to poll
|
||||
* @param timeout the max milliseconds to continue polling for
|
||||
* @param min the startTime value must exceed this value before the method will return, if null this method will return the first startTime value encountered.
|
||||
* @return the startTime value of collection
|
||||
*/
|
||||
private Date watchCoreStartAt(SolrServer client, final long timeout,
|
||||
private Date watchCoreStartAt(SolrClient client, final long timeout,
|
||||
final Date min) throws InterruptedException, IOException, SolrServerException {
|
||||
final long sleepInterval = 200;
|
||||
long timeSlept = 0;
|
||||
|
|
|
@ -17,15 +17,6 @@ package org.apache.solr.handler;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FilenameFilter;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.net.URL;
|
||||
import java.nio.file.Path;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
|
@ -37,21 +28,30 @@ import org.apache.lucene.store.SimpleFSDirectory;
|
|||
import org.apache.lucene.util.TestUtil;
|
||||
import org.apache.solr.SolrJettyTestBase;
|
||||
import org.apache.solr.SolrTestCaseJ4;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
import org.apache.solr.util.FileUtils;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FilenameFilter;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.net.URL;
|
||||
import java.nio.file.Path;
|
||||
import java.util.regex.Matcher;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
@SolrTestCaseJ4.SuppressSSL // Currently unknown why SSL does not work with this test
|
||||
public class TestReplicationHandlerBackup extends SolrJettyTestBase {
|
||||
|
||||
JettySolrRunner masterJetty;
|
||||
TestReplicationHandler.SolrInstance master = null;
|
||||
SolrServer masterClient;
|
||||
SolrClient masterClient;
|
||||
|
||||
private static final String CONF_DIR = "solr"
|
||||
+ File.separator + "collection1" + File.separator + "conf"
|
||||
|
@ -70,15 +70,15 @@ public class TestReplicationHandlerBackup extends SolrJettyTestBase {
|
|||
return jetty;
|
||||
}
|
||||
|
||||
private static SolrServer createNewSolrServer(int port) {
|
||||
private static SolrClient createNewSolrClient(int port) {
|
||||
try {
|
||||
// setup the server...
|
||||
HttpSolrServer s = new HttpSolrServer(buildUrl(port, context));
|
||||
s.setConnectionTimeout(15000);
|
||||
s.setSoTimeout(60000);
|
||||
s.setDefaultMaxConnectionsPerHost(100);
|
||||
s.setMaxTotalConnections(100);
|
||||
return s;
|
||||
// setup the client...
|
||||
HttpSolrClient client = new HttpSolrClient(buildUrl(port, context));
|
||||
client.setConnectionTimeout(15000);
|
||||
client.setSoTimeout(60000);
|
||||
client.setDefaultMaxConnectionsPerHost(100);
|
||||
client.setMaxTotalConnections(100);
|
||||
return client;
|
||||
}
|
||||
catch (Exception ex) {
|
||||
throw new RuntimeException(ex);
|
||||
|
@ -101,7 +101,7 @@ public class TestReplicationHandlerBackup extends SolrJettyTestBase {
|
|||
master.copyConfigFile(CONF_DIR + configFile, "solrconfig.xml");
|
||||
|
||||
masterJetty = createJetty(master);
|
||||
masterClient = createNewSolrServer(masterJetty.getLocalPort());
|
||||
masterClient = createNewSolrClient(masterJetty.getLocalPort());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -24,8 +24,8 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;

import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
@@ -43,11 +43,11 @@ public class TestSolrConfigHandlerCloud extends AbstractFullDistribZkTestBase {
private List<RestTestHarness> restTestHarnesses = new ArrayList<>();

private void setupHarnesses() {
for (final SolrServer client : clients) {
for (final SolrClient client : clients) {
RestTestHarness harness = new RestTestHarness(new RESTfulServerProvider() {
@Override
public String getBaseURL() {
return ((HttpSolrServer)client).getBaseURL();
return ((HttpSolrClient)client).getBaseURL();
}
});
restTestHarnesses.add(harness);

@@ -31,11 +31,10 @@ import java.util.concurrent.TimeUnit;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.util.EntityUtils;
import org.apache.lucene.queryparser.xml.ParserException;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.impl.CloudSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.cloud.AbstractFullDistribZkTestBase;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
@@ -51,7 +50,6 @@ import org.slf4j.LoggerFactory;

import static java.util.Arrays.asList;
import static org.apache.solr.core.ConfigOverlay.getObjectByPath;
import static org.apache.solr.rest.schema.TestBulkSchemaAPI.getAsMap;
import static org.noggit.ObjectBuilder.getVal;

@@ -62,11 +60,11 @@ public class TestSolrConfigHandlerConcurrent extends AbstractFullDistribZkTestBa
private List<RestTestHarness> restTestHarnesses = new ArrayList<>();

private void setupHarnesses() {
for (final SolrServer client : clients) {
for (final SolrClient client : clients) {
RestTestHarness harness = new RestTestHarness(new RESTfulServerProvider() {
@Override
public String getBaseURL() {
return ((HttpSolrServer)client).getBaseURL();
return ((HttpSolrClient)client).getBaseURL();
}
});
restTestHarnesses.add(harness);
@@ -193,11 +191,11 @@ public class TestSolrConfigHandlerConcurrent extends AbstractFullDistribZkTestBa

}

public static Map getAsMap(String uri, CloudSolrServer cloudClient) throws Exception {
public static Map getAsMap(String uri, CloudSolrClient cloudClient) throws Exception {
HttpGet get = new HttpGet(uri) ;
HttpEntity entity = null;
try {
entity = cloudClient.getLbServer().getHttpClient().execute(get).getEntity();
entity = cloudClient.getLbClient().getHttpClient().execute(get).getEntity();
String response = EntityUtils.toString(entity, StandardCharsets.UTF_8);
try {
return (Map) ObjectBuilder.getVal(new JSONParser(new StringReader(response)));

@ -17,14 +17,12 @@
|
|||
|
||||
package org.apache.solr.handler.admin;
|
||||
|
||||
import java.io.File;
|
||||
import java.util.Map;
|
||||
|
||||
import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
|
||||
import org.apache.commons.codec.Charsets;
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.solr.SolrTestCaseJ4;
|
||||
import org.apache.solr.client.solrj.embedded.JettySolrRunner;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
|
@ -41,7 +39,8 @@ import org.junit.Test;
|
|||
import org.junit.rules.RuleChain;
|
||||
import org.junit.rules.TestRule;
|
||||
|
||||
import com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule;
|
||||
import java.io.File;
|
||||
import java.util.Map;
|
||||
|
||||
public class CoreAdminHandlerTest extends SolrTestCaseJ4 {
|
||||
|
||||
|
@ -214,26 +213,26 @@ public class CoreAdminHandlerTest extends SolrTestCaseJ4 {
|
|||
File corex = new File(solrHomeDirectory, "corex");
|
||||
FileUtils.write(new File(corex, "core.properties"), "", Charsets.UTF_8.toString());
|
||||
JettySolrRunner runner = new JettySolrRunner(solrHomeDirectory.getAbsolutePath(), "/solr", 0);
|
||||
HttpSolrServer server = null;
|
||||
HttpSolrClient client = null;
|
||||
try {
|
||||
runner.start();
|
||||
server = new HttpSolrServer("http://localhost:" + runner.getLocalPort() + "/solr/corex");
|
||||
server.setConnectionTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
|
||||
server.setSoTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
|
||||
client = new HttpSolrClient("http://localhost:" + runner.getLocalPort() + "/solr/corex");
|
||||
client.setConnectionTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
|
||||
client.setSoTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
|
||||
SolrInputDocument doc = new SolrInputDocument();
|
||||
doc.addField("id", "123");
|
||||
server.add(doc);
|
||||
server.commit();
|
||||
server.shutdown();
|
||||
client.add(doc);
|
||||
client.commit();
|
||||
client.shutdown();
|
||||
|
||||
server = new HttpSolrServer("http://localhost:" + runner.getLocalPort() + "/solr");
|
||||
server.setConnectionTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
|
||||
server.setSoTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
|
||||
client = new HttpSolrClient("http://localhost:" + runner.getLocalPort() + "/solr");
|
||||
client.setConnectionTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
|
||||
client.setSoTimeout(SolrTestCaseJ4.DEFAULT_CONNECTION_TIMEOUT);
|
||||
CoreAdminRequest.Unload req = new CoreAdminRequest.Unload(false);
|
||||
req.setDeleteInstanceDir(true);
|
||||
req.setCoreName("corex");
|
||||
req.process(server);
|
||||
server.shutdown();
|
||||
req.process(client);
|
||||
client.shutdown();
|
||||
|
||||
runner.stop();
|
||||
|
||||
|
@ -242,8 +241,8 @@ public class CoreAdminHandlerTest extends SolrTestCaseJ4 {
|
|||
} catch (Exception e) {
|
||||
log.error("Exception testing core unload with deleteInstanceDir=true", e);
|
||||
} finally {
|
||||
if (server != null) {
|
||||
server.shutdown();
|
||||
if (client != null) {
|
||||
client.shutdown();
|
||||
}
|
||||
if (!runner.isStopped()) {
|
||||
runner.stop();
|
||||
|
|
|
@ -17,15 +17,14 @@ package org.apache.solr.handler.admin;
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import org.apache.solr.client.solrj.ResponseParser;
|
||||
import org.apache.solr.SolrJettyTestBase;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.ResponseParser;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
import org.apache.solr.common.util.NamedList;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.util.ExternalPaths;
|
||||
import org.apache.solr.common.util.NamedList;
|
||||
import org.apache.solr.core.SolrCore;
|
||||
import org.apache.solr.response.SolrQueryResponse;
|
||||
import org.junit.BeforeClass;
|
||||
|
@ -47,12 +46,12 @@ public class ShowFileRequestHandlerTest extends SolrJettyTestBase {
|
|||
}
|
||||
|
||||
public void test404ViaHttp() throws SolrServerException {
|
||||
SolrServer server = getSolrServer();
|
||||
SolrClient client = getSolrClient();
|
||||
QueryRequest request = new QueryRequest(params("file",
|
||||
"does-not-exist-404.txt"));
|
||||
request.setPath("/admin/file");
|
||||
try {
|
||||
QueryResponse resp = request.process(server);
|
||||
QueryResponse resp = request.process(client);
|
||||
fail("didn't get 404 exception");
|
||||
} catch (SolrException e) {
|
||||
assertEquals(404, e.code());
|
||||
|
@ -82,17 +81,17 @@ public class ShowFileRequestHandlerTest extends SolrJettyTestBase {
|
|||
}
|
||||
|
||||
public void testDirList() throws SolrServerException {
|
||||
SolrServer server = getSolrServer();
|
||||
SolrClient client = getSolrClient();
|
||||
//assertQ(req("qt", "/admin/file")); TODO file bug that SolrJettyTestBase extends SolrTestCaseJ4
|
||||
QueryRequest request = new QueryRequest();
|
||||
request.setPath("/admin/file");
|
||||
QueryResponse resp = request.process(server);
|
||||
QueryResponse resp = request.process(client);
|
||||
assertEquals(0,resp.getStatus());
|
||||
assertTrue(((NamedList) resp.getResponse().get("files")).size() > 0);//some files
|
||||
}
|
||||
|
||||
public void testGetRawFile() throws SolrServerException, IOException {
|
||||
SolrServer server = getSolrServer();
|
||||
SolrClient client = getSolrClient();
|
||||
//assertQ(req("qt", "/admin/file")); TODO file bug that SolrJettyTestBase extends SolrTestCaseJ4
|
||||
QueryRequest request = new QueryRequest(params("file","schema.xml"));
|
||||
request.setPath("/admin/file");
|
||||
|
@ -120,8 +119,8 @@ public class ShowFileRequestHandlerTest extends SolrJettyTestBase {
|
|||
}
|
||||
});
|
||||
|
||||
server.request( request );//runs request
|
||||
//request.process(server); but we don't have a NamedList response
|
||||
client.request(request);//runs request
|
||||
//request.process(client); but we don't have a NamedList response
|
||||
assertTrue(readFile.get());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,5 +1,22 @@
|
|||
package org.apache.solr.handler.component;
|
||||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.solr.SolrJettyTestBase;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
import org.apache.solr.common.params.ShardParams;
|
||||
import org.apache.solr.common.util.NamedList;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.io.File;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
|
@ -10,23 +27,6 @@ import java.util.Map;
|
|||
import java.util.Map.Entry;
|
||||
import java.util.Set;
|
||||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.commons.lang.StringUtils;
|
||||
import org.apache.solr.SolrJettyTestBase;
|
||||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.request.CoreAdminRequest;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
import org.apache.solr.common.SolrException;
|
||||
import org.apache.solr.common.SolrInputDocument;
|
||||
import org.apache.solr.common.params.ShardParams;
|
||||
import org.apache.solr.common.util.NamedList;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
|
||||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
* contributor license agreements. See the NOTICE file distributed with
|
||||
|
@ -46,8 +46,8 @@ import org.junit.Test;
|
|||
|
||||
public class DistributedDebugComponentTest extends SolrJettyTestBase {
|
||||
|
||||
private static SolrServer collection1;
|
||||
private static SolrServer collection2;
|
||||
private static SolrClient collection1;
|
||||
private static SolrClient collection2;
|
||||
private static String shard1;
|
||||
private static String shard2;
|
||||
private static File solrHome;
|
||||
|
@ -65,8 +65,8 @@ public class DistributedDebugComponentTest extends SolrJettyTestBase {
|
|||
solrHome = createSolrHome();
|
||||
createJetty(solrHome.getAbsolutePath(), null, null);
|
||||
String url = jetty.getBaseUrl().toString();
|
||||
collection1 = new HttpSolrServer(url);
|
||||
collection2 = new HttpSolrServer(url + "/collection2");
|
||||
collection1 = new HttpSolrClient(url);
|
||||
collection2 = new HttpSolrClient(url + "/collection2");
|
||||
|
||||
String urlCollection1 = jetty.getBaseUrl().toString() + "/" + "collection1";
|
||||
String urlCollection2 = jetty.getBaseUrl().toString() + "/" + "collection2";
|
||||
|
@ -155,7 +155,7 @@ public class DistributedDebugComponentTest extends SolrJettyTestBase {
|
|||
final int NUM_ITERS = atLeast(50);
|
||||
|
||||
for (int i = 0; i < NUM_ITERS; i++) {
|
||||
SolrServer client = random().nextBoolean() ? collection1 : collection2;
|
||||
SolrClient client = random().nextBoolean() ? collection1 : collection2;
|
||||
|
||||
SolrQuery q = new SolrQuery();
|
||||
q.set("distrib", "true");
|
||||
|
@ -259,10 +259,10 @@ public class DistributedDebugComponentTest extends SolrJettyTestBase {
|
|||
|
||||
}
|
||||
|
||||
private void verifyDebugSections(SolrQuery query, SolrServer server) throws SolrServerException {
|
||||
private void verifyDebugSections(SolrQuery query, SolrClient client) throws SolrServerException {
|
||||
query.set("debugQuery", "true");
|
||||
query.remove("debug");
|
||||
QueryResponse response = server.query(query);
|
||||
QueryResponse response = client.query(query);
|
||||
assertFalse(response.getDebugMap().isEmpty());
|
||||
assertInDebug(response, "track");
|
||||
assertInDebug(response, "rawquerystring");
|
||||
|
@ -275,7 +275,7 @@ public class DistributedDebugComponentTest extends SolrJettyTestBase {
|
|||
|
||||
query.set("debug", "true");
|
||||
query.remove("debugQuery");
|
||||
response = server.query(query);
|
||||
response = client.query(query);
|
||||
assertFalse(response.getDebugMap().isEmpty());
|
||||
assertInDebug(response, "track");
|
||||
assertInDebug(response, "rawquerystring");
|
||||
|
@ -286,8 +286,8 @@ public class DistributedDebugComponentTest extends SolrJettyTestBase {
|
|||
assertInDebug(response, "explain");
|
||||
assertInDebug(response, "timing");
|
||||
|
||||
query.set("debug", "track");
|
||||
response = server.query(query);
|
||||
query.set("debug", "track");
|
||||
response = client.query(query);
|
||||
assertFalse(response.getDebugMap().isEmpty());
|
||||
assertInDebug(response, "track");
|
||||
assertNotInDebug(response, "rawquerystring");
|
||||
|
@ -298,8 +298,8 @@ public class DistributedDebugComponentTest extends SolrJettyTestBase {
|
|||
assertNotInDebug(response, "explain");
|
||||
assertNotInDebug(response, "timing");
|
||||
|
||||
query.set("debug", "query");
|
||||
response = server.query(query);
|
||||
query.set("debug", "query");
|
||||
response = client.query(query);
|
||||
assertFalse(response.getDebugMap().isEmpty());
|
||||
assertNotInDebug(response, "track");
|
||||
assertInDebug(response, "rawquerystring");
|
||||
|
@ -310,8 +310,8 @@ public class DistributedDebugComponentTest extends SolrJettyTestBase {
|
|||
assertNotInDebug(response, "explain");
|
||||
assertNotInDebug(response, "timing");
|
||||
|
||||
query.set("debug", "results");
|
||||
response = server.query(query);
|
||||
query.set("debug", "results");
|
||||
response = client.query(query);
|
||||
assertFalse(response.getDebugMap().isEmpty());
|
||||
assertNotInDebug(response, "track");
|
||||
assertNotInDebug(response, "rawquerystring");
|
||||
|
@ -322,8 +322,8 @@ public class DistributedDebugComponentTest extends SolrJettyTestBase {
|
|||
assertInDebug(response, "explain");
|
||||
assertNotInDebug(response, "timing");
|
||||
|
||||
query.set("debug", "timing");
|
||||
response = server.query(query);
|
||||
query.set("debug", "timing");
|
||||
response = client.query(query);
|
||||
assertFalse(response.getDebugMap().isEmpty());
|
||||
assertNotInDebug(response, "track");
|
||||
assertNotInDebug(response, "rawquerystring");
|
||||
|
@ -334,8 +334,8 @@ public class DistributedDebugComponentTest extends SolrJettyTestBase {
|
|||
assertNotInDebug(response, "explain");
|
||||
assertInDebug(response, "timing");
|
||||
|
||||
query.set("debug", "false");
|
||||
response = server.query(query);
|
||||
query.set("debug", "false");
|
||||
response = client.query(query);
|
||||
assertNull(response.getDebugMap());
|
||||
}
|
||||
|
||||
|
|
|
@@ -22,7 +22,7 @@ import java.util.List;
import java.io.IOException;

import org.apache.solr.BaseDistributedSearchTestCase;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.response.FieldStatsInfo;
import org.apache.solr.client.solrj.response.PivotField;
@@ -763,10 +763,10 @@ public class DistributedFacetPivotLargeTest extends BaseDistributedSearchTestCas
commit();

final int maxDocs = 50;
final SolrServer zeroShard = clients.get(0);
final SolrServer oneShard = clients.get(1);
final SolrServer twoShard = clients.get(2);
final SolrServer threeShard = clients.get(3); // edge case: never gets any matching docs
final SolrClient zeroShard = clients.get(0);
final SolrClient oneShard = clients.get(1);
final SolrClient twoShard = clients.get(2);
final SolrClient threeShard = clients.get(3); // edge case: never gets any matching docs

for(Integer i=0;i<maxDocs;i++){//50 entries
addPivotDoc(zeroShard, "id", getDocNum(), "place_s", "cardiff", "company_t", "microsoft polecat bbc","pay_i",2400,"hiredate_dt", "2012-07-01T12:30:00Z","real_b","true");
@@ -817,10 +817,10 @@ public class DistributedFacetPivotLargeTest extends BaseDistributedSearchTestCas
/**
* Builds up a SolrInputDocument using the specified fields, then adds it to the
* specified client as well as the control client
* @see #indexDoc(SolrServer,SolrParams,SolrInputDocument...)
* @see #indexDoc(org.apache.solr.client.solrj.SolrClient,SolrParams,SolrInputDocument...)
* @see #sdoc
*/
private void addPivotDoc(SolrServer client, Object... fields)
private void addPivotDoc(SolrClient client, Object... fields)
throws IOException, SolrServerException {

indexDoc(client, params(), sdoc(fields));

@@ -17,16 +17,10 @@ package org.apache.solr.handler.component;
* limitations under the License.
*/

import java.util.Date;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.io.IOException;

import org.apache.solr.BaseDistributedSearchTestCase;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.response.FieldStatsInfo;
import org.apache.solr.client.solrj.response.PivotField;
import org.apache.solr.common.params.FacetParams;
@@ -60,9 +54,9 @@ public class DistributedFacetPivotLongTailTest extends BaseDistributedSearchTest
@Override
public void doTest() throws Exception {

final SolrServer shard0 = clients.get(0);
final SolrServer shard1 = clients.get(1);
final SolrServer shard2 = clients.get(2);
final SolrClient shard0 = clients.get(0);
final SolrClient shard1 = clients.get(1);
final SolrClient shard2 = clients.get(2);

// the 5 top foo_s terms have 100 docs each on every shard
for (int i = 0; i < 100; i++) {

@@ -18,8 +18,7 @@ package org.apache.solr.handler.component;
*/

import org.apache.solr.BaseDistributedSearchTestCase;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.response.FieldStatsInfo;
import org.apache.solr.client.solrj.response.PivotField;
import org.apache.solr.client.solrj.response.QueryResponse;
@@ -27,9 +26,6 @@ import org.apache.solr.common.params.FacetParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SolrParams;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

/**
@@ -50,8 +46,8 @@ public class DistributedFacetPivotSmallAdvancedTest extends BaseDistributedSearc
public void doTest() throws Exception {

del("*:*");
final SolrServer shard0 = clients.get(0);
final SolrServer shard1 = clients.get(1);
final SolrClient shard0 = clients.get(0);
final SolrClient shard1 = clients.get(1);

// NOTE: we use the literal (4 character) string "null" as a company name
// to help ensure there isn't any bugs where the literal string is treated as if it

@@ -26,7 +26,7 @@ import junit.framework.Assert;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.lucene.util.LuceneTestCase.SuppressTempFileChecks;
import org.apache.solr.BaseDistributedSearchTestCase;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.params.SpellingParams;
@@ -79,7 +79,7 @@ public class DistributedSpellCheckComponentTest extends BaseDistributedSearchTes
// query a random server
params.set("shards", shards);
int which = r.nextInt(clients.size());
SolrServer client = clients.get(which);
SolrClient client = clients.get(which);
client.query(params);
}

@ -19,13 +19,12 @@ package org.apache.solr.request;
|
|||
|
||||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.apache.solr.SolrJettyTestBase;
|
||||
import org.apache.solr.SolrTestCaseJ4.SuppressSSL;
|
||||
import org.apache.solr.client.solrj.SolrQuery;
|
||||
import org.apache.solr.client.solrj.SolrServer;
|
||||
import org.apache.solr.client.solrj.SolrClient;
|
||||
import org.apache.solr.client.solrj.SolrServerException;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrServer;
|
||||
import org.apache.solr.client.solrj.impl.HttpSolrClient;
|
||||
import org.apache.solr.client.solrj.request.QueryRequest;
|
||||
import org.apache.solr.client.solrj.response.QueryResponse;
|
||||
import org.apache.solr.common.SolrException;
|
||||
|
@ -69,11 +68,11 @@ public class TestRemoteStreaming extends SolrJettyTestBase {
|
|||
@Before
|
||||
public void doBefore() throws IOException, SolrServerException {
|
||||
//add document and commit, and ensure it's there
|
||||
SolrServer server1 = getSolrServer();
|
||||
SolrClient client = getSolrClient();
|
||||
SolrInputDocument doc = new SolrInputDocument();
|
||||
doc.addField( "id", "1234" );
|
||||
server1.add(doc);
|
||||
server1.commit();
|
||||
client.add(doc);
|
||||
client.commit();
|
||||
assertTrue(searchFindsIt());
|
||||
}
|
||||
|
||||
|
@ -85,10 +84,10 @@ public class TestRemoteStreaming extends SolrJettyTestBase {
|
|||
|
||||
@Test
|
||||
public void testStreamUrl() throws Exception {
|
||||
HttpSolrServer solrServer = (HttpSolrServer) getSolrServer();
|
||||
String streamUrl = solrServer.getBaseURL()+"/select?q=*:*&fl=id&wt=csv";
|
||||
HttpSolrClient client = (HttpSolrClient) getSolrClient();
|
||||
String streamUrl = client.getBaseURL()+"/select?q=*:*&fl=id&wt=csv";
|
||||
|
||||
String getUrl = solrServer.getBaseURL()+"/debug/dump?wt=xml&stream.url="+URLEncoder.encode(streamUrl,"UTF-8");
|
||||
String getUrl = client.getBaseURL()+"/debug/dump?wt=xml&stream.url="+URLEncoder.encode(streamUrl,"UTF-8");
|
||||
String content = getUrlForString(getUrl);
|
||||
assertTrue(content.contains("1234"));
|
||||
//System.out.println(content);
|
||||
|
@ -116,7 +115,7 @@ public class TestRemoteStreaming extends SolrJettyTestBase {
|
|||
query.setQuery( "*:*" );//for anything
|
||||
query.add("stream.url",makeDeleteAllUrl());
|
||||
try {
|
||||
getSolrServer().query(query);
|
||||
getSolrClient().query(query);
|
||||
fail();
|
||||
} catch (SolrException se) {
|
||||
assertSame(ErrorCode.BAD_REQUEST, ErrorCode.getErrorCode(se.code()));
|
||||
|
@ -140,7 +139,7 @@ public class TestRemoteStreaming extends SolrJettyTestBase {
|
|||
return "/select";
|
||||
}
|
||||
};
|
||||
QueryResponse rsp = queryRequest.process(getSolrServer());
|
||||
QueryResponse rsp = queryRequest.process(getSolrClient());
|
||||
//!! should *fail* above for security purposes
|
||||
String handler = (String) rsp.getHeader().get("handler");
|
||||
System.out.println(handler);
|
||||
|
@ -148,15 +147,15 @@ public class TestRemoteStreaming extends SolrJettyTestBase {
|
|||
|
||||
/** Compose a url that if you get it, it will delete all the data. */
|
||||
private String makeDeleteAllUrl() throws UnsupportedEncodingException {
|
||||
HttpSolrServer solrServer = (HttpSolrServer) getSolrServer();
|
||||
HttpSolrClient client = (HttpSolrClient) getSolrClient();
|
||||
String deleteQuery = "<delete><query>*:*</query></delete>";
|
||||
return solrServer.getBaseURL()+"/update?commit=true&stream.body="+ URLEncoder.encode(deleteQuery, "UTF-8");
|
||||
return client.getBaseURL()+"/update?commit=true&stream.body="+ URLEncoder.encode(deleteQuery, "UTF-8");
|
||||
}
|
||||
|
||||
private boolean searchFindsIt() throws SolrServerException {
|
||||
SolrQuery query = new SolrQuery();
|
||||
query.setQuery( "id:1234" );
|
||||
QueryResponse rsp = getSolrServer().query(query);
|
||||
QueryResponse rsp = getSolrClient().query(query);
|
||||
return rsp.getResults().getNumFound() != 0;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -72,7 +72,7 @@ public class TestBulkSchemaAPI extends RestTestBase {
jetty.stop();
jetty = null;
}
server = null;
client = null;
restTestHarness = null;
}

@@ -62,7 +62,7 @@ public class TestManagedSchemaDynamicFieldResource extends RestTestBase {
jetty.stop();
jetty = null;
}
server = null;
client = null;
restTestHarness = null;
}

@@ -62,7 +62,7 @@ public class TestManagedSchemaFieldResource extends RestTestBase {
jetty.stop();
jetty = null;
}
server = null;
client = null;
restTestHarness = null;
}

@@ -16,21 +16,22 @@
*/
package org.apache.solr.schema;

import java.io.File;
import java.nio.ByteBuffer;
import java.util.List;

import org.apache.commons.io.FileUtils;

import org.apache.solr.SolrJettyTestBase;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.beans.Field;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.*;
import org.apache.solr.SolrJettyTestBase;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
import org.apache.solr.common.SolrInputDocument;
import org.junit.BeforeClass;

import java.io.File;
import java.nio.ByteBuffer;
import java.util.List;

public class TestBinaryField extends SolrJettyTestBase {

@BeforeClass
@@ -61,7 +62,7 @@ public class TestBinaryField extends SolrJettyTestBase {

public void testSimple() throws Exception {
SolrServer server = getSolrServer();
SolrClient client = getSolrClient();
byte[] buf = new byte[10];
for (int i = 0; i < 10; i++) {
buf[i] = (byte) i;
@@ -70,21 +71,21 @@ public class TestBinaryField extends SolrJettyTestBase {
doc = new SolrInputDocument();
doc.addField("id", 1);
doc.addField("data", ByteBuffer.wrap(buf, 2, 5));
server.add(doc);
client.add(doc);

doc = new SolrInputDocument();
doc.addField("id", 2);
doc.addField("data", ByteBuffer.wrap(buf, 4, 3));
server.add(doc);
client.add(doc);

doc = new SolrInputDocument();
doc.addField("id", 3);
doc.addField("data", buf);
server.add(doc);
client.add(doc);

server.commit();
client.commit();

QueryResponse resp = server.query(new SolrQuery("*:*"));
QueryResponse resp = client.query(new SolrQuery("*:*"));
SolrDocumentList res = resp.getResults();
List<Bean> beans = resp.getBeans(Bean.class);
assertEquals(3, res.size());
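Every hunk in this diff applies the same mechanical rename: the deprecated SolrJ entry points named *SolrServer (SolrServer, HttpSolrServer, CloudSolrServer, and the load balancer reached via getLbServer()) become their *SolrClient counterparts, and test helpers such as createNewSolrServer/getSolrServer become createNewSolrClient/getSolrClient, while the surrounding calls stay the same. As a rough, hedged illustration of what that migration looks like in ordinary application code (not part of this commit; the base URL and field values are placeholders):

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrInputDocument;

public class SolrClientMigrationSketch {
  public static void main(String[] args) throws Exception {
    // Old (deprecated): SolrServer server = new HttpSolrServer("http://localhost:8983/solr/collection1");
    // New: the SolrClient hierarchy, used the same way by callers. URL/core name are placeholders.
    SolrClient client = new HttpSolrClient("http://localhost:8983/solr/collection1");

    // Index a document and commit, exactly as the tests above do after the rename.
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", "migration-example-1");        // placeholder field values
    doc.addField("name", "SolrServer to SolrClient");
    client.add(doc);
    client.commit();

    // Query it back.
    QueryResponse rsp = client.query(new SolrQuery("*:*"));
    System.out.println("numFound=" + rsp.getResults().getNumFound());

    // The tests in this diff still release the client with shutdown().
    client.shutdown();
  }
}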