mirror of https://github.com/apache/lucene.git

SOLR-13322 - let forbidden apis check for sysout in solr core

parent de13c8e79d
commit 2d690885e5
@@ -22,9 +22,9 @@
 </description>

 <dirname file="${ant.file.common-solr}" property="common-solr.dir"/>

 <property name="Name" value="Solr" />

 <!-- solr uses Java 8 -->
 <property name="javac.release" value="8"/>
 <property name="javac.args" value="-Xlint:-deprecation"/>
@@ -79,17 +79,17 @@
 <fileset dir="${common-solr.dir}/core/lib" excludes="${common.classpath.excludes}"/>
 <fileset dir="${common-solr.dir}/solrj/lib" excludes="${common.classpath.excludes}"/>
 <fileset dir="${common-solr.dir}/server/lib" excludes="${common.classpath.excludes}"/>
 <fileset dir="${common-solr.dir}/example/example-DIH/solr/db/lib" excludes="${common.classpath.excludes}"/>
 <fileset dir="lib" excludes="${common.classpath.excludes}" erroronmissingdir="false"/>
 </path>

 <path id="solr.lucene.libs">
   <!-- List of jars that will be used as the foundation for both
        the base classpath, as well as copied into the lucene-libs dir
        in the release.
   -->
   <!-- NOTE: lucene-core is explicitly not included because of the
        base.classpath (compilation & tests are done directly against
        the class files w/o needing to build the jar)
   -->
   <pathelement location="${analyzers-common.jar}"/>
@@ -134,7 +134,7 @@
   <pathelement path="src/test-files"/>
   <path refid="test.base.classpath"/>
 </path>

 <path id="test.classpath" refid="solr.test.base.classpath"/>

 <macrodef name="solr-contrib-uptodate">
@@ -152,7 +152,7 @@
   </sequential>
 </macrodef>

 <!--
   We don't want to run HDFS tests on Windows by default, because they require Cygwin.
   Cygwin users can explicitly set -Dtests.disableHdfs=false to enable Hdfs related testing.
 -->
@@ -170,16 +170,16 @@
   <mkdir dir="${maven.dist.dir}"/>
 </target>

 <target name="prep-lucene-jars"
         depends="resolve-groovy,
                  jar-lucene-core, jar-backward-codecs, jar-analyzers-phonetic, jar-analyzers-kuromoji, jar-analyzers-nori, jar-codecs, jar-expressions, jar-suggest, jar-highlighter, jar-memory,
                  jar-misc, jar-spatial-extras, jar-spatial3d, jar-grouping, jar-queries, jar-queryparser, jar-join, jar-sandbox, jar-classification">
   <property name="solr.deps.compiled" value="true"/>
 </target>

 <target name="lucene-jars-to-solr"
         depends="-lucene-jars-to-solr-not-for-package,-lucene-jars-to-solr-package"/>

 <target name="-lucene-jars-to-solr-not-for-package" unless="called.from.create-package">
   <sequential>
     <antcall target="prep-lucene-jars" inheritall="true"/>
@@ -191,7 +191,7 @@
     </copy>
   </sequential>
 </target>

 <target name="-lucene-jars-to-solr-package" if="called.from.create-package">
   <sequential>
     <antcall target="-unpack-lucene-tgz" inheritall="true"/>
@@ -208,7 +208,7 @@
 </target>

 <!-- Shared core/solrj/test-framework/contrib targets -->

 <macrodef name="solr-jarify" description="Builds a Solr JAR file">
   <attribute name="basedir" default="${build.dir}/classes/java"/>
   <attribute name="destfile" default="${build.dir}/${final.name}.jar"/>
@@ -511,7 +511,7 @@
 <target name="compile-contrib" description="Compile contrib modules">
   <contrib-crawl target="compile-core"/>
 </target>

 <target name="compile-test-contrib" description="Compile contrib modules' tests">
   <contrib-crawl target="compile-test"/>
 </target>
@@ -529,7 +529,7 @@
   <delete dir="${dest}/web" includes="**/*" failonerror="false"/>
   <contrib-crawl target="add-to-webapp"/>
 </target>

 <!-- Forbidden API Task, customizations for Solr -->
 <target name="-check-forbidden-all" depends="-init-forbidden-apis,compile-core,compile-test">
   <property prefix="ivyversions" file="${common.dir}/ivy-versions.properties"/><!-- for commons-io version -->
@@ -550,14 +550,13 @@
     <fileset dir="${build.dir}/classes/test" excludes="${forbidden-tests-excludes}" erroronmissingdir="false"/>
   </forbidden-apis>
 </target>

-<target name="-check-forbidden-sysout"/>

 <!-- hack for now to disable *all* Solr tests on Jenkins when "tests.disable-solr" property is set -->
 <target name="test" unless="tests.disable-solr">
   <antcall target="common.test" inheritrefs="true" inheritall="true"/>
 </target>

 <!-- In Solr we do not generate MR-JARs yet; disable completely so we do not accidentally patch -->
 <target name="patch-mrjar-classes"/>
</project>
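The build change above is the heart of the commit: deleting the empty `-check-forbidden-sysout` override means the inherited forbidden-apis sysout check applies to solr-core again, so direct `System.out`/`System.err` usage now fails the build unless explicitly suppressed. As a rough, hedged sketch of what such a check looks like with the forbidden-apis Ant task (target and property names here are illustrative assumptions, not copied from lucene/common-build.xml):

```xml
<!-- Illustrative sketch only: fail the build when compiled classes call
     System.out or System.err. Target/fileset names are assumptions. -->
<target name="-check-forbidden-sysout" depends="-init-forbidden-apis,compile-core">
  <forbidden-apis classpathref="forbidden-apis.allclasses.classpath">
    <!-- "jdk-system-out" is a signature group bundled with forbidden-apis;
         it bans System.out, System.err, and Throwable#printStackTrace -->
    <bundledsignatures name="jdk-system-out"/>
    <fileset dir="${build.dir}/classes/java"/>
  </forbidden-apis>
</target>
```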
@@ -83,7 +83,7 @@ import org.slf4j.MDC;

/**
 * Run solr using jetty
 *
 * @since solr 1.3
 */
public class JettySolrRunner {
@@ -93,7 +93,7 @@ public class JettySolrRunner {
  private static final int THREAD_POOL_MAX_THREADS = 10000;
  // NOTE: needs to be larger than SolrHttpClient.threadPoolSweeperMaxIdleTime
  private static final int THREAD_POOL_MAX_IDLE_TIME_MS = 260000;

  Server server;

  volatile FilterHolder dispatchFilter;
@@ -128,14 +128,14 @@ public class JettySolrRunner {
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  private AtomicLong nRequests = new AtomicLong();

  List<Delay> delays = new ArrayList<>();

  public long getTotalRequests() {
    return nRequests.get();
  }

  /**
   * Introduce a delay of specified milliseconds for the specified request.
   *
@@ -146,7 +146,7 @@ public class JettySolrRunner {
  public void addDelay(String reason, int count, int delay) {
    delays.add(new Delay(reason, count, delay));
  }

  /**
   * Remove any delay introduced before.
   */
@@ -167,14 +167,14 @@ public class JettySolrRunner {

  @Override
  public void destroy() { }

  private void executeDelay() {
    int delayMs = 0;
    for (Delay delay: delays) {
      this.log.info("Delaying "+delay.delayValue+", for reason: "+delay.reason);
      if (delay.counter.decrementAndGet() == 0) {
        delayMs += delay.delayValue;
      }
    }

    if (delayMs > 0) {
@@ -215,7 +215,7 @@ public class JettySolrRunner {
  public JettySolrRunner(String solrHome, JettyConfig config) {
    this(solrHome, new Properties(), config);
  }

  /**
   * Construct a JettySolrRunner
   *
@@ -244,7 +244,7 @@ public class JettySolrRunner {
    this.solrHome = solrHome;
    this.config = config;
    this.nodeProperties = nodeProperties;

    if (enableProxy) {
      try {
        proxy = new SocketProxy(0, config.sslConfig != null && config.sslConfig.isSSLMode());
@@ -256,7 +256,7 @@ public class JettySolrRunner {

    this.init(this.config.port);
  }

  private void init(int port) {

    QueuedThreadPool qtp = new QueuedThreadPool();
@@ -275,7 +275,7 @@ public class JettySolrRunner {
      //
      // This means we will use the same truststore, keystore (and keys) for
      // the server as well as any client actions taken by this JVM in
      // talking to that server, but for the purposes of testing that should
      // be good enough
      final SslContextFactory sslcontext = SSLConfig.createContextFactory(config.sslConfig);
@@ -382,7 +382,7 @@ public class JettySolrRunner {
        dispatchFilter.setHeldClass(SolrDispatchFilter.class);
        dispatchFilter.setInitParameter("excludePatterns", excludePatterns);
        root.addFilter(dispatchFilter, "*", EnumSet.of(DispatcherType.REQUEST));

        synchronized (JettySolrRunner.this) {
          waitOnSolr = true;
          JettySolrRunner.this.notify();
@@ -400,7 +400,7 @@ public class JettySolrRunner {
    }

    chain = injectJettyHandlers(chain);

    GzipHandler gzipHandler = new GzipHandler();
    gzipHandler.setHandler(chain);
@@ -413,7 +413,7 @@ public class JettySolrRunner {
    server.setHandler(gzipHandler);
  }

  /** descendants may inject own handler chaining it to the given root
   * and then returning that own one*/
  protected HandlerWrapper injectJettyHandlers(HandlerWrapper chain) {
    return chain;
@@ -445,7 +445,7 @@ public class JettySolrRunner {
  public boolean isRunning() {
    return server.isRunning() && dispatchFilter != null && dispatchFilter.isRunning();
  }

  public boolean isStopped() {
    return (server.isStopped() && dispatchFilter == null) || (server.isStopped() && dispatchFilter.isStopped()
        && ((QueuedThreadPool) server.getThreadPool()).isStopped());
@@ -478,12 +478,12 @@ public class JettySolrRunner {
    // Do not let Jetty/Solr pollute the MDC for this thread
    Map<String, String> prevContext = MDC.getCopyOfContextMap();
    MDC.clear();

    log.info("Start Jetty (original configured port={})", this.config.port);

    try {
      int port = reusePort && jettyPort != -1 ? jettyPort : this.config.port;

      // if started before, make a new server
      if (startedBefore) {
        waitOnSolr = false;
@@ -508,21 +508,21 @@ public class JettySolrRunner {
          }
        }
      }

      if (config.waitForLoadingCoresToFinishMs != null && config.waitForLoadingCoresToFinishMs > 0L) {
        waitForLoadingCoresToFinish(config.waitForLoadingCoresToFinishMs);
      }

      setProtocolAndHost();

      if (enableProxy) {
        if (started) {
          proxy.reopen();
        } else {
          proxy.open(getBaseUrl().toURI());
        }
      }

    } finally {
      started = true;
      if (prevContext != null) {
@@ -548,7 +548,7 @@ public class JettySolrRunner {
    this.protocol = protocol;
    this.host = c.getHost();
  }

  private void retryOnPortBindFailure(int portRetryTime, int port) throws Exception, InterruptedException {
    TimeOut timeout = new TimeOut(portRetryTime, TimeUnit.SECONDS, TimeSource.NANO_TIME);
    int tryCnt = 1;
@@ -567,7 +567,7 @@ public class JettySolrRunner {
          continue;
        }
      }

      throw e;
    }
  }
@@ -628,7 +628,7 @@ public class JettySolrRunner {

      QueuedThreadPool qtp = (QueuedThreadPool) server.getThreadPool();
      ReservedThreadExecutor rte = qtp.getBean(ReservedThreadExecutor.class);

      server.stop();

      if (server.getState().equals(Server.FAILED)) {
@@ -647,18 +647,18 @@ public class JettySolrRunner {
          Thread.sleep(50);
        }
      }

      // we tried to kill everything, now we wait for executor to stop
      qtp.setStopTimeout(Integer.MAX_VALUE);
      qtp.stop();
      qtp.join();

      if (rte != null) {
        // we try and wait for the reserved thread executor, but it doesn't always seem to work
        // so we actually set 0 reserved threads at creation

        rte.stop();

        TimeOut timeout = new TimeOut(30, TimeUnit.SECONDS, TimeSource.NANO_TIME);
        timeout.waitFor("Timeout waiting for reserved executor to stop.", ()
            -> rte.isStopped());
@@ -675,12 +675,12 @@ public class JettySolrRunner {
          // ignore
        }
      } while (!server.isStopped());

    } finally {
      if (enableProxy) {
        proxy.close();
      }

      if (prevContext != null) {
        MDC.setContextMap(prevContext);
      } else {
@@ -691,7 +691,7 @@ public class JettySolrRunner {

  /**
   * Returns the Local Port of the jetty Server.
   *
   * @exception RuntimeException if there is no Connector
   */
  private int getFirstConnectorPort() {
@@ -701,22 +701,22 @@ public class JettySolrRunner {
    }
    return ((ServerConnector) conns[0]).getLocalPort();
  }

  /**
   * Returns the Local Port of the jetty Server.
   *
   * @exception RuntimeException if there is no Connector
   */
  public int getLocalPort() {
    return getLocalPort(false);
  }

  /**
   * Returns the Local Port of the jetty Server.
   *
   * @param internalPort pass true to get the true jetty port rather than the proxy port if configured
   *
   * @exception RuntimeException if there is no Connector
   */
  public int getLocalPort(boolean internalPort) {
@@ -728,7 +728,7 @@ public class JettySolrRunner {
    }
    return (proxyPort != -1) ? proxyPort : jettyPort;
  }

  /**
   * Sets the port of a local socket proxy that sits infront of this server; if set
   * then all client traffic will flow through the proxy, giving us the ability to
@@ -737,7 +737,7 @@ public class JettySolrRunner {
  public void setProxyPort(int proxyPort) {
    this.proxyPort = proxyPort;
  }

  /**
   * Returns a base URL consisting of the protocol, host, and port for a
   * Connector in use by the Jetty Server contained in this runner.
@@ -764,7 +764,7 @@ public class JettySolrRunner {
  public SolrClient newClient() {
    return new HttpSolrClient.Builder(getBaseUrl().toString()).build();
  }

  public SolrClient newClient(int connectionTimeoutMillis, int socketTimeoutMillis) {
    return new HttpSolrClient.Builder(getBaseUrl().toString())
        .withConnectionTimeout(connectionTimeoutMillis)
@@ -793,13 +793,9 @@ public class JettySolrRunner {
  /**
   * A main class that starts jetty+solr This is useful for debugging
   */
-  public static void main(String[] args) {
-    try {
-      JettySolrRunner jetty = new JettySolrRunner(".", "/solr", 8983);
-      jetty.start();
-    } catch (Exception ex) {
-      ex.printStackTrace();
-    }
+  public static void main(String[] args) throws Exception {
+    JettySolrRunner jetty = new JettySolrRunner(".", "/solr", 8983);
+    jetty.start();
  }

  /**
@@ -829,12 +825,12 @@ public class JettySolrRunner {
      throw new IllegalStateException("The dispatchFilter is not set!");
    }
  }

  static class Delay {
    final AtomicInteger counter;
    final int delayValue;
    final String reason;

    public Delay(String reason, int counter, int delay) {
      this.reason = reason;
      this.counter = new AtomicInteger(counter);
@@ -43,6 +43,7 @@ import org.apache.solr.common.cloud.ClusterProperties;
 import org.apache.solr.common.cloud.SolrZkClient;
 import org.apache.solr.common.cloud.ZkConfigManager;
 import org.apache.solr.core.CoreContainer;
+import org.apache.solr.util.CLIO;
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.KeeperException;
 import org.xml.sax.SAXException;
@@ -50,8 +51,8 @@ import org.xml.sax.SAXException;
import static org.apache.solr.common.params.CommonParams.NAME;
import static org.apache.solr.common.params.CommonParams.VALUE_LONG;

-public class ZkCLI {
+public class ZkCLI implements CLIO {

  private static final String MAKEPATH = "makepath";
  private static final String PUT = "put";
  private static final String PUT_FILE = "putfile";
@@ -84,19 +85,19 @@ public class ZkCLI {
    ZkCLI.stdout = stdout;
  }

-  private static PrintStream stdout = System.out;
+  private static PrintStream stdout = CLIO.getOutStream();

  /**
   * Allows you to perform a variety of zookeeper related tasks, such as:
   *
   * Bootstrap the current configs for all collections in solr.xml.
   *
   * Upload a named config set from a given directory.
   *
   * Link a named config set explicity to a collection.
   *
   * Clear ZooKeeper info.
   *
   * If you also pass a solrPort, it will be used to start an embedded zk useful
   * for single machine, multi node tests.
   */
@@ -106,7 +107,7 @@ public class ZkCLI {

    CommandLineParser parser = new PosixParser();
    Options options = new Options();

    options.addOption(OptionBuilder
        .hasArg(true)
        .withDescription(
|
|||
Option solrHomeOption = new Option("s", SOLRHOME, true,
|
||||
"for " + BOOTSTRAP + ", " + RUNZK + ": solrhome location");
|
||||
options.addOption(solrHomeOption);
|
||||
|
||||
|
||||
options.addOption("d", CONFDIR, true,
|
||||
"for " + UPCONFIG + ": a directory of configuration files");
|
||||
options.addOption("n", CONFNAME, true,
|
||||
"for " + UPCONFIG + ", " + LINKCONFIG + ": name of the config set");
|
||||
|
||||
|
||||
|
||||
options.addOption("c", COLLECTION, true,
|
||||
"for " + LINKCONFIG + ": name of the collection");
|
||||
|
||||
|
||||
options.addOption(EXCLUDE_REGEX_SHORT, EXCLUDE_REGEX, true,
|
||||
"for " + UPCONFIG + ": files matching this regular expression won't be uploaded");
|
||||
|
||||
|
@@ -140,7 +141,7 @@ public class ZkCLI {
        RUNZK,
        true,
        "run zk internally by passing the solr run port - only for clusters on one machine (tests, dev)");

    options.addOption("h", HELP, false, "bring up this help page");
    options.addOption(NAME, true, "name of the cluster property to set");
    options.addOption(VALUE_LONG, true, "value of the cluster to set");
@@ -148,7 +149,7 @@ public class ZkCLI {
    try {
      // parse the command line arguments
      CommandLine line = parser.parse(options, args);

      if (line.hasOption(HELP) || !line.hasOption(ZKHOST)
          || !line.hasOption(CMD)) {
        // automatically generate the help statement
@@ -171,11 +172,11 @@ public class ZkCLI {
        stdout.println("zkcli.sh -zkhost localhost:9983 -cmd " + UPDATEACLS + " /solr");
        return;
      }

      // start up a tmp zk server first
      String zkServerAddress = line.getOptionValue(ZKHOST);
      String solrHome = line.getOptionValue(SOLRHOME);

      String solrPort = null;
      if (line.hasOption(RUNZK)) {
        if (!line.hasOption(SOLRHOME)) {
@@ -184,7 +185,7 @@ public class ZkCLI {
        }
        solrPort = line.getOptionValue(RUNZK);
      }

      SolrZkServer zkServer = null;
      if (solrPort != null) {
        zkServer = new SolrZkServer("true", null, solrHome + "/zoo_data",
@@ -197,7 +198,7 @@ public class ZkCLI {
      zkClient = new SolrZkClient(zkServerAddress, 30000, 30000,
          () -> {
          });

      if (line.getOptionValue(CMD).equalsIgnoreCase(BOOTSTRAP)) {
        if (!line.hasOption(SOLRHOME)) {
          stdout.println("-" + SOLRHOME
@@ -216,7 +217,7 @@ public class ZkCLI {

        // No need to close the CoreContainer, as it wasn't started
        // up in the first place...

      } else if (line.getOptionValue(CMD).equalsIgnoreCase(UPCONFIG)) {
        if (!line.hasOption(CONFDIR) || !line.hasOption(CONFNAME)) {
          stdout.println("-" + CONFDIR + " and -" + CONFNAME
@@ -226,7 +227,7 @@ public class ZkCLI {
        String confDir = line.getOptionValue(CONFDIR);
        String confName = line.getOptionValue(CONFNAME);
        final String excludeExpr = line.getOptionValue(EXCLUDE_REGEX, EXCLUDE_REGEX_DEFAULT);

        if(!ZkController.checkChrootPath(zkServerAddress, true)) {
          stdout.println("A chroot was specified in zkHost but the znode doesn't exist. ");
          System.exit(1);
@@ -252,7 +253,7 @@ public class ZkCLI {
        }
        String collection = line.getOptionValue(COLLECTION);
        String confName = line.getOptionValue(CONFNAME);

        ZkController.linkConfSet(zkClient, collection, confName);
      } else if (line.getOptionValue(CMD).equalsIgnoreCase(LIST)) {
        zkClient.printLayoutToStream(stdout);
@@ -368,6 +369,6 @@ public class ZkCLI {
    } catch (ParseException exp) {
      stdout.println("Unexpected exception:" + exp.getMessage());
    }

  }
}
@@ -298,8 +298,8 @@ public class ZkController implements Closeable {
    this.genericCoreNodeNames = cloudConfig.getGenericCoreNodeNames();

    // be forgiving and strip this off leading/trailing slashes
    // this allows us to support users specifying hostContext="/" in
    // solr.xml to indicate the root context, instead of hostContext=""
    // which means the default of "solr"
    String localHostContext = trimLeadingAndTrailingSlashes(cloudConfig.getSolrHostContext());
@@ -350,7 +350,7 @@ public class ZkController implements Closeable {

    // seems we dont need to do this again...
    // Overseer.createClientNodes(zkClient, getNodeName());

    // start the overseer first as following code may need it's processing
    if (!zkRunOnly) {
      ElectionContext context = new OverseerElectionContext(zkClient,
@@ -458,7 +458,7 @@ public class ZkController implements Closeable {
    });

    init(registerOnReconnect);

    this.overseerJobQueue = overseer.getStateUpdateQueue();
    this.overseerCollectionQueue = overseer.getCollectionQueue(zkClient);
    this.overseerConfigSetQueue = overseer.getConfigSetQueue(zkClient);
@@ -482,7 +482,7 @@ public class ZkController implements Closeable {
    if (descriptors != null) {
      // before registering as live, make sure everyone is in a
      // down state
      publishNodeAsDown(getNodeName());
      for (CoreDescriptor descriptor : descriptors) {
        // if it looks like we are going to be the leader, we don't
        // want to wait for the following stuff
@@ -524,9 +524,9 @@ public class ZkController implements Closeable {
      }
    }
  }

  private void closeOutstandingElections(final CurrentCoreDescriptorProvider registerOnReconnect) {

    List<CoreDescriptor> descriptors = registerOnReconnect.getCurrentDescriptors();
    if (descriptors != null) {
      for (CoreDescriptor descriptor : descriptors) {
@@ -534,20 +534,20 @@ public class ZkController implements Closeable {
      }
    }
  }

  private ContextKey closeExistingElectionContext(CoreDescriptor cd) {
    // look for old context - if we find it, cancel it
    String collection = cd.getCloudDescriptor().getCollectionName();
    final String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();

    ContextKey contextKey = new ContextKey(collection, coreNodeName);
    ElectionContext prevContext = electionContexts.get(contextKey);

    if (prevContext != null) {
      prevContext.close();
      electionContexts.remove(contextKey);
    }

    return contextKey;
  }
@@ -1002,7 +1002,7 @@ public class ZkController implements Closeable {
      InterruptedException {
    publishAndWaitForDownStates(WAIT_DOWN_STATES_TIMEOUT_SECONDS);
  }

  public void publishAndWaitForDownStates(int timeoutSeconds) throws KeeperException,
      InterruptedException {
@@ -1104,7 +1104,7 @@ public class ZkController implements Closeable {
    List<Op> ops = new ArrayList<>(2);
    ops.add(Op.delete(nodePath, -1));
    ops.add(Op.delete(nodeAddedPath, -1));

    try {
      zkClient.multi(ops, true);
    } catch (NoNodeException e) {
@@ -1194,25 +1194,25 @@ public class ZkController implements Closeable {
      } catch (KeeperException | IOException e) {
        throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);
      }

      // in this case, we want to wait for the leader as long as the leader might
      // wait for a vote, at least - but also long enough that a large cluster has
      // time to get its act together
      String leaderUrl = getLeader(cloudDesc, leaderVoteWait + 600000);

      String ourUrl = ZkCoreNodeProps.getCoreUrl(baseUrl, coreName);
      log.debug("We are " + ourUrl + " and leader is " + leaderUrl);
      boolean isLeader = leaderUrl.equals(ourUrl);
      assert !(isLeader && replica.getType() == Type.PULL) : "Pull replica became leader!";

      try (SolrCore core = cc.getCore(desc.getName())) {

        // recover from local transaction log and wait for it to complete before
        // going active
        // TODO: should this be moved to another thread? To recoveryStrat?
        // TODO: should this actually be done earlier, before (or as part of)
        // leader election perhaps?

        if (core == null) {
          throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE, "SolrCore is no longer available to register");
        }
@@ -1260,7 +1260,7 @@ public class ZkController implements Closeable {
        unregister(coreName, desc, false);
        throw e;
      }

      // make sure we have an update cluster state right away
      zkStateReader.forceUpdateCollection(collection);
      // the watcher is added to a set so multiple calls of this method will left only one watcher
@@ -1350,7 +1350,7 @@ public class ZkController implements Closeable {
            .getCoreUrl();
      }

    } catch (AlreadyClosedException e) {
      throw e;
    } catch (Exception e) {
      log.error("Error getting leader from zk", e);
@@ -1502,7 +1502,7 @@ public class ZkController implements Closeable {
    }
    try {
      String collection = cd.getCloudDescriptor().getCollectionName();

      log.debug("publishing state={}", state.toString());
      // System.out.println(Thread.currentThread().getStackTrace()[3]);
      Integer numShards = cd.getCloudDescriptor().getNumShards();
@@ -1510,11 +1510,11 @@ public class ZkController implements Closeable {
        log.debug("numShards not found on descriptor - reading it from system property");
        numShards = Integer.getInteger(ZkStateReader.NUM_SHARDS_PROP);
      }

      assert collection != null && collection.length() > 0;

      String shardId = cd.getCloudDescriptor().getShardId();

      String coreNodeName = cd.getCloudDescriptor().getCoreNodeName();

      Map<String,Object> props = new HashMap<>();
@@ -1566,7 +1566,7 @@ public class ZkController implements Closeable {
      }

      ZkNodeProps m = new ZkNodeProps(props);

      if (updateLastState) {
        cd.getCloudDescriptor().setLastPublished(state);
      }
@@ -1638,11 +1638,6 @@ public class ZkController implements Closeable {
    overseerJobQueue.offer(Utils.toJSON(m));
  }

-  // convenience for testing
-  void printLayoutToStdOut() throws KeeperException, InterruptedException {
-    zkClient.printLayoutToStdOut();
-  }
-
  public ZkStateReader getZkStateReader() {
    return zkStateReader;
  }
@@ -1842,7 +1837,7 @@ public class ZkController implements Closeable {
      CoreDescriptor descriptor, final String coreZkNodeName) {
    // try not to wait too long here - if we are waiting too long, we should probably
    // move along and join the election

    CloudDescriptor cloudDesc = descriptor.getCloudDescriptor();
    String collection = cloudDesc.getCollectionName();
    String shard = cloudDesc.getShardId();
@@ -2044,7 +2039,7 @@ public class ZkController implements Closeable {
   * has been reserved for the operation, meaning that no other thread/operation can claim
   * it. If for whatever reason, the operation is not scheduled, the asuncId needs to be
   * cleared using {@link #clearAsyncId(String)}.
   * If this method returns false, no reservation has been made, and this asyncId can't
   * be used, since it's being used by another operation (currently or in the past)
   * @param asyncId A string representing the asyncId of an operation. Can't be null.
   * @return True if the reservation succeeds.
@@ -2059,7 +2054,7 @@ public class ZkController implements Closeable {
      throw new RuntimeException(e);
    }
  }

  /**
   * Clears an asyncId previously claimed by calling {@link #claimAsyncId(String)}
   * @param asyncId A string representing the asyncId of an operation. Can't be null.
@@ -2161,7 +2156,7 @@ public class ZkController implements Closeable {

  public void rejoinShardLeaderElection(SolrParams params) {
    try {

      String collectionName = params.get(COLLECTION_PROP);
      String shardId = params.get(SHARD_ID_PROP);
      String coreNodeName = params.get(CORE_NODE_NAME_PROP);
@@ -2171,24 +2166,24 @@ public class ZkController implements Closeable {

      try (SolrCore core = cc.getCore(coreName)) {
        MDCLoggingContext.setCore(core);

        log.info("Rejoin the shard leader election.");

        ContextKey contextKey = new ContextKey(collectionName, coreNodeName);

        ElectionContext prevContext = electionContexts.get(contextKey);
        if (prevContext != null) prevContext.cancelElection();

        ZkNodeProps zkProps = new ZkNodeProps(BASE_URL_PROP, baseUrl, CORE_NAME_PROP, coreName, NODE_NAME_PROP, getNodeName(), CORE_NODE_NAME_PROP, coreNodeName);

        LeaderElector elect = ((ShardLeaderElectionContextBase) prevContext).getLeaderElector();
        ShardLeaderElectionContext context = new ShardLeaderElectionContext(elect, shardId, collectionName,
            coreNodeName, zkProps, this, getCoreContainer());

        context.leaderSeqPath = context.electionPath + LeaderElector.ELECTION_NODE + "/" + electionNode;
        elect.setup(context);
        electionContexts.put(contextKey, context);

        elect.retryElection(context, params.getBool(REJOIN_AT_HEAD_PROP, false));
      }
    } catch (Exception e) {
@@ -2393,7 +2388,7 @@ public class ZkController implements Closeable {
      public void preClose(SolrCore core) {
        unregisterConfListener(confDir, listener);
      }

      @Override
      public void postClose(SolrCore core) {
      }
@@ -2584,11 +2579,11 @@ public class ZkController implements Closeable {
    }
    return false;
  }

  /**
   * Best effort to set DOWN state for all replicas on node.
   *
   * @param nodeName to operate on
   */
  public void publishNodeAsDown(String nodeName) {
@@ -2604,7 +2599,7 @@ public class ZkController implements Closeable {
        log.debug("Publish node as down was interrupted.");
      } catch (KeeperException e) {
        log.warn("Could not publish node as down: " + e.getMessage());
      }
    }
  }

  /**
@@ -262,8 +262,4 @@ public class ConfigOverlay implements MapSerializable {
  public static final String ZNODEVER = "znodeVersion";
  public static final String NAME = "overlay";

-  public static void main(String[] args) {
-    System.out.println(Utils.toJSONString(editable_prop_map));
-  }
-
}
@@ -58,6 +58,7 @@ import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.params.CollectionAdminParams;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.core.snapshots.CollectionSnapshotMetaData.CoreSnapshotMetaData;
+import org.apache.solr.util.CLIO;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -66,7 +67,7 @@ import com.google.common.base.Preconditions;
/**
 * This class provides utility functions required for Solr snapshots functionality.
 */
-public class SolrSnapshotsTool implements Closeable {
+public class SolrSnapshotsTool implements Closeable, CLIO {
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
  private static final DateFormat dateFormat = new SimpleDateFormat("EEE, d MMM yyyy HH:mm:ss z", Locale.getDefault());
@@ -107,11 +108,11 @@ public class SolrSnapshotsTool implements Closeable {
    try {
      resp = createSnap.process(solrClient);
      Preconditions.checkState(resp.getStatus() == 0, "The CREATESNAPSHOT request failed. The status code is " + resp.getStatus());
-      System.out.println("Successfully created snapshot with name " + snapshotName + " for collection " + collectionName);
+      CLIO.out("Successfully created snapshot with name " + snapshotName + " for collection " + collectionName);

    } catch (Exception e) {
      log.error("Failed to create a snapshot with name " + snapshotName + " for collection " + collectionName, e);
-      System.out.println("Failed to create a snapshot with name " + snapshotName + " for collection " + collectionName
+      CLIO.out("Failed to create a snapshot with name " + snapshotName + " for collection " + collectionName
          +" due to following error : "+e.getLocalizedMessage());
    }
  }
@@ -122,11 +123,11 @@ public class SolrSnapshotsTool implements Closeable {
    try {
      resp = deleteSnap.process(solrClient);
      Preconditions.checkState(resp.getStatus() == 0, "The DELETESNAPSHOT request failed. The status code is " + resp.getStatus());
-      System.out.println("Successfully deleted snapshot with name " + snapshotName + " for collection " + collectionName);
+      CLIO.out("Successfully deleted snapshot with name " + snapshotName + " for collection " + collectionName);

    } catch (Exception e) {
      log.error("Failed to delete a snapshot with name " + snapshotName + " for collection " + collectionName, e);
-      System.out.println("Failed to delete a snapshot with name " + snapshotName + " for collection " + collectionName
+      CLIO.out("Failed to delete a snapshot with name " + snapshotName + " for collection " + collectionName
          +" due to following error : "+e.getLocalizedMessage());
    }
  }
@@ -141,12 +142,12 @@ public class SolrSnapshotsTool implements Closeable {

      NamedList apiResult = (NamedList) resp.getResponse().get(SolrSnapshotManager.SNAPSHOTS_INFO);
      for (int i = 0; i < apiResult.size(); i++) {
-        System.out.println(apiResult.getName(i));
+        CLIO.out(apiResult.getName(i));
      }

    } catch (Exception e) {
      log.error("Failed to list snapshots for collection " + collectionName, e);
-      System.out.println("Failed to list snapshots for collection " + collectionName
+      CLIO.out("Failed to list snapshots for collection " + collectionName
          +" due to following error : "+e.getLocalizedMessage());
    }
  }
@@ -156,11 +157,11 @@ public class SolrSnapshotsTool implements Closeable {
      Collection<CollectionSnapshotMetaData> snaps = listCollectionSnapshots(collectionName);
      for (CollectionSnapshotMetaData m : snaps) {
        if (snapshotName.equals(m.getName())) {
-          System.out.println("Name: " + m.getName());
-          System.out.println("Status: " + m.getStatus());
-          System.out.println("Time of creation: " + dateFormat.format(m.getCreationDate()));
-          System.out.println("Total number of cores with snapshot: " + m.getReplicaSnapshots().size());
-          System.out.println("-----------------------------------");
+          CLIO.out("Name: " + m.getName());
+          CLIO.out("Status: " + m.getStatus());
+          CLIO.out("Time of creation: " + dateFormat.format(m.getCreationDate()));
+          CLIO.out("Total number of cores with snapshot: " + m.getReplicaSnapshots().size());
+          CLIO.out("-----------------------------------");
          for (CoreSnapshotMetaData n : m.getReplicaSnapshots()) {
            StringBuilder builder = new StringBuilder();
            builder.append("Core [name=");
@@ -172,13 +173,13 @@ public class SolrSnapshotsTool implements Closeable {
            builder.append(", indexDirPath=");
            builder.append(n.getIndexDirPath());
            builder.append("]\n");
-            System.out.println(builder.toString());
+            CLIO.out(builder.toString());
          }
        }
      }
    } catch (Exception e) {
      log.error("Failed to fetch snapshot details", e);
-      System.out.println("Failed to fetch snapshot details due to following error : " + e.getLocalizedMessage());
+      CLIO.out("Failed to fetch snapshot details due to following error : " + e.getLocalizedMessage());
    }
  }
@@ -272,21 +273,21 @@ public class SolrSnapshotsTool implements Closeable {
  public void prepareForExport(String collectionName, String snapshotName, String localFsPath, Optional<String> pathPrefix, String destPath) {
    try {
      buildCopyListings(collectionName, snapshotName, localFsPath, pathPrefix);
-      System.out.println("Successfully prepared copylisting for the snapshot export.");
+      CLIO.out("Successfully prepared copylisting for the snapshot export.");
    } catch (Exception e) {
      log.error("Failed to prepare a copylisting for snapshot with name " + snapshotName + " for collection "
          + collectionName, e);
-      System.out.println("Failed to prepare a copylisting for snapshot with name " + snapshotName + " for collection "
+      CLIO.out("Failed to prepare a copylisting for snapshot with name " + snapshotName + " for collection "
          + collectionName + " due to following error : " + e.getLocalizedMessage());
      System.exit(1);
    }

    try {
      backupCollectionMetaData(collectionName, snapshotName, destPath);
-      System.out.println("Successfully backed up collection meta-data");
+      CLIO.out("Successfully backed up collection meta-data");
    } catch (Exception e) {
      log.error("Failed to backup collection meta-data for collection " + collectionName, e);
-      System.out.println("Failed to backup collection meta-data for collection " + collectionName
+      CLIO.out("Failed to backup collection meta-data for collection " + collectionName
          + " due to following error : " + e.getLocalizedMessage());
      System.exit(1);
    }
@@ -306,7 +307,7 @@ public class SolrSnapshotsTool implements Closeable {
      backup.processAsync(asyncReqId.orElse(null), solrClient);
    } catch (Exception e) {
      log.error("Failed to backup collection meta-data for collection " + collectionName, e);
-      System.out.println("Failed to backup collection meta-data for collection " + collectionName
+      CLIO.out("Failed to backup collection meta-data for collection " + collectionName
          + " due to following error : " + e.getLocalizedMessage());
      System.exit(1);
    }
@@ -342,7 +343,7 @@ public class SolrSnapshotsTool implements Closeable {
    try {
      cmd = parser.parse(options, args);
    } catch (ParseException e) {
-      System.out.println(e.getLocalizedMessage());
+      CLIO.out(e.getLocalizedMessage());
      printHelp(options);
      System.exit(1);
    }
@@ -380,7 +381,7 @@ public class SolrSnapshotsTool implements Closeable {
      try {
        new URI(pathPrefix.get());
      } catch (URISyntaxException e) {
-        System.out.println(
+        CLIO.out(
            "The specified File system path prefix " + pathPrefix.get()
            + " is invalid. The error is " + e.getLocalizedMessage());
        System.exit(1);
@@ -401,14 +402,14 @@ public class SolrSnapshotsTool implements Closeable {
    } else if (cmd.hasOption(HELP)) {
      printHelp(options);
    } else {
-      System.out.println("Unknown command specified.");
+      CLIO.out("Unknown command specified.");
      printHelp(options);
    }
  }

  private static String requiredArg(Options options, CommandLine cmd, String optVal) {
    if (!cmd.hasOption(optVal)) {
-      System.out.println("Please specify the value for option " + optVal);
+      CLIO.out("Please specify the value for option " + optVal);
      printHelp(options);
      System.exit(1);
    }
|
@ -6,10 +6,6 @@ public class QueryParserTokenManager implements QueryParserConstants
|
|||
{
|
||||
int commentNestingDepth ;
|
||||
|
||||
/** Debug output. */
|
||||
public java.io.PrintStream debugStream = System.out;
|
||||
/** Set debug output. */
|
||||
public void setDebugStream(java.io.PrintStream ds) { debugStream = ds; }
|
||||
private final int jjStopStringLiteralDfa_3(int pos, long active0)
|
||||
{
|
||||
switch (pos)
|
||||
|
@@ -1332,9 +1328,9 @@ private int jjMoveNfa_1(int startState, int curPos)
   }
}
static final int[] jjnextStates = {
   32, 34, 35, 31, 36, 17, 18, 20, 56, 59, 25, 60, 57, 59, 25, 60,
   22, 23, 38, 39, 46, 38, 39, 40, 46, 38, 39, 41, 49, 54, 46, 42,
   43, 45, 50, 51, 53, 38, 39, 54, 46, 58, 61, 29, 2, 4, 5,
};
private static final boolean jjCanMove_0(int hiByte, int i1, int i2, long l1, long l2)
{
@@ -1375,9 +1371,9 @@ private static final boolean jjCanMove_2(int hiByte, int i1, int i2, long l1, lo

/** Token literal values. */
public static final String[] jjstrLiteralImages = {
"", null, null, null, null, null, null, null, null, null, null, null, null,
null, null, null, "\53", "\55", null, "\50", "\51", "\72", "\52", "\136", null, null,
null, null, null, null, "\133", "\173", null, "\146\151\154\164\145\162\50", null,
"\124\117", "\135", "\175", null, null, };

/** Lexer state names. */
@@ -1390,14 +1386,14 @@ public static final String[] lexStateNames = {

/** Lex State array. */
public static final int[] jjnewLexState = {
   -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1,
   -1, -1, -1, -1, -1, 1, 1, -1, -1, 3, -1, 3, 3, -1, -1,
};
static final long[] jjtoToken = {
   0xffffffe001L,
};
static final long[] jjtoSkip = {
   0x1f00L,
};
protected CharStream input_stream;
private final int[] jjrounds = new int[63];
@@ -1482,7 +1478,7 @@ int jjmatchedPos;
int jjmatchedKind;

/** Get the next Token. */
public Token getNextToken()
{
  Token matchedToken;
  int curPos = 0;
|
|
@ -18,6 +18,7 @@ package org.apache.solr.response;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.io.Writer;
|
||||
import java.lang.invoke.MethodHandles;
|
||||
import java.util.Arrays;
|
||||
import java.util.Iterator;
|
||||
import java.util.Map;
|
||||
|
@@ -31,6 +32,8 @@ import org.apache.solr.common.util.XML;
 import org.apache.solr.request.SolrQueryRequest;
 import org.apache.solr.search.ReturnFields;
 import org.apache.solr.search.SolrReturnFields;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 import static org.apache.solr.common.params.CommonParams.NAME;
@@ -39,6 +42,7 @@ import static org.apache.solr.common.params.CommonParams.NAME;
 * @lucene.internal
 */
public class XMLWriter extends TextResponseWriter {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  public static float CURRENT_VERSION=2.2f;
@@ -53,7 +57,7 @@ public class XMLWriter extends TextResponseWriter {
    +" xsi:noNamespaceSchemaLocation=\"http://pi.cnet.com/cnet-search/response.xsd\">\n"
    ).toCharArray();
  ***/

  private static final char[] XML_START2_NOSCHEMA=("<response>\n").toCharArray();

  final int version;
@@ -162,7 +166,7 @@ public class XMLWriter extends TextResponseWriter {

  @Override
  public void writeStartDocumentList(String name,
      long start, int size, long numFound, Float maxScore) throws IOException
  {
    if (doIndent) indent();
@@ -175,7 +179,7 @@ public class XMLWriter extends TextResponseWriter {
      writeAttr("maxScore",Float.toString(maxScore));
    }
    writer.write(">");

    incLevel();
  }
@@ -183,7 +187,7 @@ public class XMLWriter extends TextResponseWriter {
  /**
   * The SolrDocument should already have multivalued fields implemented as
   * Collections -- this will not rewrite to <arr>
   */
  @Override
  public void writeSolrDocument(String name, SolrDocument doc, ReturnFields returnFields, int idx ) throws IOException {
    startTag("doc", name, false);
@@ -196,7 +200,7 @@ public class XMLWriter extends TextResponseWriter {

      Object val = doc.getFieldValue(fname);
      if( "_explain_".equals( fname ) ) {
-        System.out.println( val );
+        log.debug(String.valueOf(val));
      }
      writeVal(fname, val);
    }
@@ -206,11 +210,11 @@ public class XMLWriter extends TextResponseWriter {
        writeSolrDocument(null, childDoc, new SolrReturnFields(), idx);
      }
    }

    decLevel();
    writer.write("</doc>");
  }

  @Override
  public void writeEndDocumentList() throws IOException
  {
@@ -107,22 +107,17 @@ public class ExplainAugmenterFactory extends TransformerFactory
  }

  @Override
-  public void transform(SolrDocument doc, int docid) {
+  public void transform(SolrDocument doc, int docid) throws IOException {
    if( context != null && context.getQuery() != null ) {
-      try {
-        Explanation exp = context.getSearcher().explain(context.getQuery(), docid);
-        if( style == Style.nl ) {
-          doc.setField( name, SolrPluginUtils.explanationToNamedList(exp) );
-        }
-        else if( style == Style.html ) {
-          doc.setField( name, toHtml(exp));
-        }
-        else {
-          doc.setField( name, exp.toString() );
-        }
-      }
-      catch (IOException e) {
-        e.printStackTrace();
+      Explanation exp = context.getSearcher().explain(context.getQuery(), docid);
+      if( style == Style.nl ) {
+        doc.setField( name, SolrPluginUtils.explanationToNamedList(exp) );
+      }
+      else if( style == Style.html ) {
+        doc.setField( name, toHtml(exp));
+      }
+      else {
+        doc.setField( name, exp.toString() );
      }
    }
  }
@@ -172,8 +172,7 @@ public class CloudMLTQParser extends QParser {

      return realMLTQuery.build();
    } catch (IOException e) {
-      e.printStackTrace();
-      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Bad Request");
+      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Bad Request", e);
    }

  }
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.util;
+
+import java.io.PrintStream;
+
+import org.apache.solr.common.util.SuppressForbidden;
+
+@SuppressForbidden(reason = "For use in command line tools only")
+public interface CLIO {
+  static void out(String s) {
+    System.out.println(s);
+  }
+
+  static void err(String s) {
+    System.err.println(s);
+  }
+
+  static PrintStream getOutStream() {
+    return System.out;
+  }
+
+  static PrintStream getErrStream() {
+    return System.err;
+  }
+}
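The new `CLIO` interface is the commit's escape hatch: it is the one place annotated with `@SuppressForbidden`, so command-line tools route console output through it while direct `System.out`/`System.err` calls elsewhere trip the forbidden-apis check. A minimal usage sketch (this `ExampleTool` class is hypothetical, not part of the commit; the real callers are tools like ZkCLI and SimplePostTool shown in the surrounding hunks):

```java
package org.apache.solr.util;

// Hypothetical CLI entry point: implements CLIO and performs all console
// I/O through its static helpers, so this class itself contains no direct
// System.out/System.err calls for forbidden-apis to flag.
public class ExampleTool implements CLIO {
  public static void main(String[] args) {
    if (args.length == 0) {
      CLIO.err("Usage: ExampleTool <name>"); // stderr via CLIO
      System.exit(1);
    }
    CLIO.out("Hello " + args[0]);            // stdout via CLIO
  }
}
```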
@@ -50,7 +50,7 @@ import org.slf4j.LoggerFactory;
/**A utility class to verify signatures
 *
 */
-public final class CryptoKeys {
+public final class CryptoKeys implements CLIO {
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
  private final Map<String, PublicKey> keys;
  private Exception exception;
@@ -342,14 +342,14 @@ public final class CryptoKeys {

  public static void main(String[] args) throws Exception {
    RSAKeyPair keyPair = new RSAKeyPair();
-    System.out.println(keyPair.getPublicKeyStr());
+    CLIO.out(keyPair.getPublicKeyStr());
    PublicKey pk = deserializeX509PublicKey(keyPair.getPublicKeyStr());
    byte[] payload = "Hello World!".getBytes(StandardCharsets.UTF_8);
    byte[] encrypted = keyPair.encrypt(ByteBuffer.wrap(payload));
    String cipherBase64 = Base64.byteArrayToBase64(encrypted);
-    System.out.println("encrypted: "+ cipherBase64);
-    System.out.println("signed: "+ Base64.byteArrayToBase64(keyPair.signSha256(payload)));
-    System.out.println("decrypted "+ new String(decryptRSA(encrypted , pk), StandardCharsets.UTF_8));
+    CLIO.out("encrypted: "+ cipherBase64);
+    CLIO.out("signed: "+ Base64.byteArrayToBase64(keyPair.signSha256(payload)));
+    CLIO.out("decrypted "+ new String(decryptRSA(encrypted , pk), StandardCharsets.UTF_8));
  }

}
|
@ -18,10 +18,15 @@ package org.apache.solr.util;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.io.Reader;
|
||||
import java.lang.invoke.MethodHandles;
|
||||
|
||||
import org.noggit.JSONParser;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
public class RecordingJSONParser extends JSONParser {
|
||||
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
|
||||
|
||||
static ThreadLocal<char[]> buf = new ThreadLocal<>();
|
||||
private final char[] bufCopy;
|
||||
//global position is the global position at the beginning of my buffer
|
||||
|
@@ -68,7 +73,7 @@ public class RecordingJSONParser extends JSONParser {
  private void captureMissing() {
    long currPosition = getPosition() - globalPosition;
    if(currPosition < 0){
-      System.out.println("ERROR");
+      log.error("currPosition less than zero in captureMissing()?");
    }

    if (currPosition > lastMarkedPosition) {
|
@ -70,9 +70,9 @@ import static java.nio.charset.StandardCharsets.US_ASCII;
|
|||
import static java.nio.charset.StandardCharsets.UTF_8;
|
||||
|
||||
/**
|
||||
* A simple utility class for posting raw updates to a Solr server,
|
||||
* A simple utility class for posting raw updates to a Solr server,
|
||||
* has a main method so it can be run on the command line.
|
||||
* View this not as a best-practice code example, but as a standalone
|
||||
* View this not as a best-practice code example, but as a standalone
|
||||
* example built with an explicit purpose of not having external
|
||||
* jar dependencies.
|
||||
*/
|
||||
|
@@ -119,7 +119,7 @@ public class SimplePostTool {
  // Backlog for crawling
  List<LinkedHashSet<URL>> backlog = new ArrayList<>();
  Set<URL> visited = new HashSet<>();

  static final Set<String> DATA_MODES = new HashSet<>();
  static final String USAGE_STRING_SHORT =
      "Usage: java [SystemProperties] -jar post.jar [-h|-] [<file|folder|url|arg> [<file|folder|url|arg>...]]";
@@ -133,7 +133,7 @@ public class SimplePostTool {
    DATA_MODES.add(DATA_MODE_ARGS);
    DATA_MODES.add(DATA_MODE_STDIN);
    DATA_MODES.add(DATA_MODE_WEB);

    mimeMap = new HashMap<>();
    mimeMap.put("xml", "application/xml");
    mimeMap.put("csv", "text/csv");
@@ -158,7 +158,7 @@ public class SimplePostTool {
    mimeMap.put("txt", "text/plain");
    mimeMap.put("log", "text/plain");
  }

  /**
   * See usage() for valid command line usage
   * @param args the params on the command line
@@ -191,12 +191,12 @@ public class SimplePostTool {
      usageShort();
      return;
    }

    if (commit) commit();
    if (optimize) optimize();
    displayTiming((long) timer.getTime());
  }

  /**
   * Pretty prints the number of milliseconds taken to post the content to Solr
   * @param millis the time in milliseconds
@@ -204,7 +204,7 @@ public class SimplePostTool {
  private void displayTiming(long millis) {
    SimpleDateFormat df = new SimpleDateFormat("H:mm:ss.SSS", Locale.getDefault());
    df.setTimeZone(TimeZone.getTimeZone("UTC"));
-    System.out.println("Time spent: "+df.format(new Date(millis)));
+    CLIO.out("Time spent: "+df.format(new Date(millis)));
  }

  /**
@@ -220,19 +220,19 @@ public class SimplePostTool {
     if (! DATA_MODES.contains(mode)) {
       fatal("System Property 'data' is not valid for this tool: " + mode);
     }
 
     String params = System.getProperty("params", "");
 
     String host = System.getProperty("host", DEFAULT_POST_HOST);
     String port = System.getProperty("port", DEFAULT_POST_PORT);
     String core = System.getProperty("c");
 
     urlStr = System.getProperty("url");
 
     if (urlStr == null && core == null) {
       fatal("Specifying either url or core/collection is mandatory.\n" + USAGE_STRING_SHORT);
     }
 
     if(urlStr == null) {
       urlStr = String.format(Locale.ROOT, "http://%s:%s/solr/%s/update", host, port, core);
     }
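The hunk above is how the tool decides where to post: an explicit -Durl wins, otherwise the update URL is assembled from the host, port and c (core/collection) properties. A self-contained illustration of that fallback; the default values and the collection name here are assumptions for the example, not taken from this diff:

    import java.net.MalformedURLException;
    import java.net.URL;
    import java.util.Locale;

    class PostUrlDemo {
      public static void main(String[] args) throws MalformedURLException {
        // Mirrors the fallback above: without -Durl, the update URL is assembled
        // from the host, port and core/collection system properties.
        String host = System.getProperty("host", "localhost");  // default assumed for the demo
        String port = System.getProperty("port", "8983");       // default assumed for the demo
        String core = System.getProperty("c", "techproducts");  // hypothetical collection name
        String urlStr = String.format(Locale.ROOT, "http://%s:%s/solr/%s/update", host, port, core);
        System.out.println(new URL(urlStr)); // http://localhost:8983/solr/techproducts/update
      }
    }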
@@ -246,7 +246,7 @@ public class SimplePostTool {
     }
     if (user != null)
       info("Basic Authentication enabled, user=" + user);
 
     boolean auto = isOn(System.getProperty("auto", DEFAULT_AUTO));
     String type = System.getProperty("type");
     String format = System.getProperty("format");

@@ -264,11 +264,11 @@ public class SimplePostTool {
     try {
       delay = Integer.parseInt(System.getProperty("delay", ""+delay));
     } catch(Exception e) { }
-    OutputStream out = isOn(System.getProperty("out", DEFAULT_OUT)) ? System.out : null;
+    OutputStream out = isOn(System.getProperty("out", DEFAULT_OUT)) ? CLIO.getOutStream() : null;
     String fileTypes = System.getProperty("filetypes", DEFAULT_FILE_TYPES);
     boolean commit = isOn(System.getProperty("commit",DEFAULT_COMMIT));
     boolean optimize = isOn(System.getProperty("optimize",DEFAULT_OPTIMIZE));
 
     return new SimplePostTool(mode, url, auto, type, format, recursive, delay, fileTypes, out, commit, optimize, args);
   } catch (MalformedURLException e) {
     fatal("System Property 'url' is not a valid URL: " + urlStr);

@@ -292,7 +292,7 @@ public class SimplePostTool {
   * @param args a String[] of arguments, varies between modes
   */
  public SimplePostTool(String mode, URL url, boolean auto, String type, String format,
                        int recursive, int delay, String fileTypes, OutputStream out,
                        boolean commit, boolean optimize, String[] args) {
    this.mode = mode;
    this.solrUrl = url;

@@ -311,19 +311,19 @@ public class SimplePostTool {
   }
 
   public SimplePostTool() {}
 
   //
   // Do some action depending on which mode we have
   //
   private void doFilesMode() {
     currentDepth = 0;
     // Skip posting files if special param "-" given
     if (!args[0].equals("-")) {
       info("Posting files to [base] url " + solrUrl + (!auto?" using content-type "+(type==null?DEFAULT_CONTENT_TYPE:type):"")+"...");
       if(auto)
         info("Entering auto mode. File endings considered are "+fileTypes);
       if(recursive > 0)
         info("Entering recursive mode, max depth="+recursive+", delay="+delay+"s");
       int numFilesPosted = postFiles(args, 0, out, type);
       info(numFilesPosted + " files indexed.");
     }

@@ -344,12 +344,12 @@ public class SimplePostTool {
       fatal("Specifying content-type with \"-Ddata=web\" is not supported");
     }
     if (args[0].equals("-")) {
       // Skip posting url if special param "-" given
       return 0;
     }
     // Set Extracting handler as default
     solrUrl = appendUrlPath(solrUrl, "/extract");
 
     info("Posting web pages to Solr url "+solrUrl);
     auto=true;
     info("Entering auto mode. Indexing pages with content-types corresponding to file endings "+fileTypes);

@@ -372,7 +372,7 @@ public class SimplePostTool {
 
   private void doStdinMode() {
     info("POSTing stdin to " + solrUrl + "...");
     postData(System.in, null, out, type, solrUrl);
   }
 
   private void reset() {

@@ -385,12 +385,12 @@ public class SimplePostTool {
   // USAGE
   //
   private static void usageShort() {
-    System.out.println(USAGE_STRING_SHORT+"\n"+
+    CLIO.out(USAGE_STRING_SHORT+"\n"+
        " Please invoke with -h option for extended usage help.");
   }
 
   private static void usage() {
-    System.out.println
+    CLIO.out
     (USAGE_STRING_SHORT+"\n\n" +
      "Supported System Properties and their defaults:\n"+
      " -Dc=<core/collection>\n"+

@@ -458,14 +458,14 @@ public class SimplePostTool {
         File[] files = parent.listFiles(ff);
         if(files == null || files.length == 0) {
           warn("No files or directories matching "+srcFile);
           continue;
         }
         filesPosted += postFiles(parent.listFiles(ff), out, type);
       }
     }
     return filesPosted;
   }
 
   /** Post all filenames provided in args
    * @param files array of Files
    * @param startIndexInArgs offset to start

@@ -489,14 +489,14 @@ public class SimplePostTool {
         File[] fileList = parent.listFiles(ff);
         if(fileList == null || fileList.length == 0) {
           warn("No files or directories matching "+srcFile);
           continue;
         }
         filesPosted += postFiles(fileList, out, type);
       }
     }
     return filesPosted;
   }
 
   /**
    * Posts a whole directory
    * @return number of files posted total

@@ -603,7 +603,7 @@ public class SimplePostTool {
         PageFetcherResult result = pageFetcher.readPageFromUrl(u);
         if(result.httpStatus == 200) {
           u = (result.redirectUrl != null) ? result.redirectUrl : u;
           URL postUrl = new URL(appendParam(solrUrl.toString(),
               "literal.id="+URLEncoder.encode(u.toString(),"UTF-8") +
               "&literal.url="+URLEncoder.encode(u.toString(),"UTF-8")));
           boolean success = postData(new ByteArrayInputStream(result.content.array(), result.content.arrayOffset(),result.content.limit() ), null, out, result.contentType, postUrl);

@@ -632,7 +632,7 @@ public class SimplePostTool {
       backlog.add(subStack);
       numPages += webCrawl(level+1, out);
     }
     return numPages;
   }
   public static class BAOS extends ByteArrayOutputStream {
     public ByteBuffer getByteBuffer() {

@@ -726,22 +726,22 @@ public class SimplePostTool {
   protected static boolean isOn(String property) {
     return("true,on,yes,1".indexOf(property) > -1);
   }
 
   static void warn(String msg) {
-    System.err.println("SimplePostTool: WARNING: " + msg);
+    CLIO.err("SimplePostTool: WARNING: " + msg);
   }
 
   static void info(String msg) {
-    System.out.println(msg);
+    CLIO.out(msg);
   }
 
   static void fatal(String msg) {
-    System.err.println("SimplePostTool: FATAL: " + msg);
+    CLIO.err("SimplePostTool: FATAL: " + msg);
     System.exit(2);
   }
 
   /**
    * Does a simple commit operation
    */
   public void commit() {
     info("COMMITting Solr index changes to " + solrUrl + "...");
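These warn/info/fatal helpers are the funnel for all of the tool's console chatter, so once they delegate to CLIO the rest of the code can be scanned mechanically. The commit's subject line says the ban is enforced with forbidden-apis; a signatures file for such a check typically looks like the sketch below. This is illustrative of the tool's general syntax only — the exact file, entries and message the Solr build uses are not shown in this excerpt:

    @defaultMessage Don't use System.out/System.err in solr core; route output through CLIO or a logger
    java.lang.System#out
    java.lang.System#err
    java.lang.Thread#dumpStack()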
@@ -749,7 +749,7 @@ public class SimplePostTool {
   }
 
   /**
    * Does a simple optimize operation
    */
   public void optimize() {
     info("Performing an OPTIMIZE to " + solrUrl + "...");

@@ -757,7 +757,7 @@ public class SimplePostTool {
   }
 
   /**
    * Appends a URL query parameter to a URL
    * @param url the original URL
    * @param param the parameter(s) to append, separated by "&"
    * @return the string version of the resulting URL

@@ -778,7 +778,7 @@ public class SimplePostTool {
 
   /**
    * Opens the file and posts its contents to the solrUrl,
    * writes to response to output.
    */
   public void postFile(File file, OutputStream output, String type) {
     InputStream is = null;

@@ -814,7 +814,6 @@ public class SimplePostTool {
       is = new FileInputStream(file);
       postData(is, file.length(), output, type, url);
     } catch (IOException e) {
-      e.printStackTrace();
       warn("Can't open/read file: " + file);
     } finally {
       try {

@@ -829,7 +828,7 @@ public class SimplePostTool {
    * Appends to the path of the URL
    * @param url the URL
    * @param append the path to append
    * @return the final URL version
    */
   protected static URL appendUrlPath(URL url, String append) throws MalformedURLException {
     return new URL(url.getProtocol() + "://" + url.getAuthority() + url.getPath() + append + (url.getQuery() != null ? "?"+url.getQuery() : ""));

@@ -858,7 +857,7 @@ public class SimplePostTool {
       warn("The specified URL "+url+" is not a valid URL. Please check");
     }
   }
 
   /**
    * Performs a simple get on the given URL
    */

@@ -919,7 +918,7 @@ public class SimplePostTool {
     } catch (IOException e) {
       fatal("IOException while posting data: " + e);
     }
 
     try {
       success &= checkResponseCode(urlc);
       try (final InputStream in = urlc.getInputStream()) {

@@ -952,7 +951,7 @@ public class SimplePostTool {
 
   private static boolean checkResponseCode(HttpURLConnection urlc) throws IOException, GeneralSecurityException {
     if (urlc.getResponseCode() >= 400) {
       warn("Solr returned an error #" + urlc.getResponseCode() +
           " (" + urlc.getResponseMessage() + ") for url: " + urlc.getURL());
       Charset charset = StandardCharsets.ISO_8859_1;
       final String contentType = urlc.getContentType();

@@ -987,7 +986,7 @@ public class SimplePostTool {
   }
 
   /**
    * Converts a string to an input stream
    * @param s the string
    * @return the input stream
    */

@@ -996,7 +995,7 @@ public class SimplePostTool {
   }
 
   /**
    * Pipes everything from the source to the dest. If dest is null,
    * then everything is read from source and thrown away.
    */
   private static void pipe(InputStream source, OutputStream dest) throws IOException {

@@ -1020,7 +1019,7 @@ public class SimplePostTool {
   //
   // Utility methods for XPath handing
   //
 
   /**
    * Gets all nodes matching an XPath
    */

@@ -1030,7 +1029,7 @@ public class SimplePostTool {
     XPathExpression expr = xp.compile(xpath);
     return (NodeList) expr.evaluate(n, XPathConstants.NODESET);
   }
 
   /**
    * Gets the string content of the matching an XPath
    * @param n the node (or doc)

@@ -1050,9 +1049,9 @@ public class SimplePostTool {
     } else
       return "";
   }
 
   /**
    * Takes a string as input and returns a DOM
    */
   public static Document makeDom(byte[] in) throws SAXException, IOException,
       ParserConfigurationException {

@@ -1069,7 +1068,7 @@ public class SimplePostTool {
   {
     private String _pattern;
     private Pattern p;
 
     public GlobFileFilter(String pattern, boolean isRegex)
     {
       _pattern = pattern;

@@ -1085,32 +1084,32 @@ public class SimplePostTool {
             .replace("?", ".");
         _pattern = "^" + _pattern + "$";
       }
 
       try {
         p = Pattern.compile(_pattern,Pattern.CASE_INSENSITIVE);
       } catch(PatternSyntaxException e) {
         fatal("Invalid type list "+pattern+". "+e.getDescription());
       }
     }
 
     @Override
     public boolean accept(File file)
     {
       return p.matcher(file.getName()).find();
     }
   }
 
   //
   // Simple crawler class which can fetch a page and check for robots.txt
   //
   class PageFetcher {
     Map<String, List<String>> robotsCache;
     static final String DISALLOW = "Disallow:";
 
     public PageFetcher() {
       robotsCache = new HashMap<>();
     }
 
     public PageFetcherResult readPageFromUrl(URL u) {
       PageFetcherResult res = new PageFetcherResult();
       try {

@@ -1146,8 +1145,8 @@ public class SimplePostTool {
         } else {
           is = conn.getInputStream();
         }
 
         // Read into memory, so that we later can pull links from the page without re-fetching
         res.content = inputStreamToByteArray(is);
         is.close();
       } else {

@@ -1160,7 +1159,7 @@ public class SimplePostTool {
       }
       return res;
     }
 
     public boolean isDisallowedByRobots(URL url) {
       String host = url.getHost();
       String strRobot = url.getProtocol() + "://" + host + "/robots.txt";

@@ -1168,7 +1167,7 @@ public class SimplePostTool {
       if(disallows == null) {
         disallows = new ArrayList<>();
         URL urlRobot;
         try {
           urlRobot = new URL(strRobot);
           disallows = parseRobotsTxt(urlRobot.openStream());
         } catch (MalformedURLException e) {

@@ -1177,7 +1176,7 @@ public class SimplePostTool {
           // There is no robots.txt, will cache an empty disallow list
         }
       }
 
       robotsCache.put(host, disallows);
 
       String strURL = url.getFile();

@@ -1254,7 +1253,7 @@ public class SimplePostTool {
       return l;
     }
   }
 
   /**
    * Utility class to hold the result form a page fetch
    */
[File diff suppressed because it is too large]
@@ -45,12 +45,12 @@ public class CleanupOldIndexTest extends SolrCloudTestCase {
         .addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-dynamic").resolve("conf"))
         .configure();
   }
 
   @AfterClass
   public static void afterClass() throws Exception {
 
     if (suiteFailureMarker.wasSuccessful()) {
-      zkClient().printLayoutToStdOut();
+      zkClient().printLayoutToStream(System.out);
     }
 
   }

@@ -117,6 +117,6 @@ public class CleanupOldIndexTest extends SolrCloudTestCase {
     assertTrue(!oldIndexDir1.isDirectory());
     assertTrue(!oldIndexDir2.isDirectory());
   }
 
 
 }
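From here on, every test file in this diff makes the same one-line substitution: printLayoutToStdOut() becomes printLayoutToStream(System.out). Moving the System.out reference out of the shared SolrZkClient helper and into the test callers is what keeps the sysout ban satisfiable in the main code while tests keep printing. A hedged sketch of the calling pattern; the printLayoutToStream(PrintStream) signature is inferred from these call sites, not quoted from the commit:

    import java.io.PrintStream;

    import org.apache.solr.common.cloud.SolrZkClient;

    class ZkLayoutDump {
      // Old: zkClient.printLayoutToStdOut() hard-wired System.out inside the client.
      // New: the caller supplies the stream, so only test code touches sysout.
      static void dumpLayout(SolrZkClient zkClient) {
        PrintStream out = System.out; // acceptable here; the forbidden-apis check targets solr core
        zkClient.printLayoutToStream(out); // assumed signature: printLayoutToStream(PrintStream)
      }
    }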
@@ -33,12 +33,12 @@ import org.junit.Test;
 @Slow
 public class LeaderElectionIntegrationTest extends SolrCloudTestCase {
   private final static int NUM_REPLICAS_OF_SHARD1 = 5;
 
   @BeforeClass
   public static void beforeClass() {
     System.setProperty("solrcloud.skip.autorecovery", "true");
   }
 
   @Override
   public void setUp() throws Exception {
     super.setUp();

@@ -96,7 +96,7 @@ public class LeaderElectionIntegrationTest extends SolrCloudTestCase {
     }
 
     if (jetty == getRunner(leader)) {
-      cluster.getZkClient().printLayoutToStdOut();
+      cluster.getZkClient().printLayoutToStream(System.out);
       fail("We didn't find a new leader! " + jetty + " was close, but it's still showing as the leader");
     }
 

@@ -166,10 +166,10 @@ public class LeaderElectionIntegrationTest extends SolrCloudTestCase {
   }
 
   private String getLeader(String collection) throws InterruptedException {
 
     ZkNodeProps props = cluster.getSolrClient().getZkStateReader().getLeaderRetry(collection, "shard1", 30000);
     String leader = props.getStr(ZkStateReader.NODE_NAME_PROP);
 
     return leader;
   }
 
@@ -48,30 +48,30 @@ import org.slf4j.LoggerFactory;
 public class LeaderElectionTest extends SolrTestCaseJ4 {
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   static final int TIMEOUT = 30000;
   private ZkTestServer server;
   private SolrZkClient zkClient;
   private ZkStateReader zkStateReader;
   private Map<Integer,Thread> seqToThread;
 
   private volatile boolean stopStress = false;
 
   @BeforeClass
   public static void beforeClass() {
 
   }
 
   @AfterClass
   public static void afterClass() {
 
   }
 
   @Override
   public void setUp() throws Exception {
     super.setUp();
     String zkDir = createTempDir("zkData").toFile().getAbsolutePath();;
 
     server = new ZkTestServer(zkDir);
     server.setTheTickTime(1000);
     server.run();

@@ -179,7 +179,7 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
         es.close();
         return;
       }
 
       while (!stop) {
         try {
           Thread.sleep(100);

@@ -187,9 +187,9 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
           return;
         }
       }
 
     }
 
     public void close() {
       es.close();
       this.stop = true;

@@ -258,7 +258,7 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
         Thread.sleep(500);
       }
     }
-    zkClient.printLayoutToStdOut();
+    zkClient.printLayoutToStream(System.out);
     throw new RuntimeException("Could not get leader props for " + collection + " " + slice);
   }
 
@@ -283,69 +283,69 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
 
   @Test
   public void testElection() throws Exception {
 
     List<ClientThread> threads = new ArrayList<>();
 
     for (int i = 0; i < 15; i++) {
       ClientThread thread = new ClientThread("shard1", i);
       threads.add(thread);
     }
     try {
       startAndJoinElection(threads);
 
       int leaderThread = getLeaderThread();
 
       // whoever the leader is, should be the n_0 seq
       assertEquals(0, threads.get(leaderThread).seq);
 
       // kill n_0, 1, 3 and 4
       ((ClientThread) seqToThread.get(0)).close();
 
       waitForLeader(threads, 1);
 
       leaderThread = getLeaderThread();
 
       // whoever the leader is, should be the n_1 seq
 
       assertEquals(1, threads.get(leaderThread).seq);
 
       ((ClientThread) seqToThread.get(4)).close();
       ((ClientThread) seqToThread.get(1)).close();
       ((ClientThread) seqToThread.get(3)).close();
 
       // whoever the leader is, should be the n_2 seq
 
       waitForLeader(threads, 2);
 
       leaderThread = getLeaderThread();
       assertEquals(2, threads.get(leaderThread).seq);
 
       // kill n_5, 2, 6, 7, and 8
       ((ClientThread) seqToThread.get(5)).close();
       ((ClientThread) seqToThread.get(2)).close();
       ((ClientThread) seqToThread.get(6)).close();
       ((ClientThread) seqToThread.get(7)).close();
       ((ClientThread) seqToThread.get(8)).close();
 
       waitForLeader(threads, 9);
       leaderThread = getLeaderThread();
 
       // whoever the leader is, should be the n_9 seq
       assertEquals(9, threads.get(leaderThread).seq);
 
     } finally {
       // cleanup any threads still running
       for (ClientThread thread : threads) {
         thread.close();
         thread.interrupt();
 
       }
 
       for (Thread thread : threads) {
         thread.join();
       }
     }
 
   }
 
   @Test
@@ -415,21 +415,21 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
     String leaderUrl = getLeaderUrl("collection1", "shard1");
     return Integer.parseInt(leaderUrl.replaceAll("/", ""));
   }
 
   @Test
   public void testStressElection() throws Exception {
     final ScheduledExecutorService scheduler = Executors
         .newScheduledThreadPool(15, new DefaultSolrThreadFactory("stressElection"));
     final List<ClientThread> threads = Collections
         .synchronizedList(new ArrayList<ClientThread>());
 
     // start with a leader
     ClientThread thread1 = null;
     thread1 = new ClientThread("shard1", 0);
     threads.add(thread1);
     scheduler.schedule(thread1, 0, TimeUnit.MILLISECONDS);
 
 
     Thread scheduleThread = new Thread() {
       @Override

@@ -450,11 +450,11 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
         }
       }
     };
 
     Thread killThread = new Thread() {
       @Override
       public void run() {
 
         while (!stopStress) {
           try {
             int j;

@@ -475,11 +475,11 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
         }
       }
     };
 
     Thread connLossThread = new Thread() {
       @Override
       public void run() {
 
         while (!stopStress) {
           try {
             Thread.sleep(50);

@@ -495,49 +495,49 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
               e.printStackTrace();
             }
             Thread.sleep(500);
 
           } catch (Exception e) {
 
           }
         }
       }
     };
 
     scheduleThread.start();
     connLossThread.start();
     killThread.start();
 
     Thread.sleep(4000);
 
     stopStress = true;
 
     scheduleThread.interrupt();
     connLossThread.interrupt();
     killThread.interrupt();
 
     scheduleThread.join();
     scheduler.shutdownNow();
 
     connLossThread.join();
     killThread.join();
 
     int seq = threads.get(getLeaderThread()).getSeq();
 
     // we have a leader we know, TODO: lets check some other things
 
     // cleanup any threads still running
     for (ClientThread thread : threads) {
       thread.es.zkClient.getSolrZooKeeper().close();
       thread.close();
     }
 
     for (Thread thread : threads) {
       thread.join();
     }
 
   }
 
   @Override
   public void tearDown() throws Exception {
     zkClient.close();

@@ -545,8 +545,8 @@ public class LeaderElectionTest extends SolrTestCaseJ4 {
     server.shutdown();
     super.tearDown();
   }
 
   private void printLayout() throws Exception {
-    zkClient.printLayoutToStdOut();
+    zkClient.printLayoutToStream(System.out);
   }
 }
@@ -114,10 +114,10 @@ public class OverseerTest extends SolrTestCaseJ4 {
   private static ZkTestServer server;
 
   private static SolrZkClient zkClient;
 
   private volatile boolean testDone = false;
 
   private final List<ZkController> zkControllers = Collections.synchronizedList(new ArrayList<>());
   private final List<Overseer> overseers = Collections.synchronizedList(new ArrayList<>());
   private final List<ZkStateReader> readers = Collections.synchronizedList(new ArrayList<>());

@@ -127,15 +127,15 @@ public class OverseerTest extends SolrTestCaseJ4 {
   private final List<CloudSolrClient> solrClients = Collections.synchronizedList(new ArrayList<>());
 
   private static final String COLLECTION = SolrTestCaseJ4.DEFAULT_TEST_COLLECTION_NAME;
 
   public static class MockZKController{
 
     private final SolrZkClient zkClient;
     private final ZkStateReader zkStateReader;
     private final String nodeName;
     private final Map<String, ElectionContext> electionContext = Collections.synchronizedMap(new HashMap<String, ElectionContext>());
     private List<Overseer> overseers;
 
     public MockZKController(String zkAddress, String nodeName, List<Overseer> overseers) throws InterruptedException, TimeoutException, IOException, KeeperException {
       this.overseers = overseers;
       this.nodeName = nodeName;

@@ -145,14 +145,14 @@ public class OverseerTest extends SolrTestCaseJ4 {
 
       zkStateReader = new ZkStateReader(zkClient);
       zkStateReader.createClusterStateWatchersAndUpdate();
 
       // live node
       final String nodePath = ZkStateReader.LIVE_NODES_ZKNODE + "/" + nodeName;
       zkClient.makePath(nodePath, CreateMode.EPHEMERAL, true);
     }
 
     private void deleteNode(final String path) {
 
       try {
         zkClient.delete(path, -1, true);
       } catch (NoNodeException e) {

@@ -256,7 +256,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       DocCollection dc = zkStateReader.getClusterState().getCollectionOrNull(collection);
       return getShardId(dc, coreNodeName);
     }
 
     private String getShardId(DocCollection collection, String coreNodeName) {
       if (collection == null) return null;
       Map<String,Slice> slices = collection.getSlicesMap();

@@ -277,50 +277,50 @@ public class OverseerTest extends SolrTestCaseJ4 {
     public ZkStateReader getZkReader() {
       return zkStateReader;
     }
   }
 
   @BeforeClass
   public static void beforeClass() throws Exception {
     assumeWorkingMockito();
 
     System.setProperty("solr.zkclienttimeout", "30000");
 
     String zkDir = createTempDir("zkData").toFile().getAbsolutePath();
 
     server = new ZkTestServer(zkDir);
     server.run();
 
     zkClient = server.getZkClient();
 
     initCore();
   }
 
 
   @Before
   public void setUp() throws Exception {
     testDone = false;
     super.setUp();
   }
 
   @AfterClass
   public static void afterClass() throws Exception {
     if (null != zkClient) {
-      zkClient.printLayoutToStdOut();
+      zkClient.printLayoutToStream(System.out);
     }
 
     System.clearProperty("solr.zkclienttimeout");
 
     if (null != server) {
       server.shutdown();
     }
   }
 
   @After
   public void tearDown() throws Exception {
     testDone = true;
 
     ExecutorService customThreadPool = ExecutorUtil.newMDCAwareCachedThreadPool(new SolrjNamedThreadFactory("closeThreadPool"));
 
     for (ZkController zkController : zkControllers) {
       customThreadPool.submit( () -> zkController.close());
     }

@@ -332,7 +332,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
     for (UpdateShardHandler updateShardHandler : updateShardHandlers) {
       customThreadPool.submit( () -> updateShardHandler.close());
     }
 
     for (SolrClient solrClient : solrClients) {
       customThreadPool.submit( () -> IOUtils.closeQuietly(solrClient));
     }

@@ -340,22 +340,22 @@ public class OverseerTest extends SolrTestCaseJ4 {
     for (ZkStateReader reader : readers) {
       customThreadPool.submit( () -> reader.close());
     }
 
     for (SolrZkClient solrZkClient : zkClients) {
       customThreadPool.submit( () -> IOUtils.closeQuietly(solrZkClient));
     }
 
     ExecutorUtil.shutdownAndAwaitTermination(customThreadPool);
 
     customThreadPool = ExecutorUtil.newMDCAwareCachedThreadPool(new SolrjNamedThreadFactory("closeThreadPool"));
 
     for (Overseer overseer : overseers) {
       customThreadPool.submit( () -> overseer.close());
     }
 
     ExecutorUtil.shutdownAndAwaitTermination(customThreadPool);
 
     overseers.clear();
     zkControllers.clear();
     httpShardHandlerFactorys.clear();

@@ -363,10 +363,10 @@ public class OverseerTest extends SolrTestCaseJ4 {
     solrClients.clear();
     readers.clear();
     zkClients.clear();
 
     server.tryCleanSolrZkNode();
     server.makeSolrZkNode();
 
     super.tearDown();
   }
 
@@ -378,7 +378,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
 
     try {
 
       ZkController.createClusterZkNodes(zkClient);
 
       overseerClient = electNewOverseer(server.getZkAddress());

@@ -397,13 +397,13 @@ public class OverseerTest extends SolrTestCaseJ4 {
           "createNodeSet", "");
       ZkDistributedQueue q = overseers.get(0).getStateUpdateQueue();
       q.offer(Utils.toJSON(m));
 
       for (int i = 0; i < numShards; i++) {
         assertNotNull("shard got no id?", mockController.publishState(COLLECTION, "core" + (i + 1), "node" + (i + 1), "shard" + ((i % 3) + 1), Replica.State.ACTIVE, 3, true, overseers.get(0)));
       }
 
       reader.waitForState(COLLECTION, 30, TimeUnit.SECONDS, MiniSolrCloudCluster.expectedShardsAndActiveReplicas(3, 6));
 
       final Map<String, Replica> rmap = reader.getClusterState().getCollection(COLLECTION).getSlice("shard1").getReplicasMap();
       assertEquals(rmap.toString(), 2, rmap.size());
       assertEquals(rmap.toString(), 2, reader.getClusterState().getCollection(COLLECTION).getSlice("shard2").getReplicasMap().size());

@@ -444,7 +444,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
         assertNotNull("shard got no id?", mockController.publishState(COLLECTION, "core" + (i + 1),
             "node" + (i + 1), "shard" + ((i % 3) + 1), Replica.State.ACTIVE, 3, true, overseers.get(0)));
       }
 
       reader.waitForState(COLLECTION, 30, TimeUnit.SECONDS, MiniSolrCloudCluster.expectedShardsAndActiveReplicas(3, 3));
 
       assertEquals(1, reader.getClusterState().getCollection(COLLECTION).getSlice("shard1").getReplicasMap().size());

@@ -467,7 +467,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
         assertNotNull("shard got no id?", mockController.publishState("collection2",
             "core" + (i + 1), "node" + (i + 1), "shard" + ((i % 3) + 1), Replica.State.ACTIVE, 3, true, overseers.get(0)));
       }
 
       reader.waitForState("collection2", 30, TimeUnit.SECONDS, MiniSolrCloudCluster.expectedShardsAndActiveReplicas(3, 3));
 
       assertEquals(1, reader.getClusterState().getCollection("collection2").getSlice("shard1").getReplicasMap().size());

@@ -479,7 +479,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
         assertNotNull(reader.getLeaderUrl("collection2", "shard2", 15000));
         assertNotNull(reader.getLeaderUrl("collection2", "shard3", 15000));
       }
 
     } finally {
       if (mockController != null) {
         mockController.close();

@@ -540,7 +540,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
   private void waitForCollections(ZkStateReader stateReader, String... collections) throws InterruptedException, KeeperException, TimeoutException {
     int maxIterations = 100;
     while (0 < maxIterations--) {
 
       final ClusterState state = stateReader.getClusterState();
       Set<String> availableCollections = state.getCollectionsMap().keySet();
       int availableCount = 0;

@@ -555,15 +555,15 @@ public class OverseerTest extends SolrTestCaseJ4 {
     }
     log.warn("Timeout waiting for collections: " + Arrays.asList(collections) + " state:" + stateReader.getClusterState());
   }
 
   @Test
   public void testStateChange() throws Exception {
 
     ZkStateReader reader = null;
     SolrZkClient overseerClient = null;
 
     try {
 
       ZkController.createClusterZkNodes(zkClient);
 
       reader = new ZkStateReader(zkClient);

@@ -589,9 +589,9 @@ public class OverseerTest extends SolrTestCaseJ4 {
           ZkStateReader.CORE_NODE_NAME_PROP, "core_node1",
           ZkStateReader.ROLES_PROP, "",
           ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
 
       q.offer(Utils.toJSON(m));
 
       waitForCollections(reader, COLLECTION);
       verifyReplicaStatus(reader, "collection1", "shard1", "core_node1", Replica.State.RECOVERING);
 

@@ -616,7 +616,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       close(reader);
     }
   }
 
   private void verifyShardLeader(ZkStateReader reader, String collection, String shard, String expectedCore)
       throws InterruptedException, KeeperException, TimeoutException {
 

@@ -630,7 +630,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
         (docCollection.getLeader(shard) != null) ? docCollection.getLeader(shard).getStr(ZkStateReader.CORE_NAME_PROP)
             : null);
   }
 
   private Overseer getOpenOverseer() {
     return MiniSolrCloudCluster.getOpenOverseer(overseers);
   }

@@ -641,7 +641,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
     SolrZkClient overseerClient = null;
     ZkStateReader reader = null;
     MockZKController mockController = null;
 
     try {
 
       final String core = "core1";

@@ -650,47 +650,47 @@ public class OverseerTest extends SolrTestCaseJ4 {
       final int numShards = 1;
 
       ZkController.createClusterZkNodes(zkClient);
 
       reader = new ZkStateReader(zkClient);
       reader.createClusterStateWatchersAndUpdate();
 
       mockController = new MockZKController(server.getZkAddress(), "node1", overseers);
 
       overseerClient = electNewOverseer(server.getZkAddress());
 
       mockController.createCollection(COLLECTION, 1);
 
       ZkController zkController = createMockZkController(server.getZkAddress(), zkClient, reader);
 
       mockController.publishState(COLLECTION, core, core_node, "shard1",
           Replica.State.RECOVERING, numShards, true, overseers.get(0));
 
       waitForCollections(reader, COLLECTION);
       verifyReplicaStatus(reader, COLLECTION, "shard1", "core_node1", Replica.State.RECOVERING);
 
       int version = getClusterStateVersion(zkClient);
 
       mockController.publishState(COLLECTION, core, core_node, "shard1", Replica.State.ACTIVE,
           numShards, true, overseers.get(0));
 
       while (version == getClusterStateVersion(zkClient));
 
       verifyReplicaStatus(reader, COLLECTION, "shard1", "core_node1", Replica.State.ACTIVE);
       version = getClusterStateVersion(zkClient);
 
       mockController.publishState(COLLECTION, core, core_node, "shard1",
           Replica.State.RECOVERING, numShards, true, overseers.get(0));
 
       overseerClient.close();
 
       version = getClusterStateVersion(zkClient);
 
       overseerClient = electNewOverseer(server.getZkAddress());
 
       while (version == getClusterStateVersion(zkClient));
 
       verifyReplicaStatus(reader, COLLECTION, "shard1", "core_node1", Replica.State.RECOVERING);
 
       assertEquals("Live nodes count does not match", 1, reader
           .getClusterState().getLiveNodes().size());
       assertEquals(shard+" replica count does not match", 1, reader.getClusterState()

@@ -701,7 +701,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
 
       assertTrue(COLLECTION +" should remain after removal of the last core", // as of SOLR-5209 core removal does not cascade to remove the slice and collection
           reader.getClusterState().hasCollection(COLLECTION));
 
       reader.waitForState(COLLECTION, 5000,
           TimeUnit.MILLISECONDS, (liveNodes, collectionState) -> collectionState != null && collectionState.getReplica(core_node) == null);
       assertTrue(core_node+" should be gone after publishing the null state",

@@ -733,7 +733,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       overseers.get(overseers.size() -1).getZkStateReader().getZkClient().close();
     }
     ZkController zkController = createMockZkController(server.getZkAddress(), zkClient, reader);
 
     UpdateShardHandler updateShardHandler = new UpdateShardHandler(UpdateShardHandlerConfig.DEFAULT);
     updateShardHandlers.add(updateShardHandler);
     HttpShardHandlerFactory httpShardHandlerFactory = new HttpShardHandlerFactory();

@@ -768,7 +768,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       close(reader);
     }
   }
 
   private AtomicInteger killCounter = new AtomicInteger();
 
   private class OverseerRestarter implements Runnable{

@@ -779,7 +779,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
     public OverseerRestarter(String zkAddress) {
       this.zkAddress = zkAddress;
     }
 
     @Override
     public void run() {
       try {

@@ -861,7 +861,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
         }
         Thread.sleep(50);
       }
 
       assertTrue(showQpeek(workQueue), workQueue.peek() == null);
       assertTrue(showQpeek(q), q.peek() == null);
     } finally {

@@ -869,7 +869,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       close(reader);
     }
   }
 
   private String showQpeek(ZkDistributedQueue q) throws KeeperException, InterruptedException {
     if (q == null) {
       return "";

@@ -878,7 +878,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
     if (bytes == null) {
       return "";
     }
 
     ZkNodeProps json = ZkNodeProps.load(bytes);
     return json.toString();
   }

@@ -939,9 +939,9 @@ public class OverseerTest extends SolrTestCaseJ4 {
         mockController2.close();
         mockController2 = null;
       }
 
       Thread.sleep(100);
 
       timeout = new TimeOut(1, TimeUnit.SECONDS, TimeSource.NANO_TIME);
       while (!timeout.hasTimedOut()) {
         try {

@@ -954,7 +954,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       }
 
       mockController2 = new MockZKController(server.getZkAddress(), "node2", overseers);
 
       timeout = new TimeOut(5, TimeUnit.SECONDS, TimeSource.NANO_TIME);
       while (!timeout.hasTimedOut()) {
         try {

@@ -979,7 +979,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
           e.printStackTrace();
         }
       }
 
       mockController.close();
       mockController = null;

@@ -993,7 +993,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
     ZkCoreNodeProps leaderProps;
     try {
       leaderProps = zkController.getLeaderProps(COLLECTION, "shard1", 1000);
     } catch (SolrException e) {
       return false;
     } catch (InterruptedException e) {
       throw new RuntimeException(e);
@@ -1023,26 +1023,26 @@ public class OverseerTest extends SolrTestCaseJ4 {
 
   @Test
   public void testDoubleAssignment() throws Exception {
 
     SolrZkClient overseerClient = null;
     ZkStateReader reader = null;
     MockZKController mockController = null;
 
     try {
 
       ZkController.createClusterZkNodes(zkClient);
 
       reader = new ZkStateReader(zkClient);
       reader.createClusterStateWatchersAndUpdate();
 
       mockController = new MockZKController(server.getZkAddress(), "node1", overseers);
 
       overseerClient = electNewOverseer(server.getZkAddress());
 
       mockController.createCollection(COLLECTION, 1);
 
       ZkController zkController = createMockZkController(server.getZkAddress(), zkClient, reader);
 
       mockController.publishState(COLLECTION, "core1", "core_node1", "shard1", Replica.State.RECOVERING, 1, true, overseers.get(0));
 
       waitForCollections(reader, COLLECTION);

@@ -1052,11 +1052,11 @@ public class OverseerTest extends SolrTestCaseJ4 {
       mockController.close();
 
       int version = getClusterStateVersion(zkClient);
 
       mockController = new MockZKController(server.getZkAddress(), "node1", overseers);
 
       mockController.publishState(COLLECTION, "core1", "core_node1","shard1", Replica.State.RECOVERING, 1, true, overseers.get(0));
 
       try {
         reader.waitForState(COLLECTION, 5, TimeUnit.SECONDS, (liveNodes, collectionState) -> version == zkController
             .getZkStateReader().getClusterState().getZkClusterStateVersion());

@@ -1064,7 +1064,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
         // okay
       }
       ClusterState state = reader.getClusterState();
 
       int numFound = 0;
       Map<String, DocCollection> collectionsMap = state.getCollectionsMap();
       for (Map.Entry<String, DocCollection> entry : collectionsMap.entrySet()) {

@@ -1206,13 +1206,13 @@ public class OverseerTest extends SolrTestCaseJ4 {
     }
   }
 
   @Test
   public void testReplay() throws Exception{
 
     SolrZkClient overseerClient = null;
     ZkStateReader reader = null;
 
     try {
 
       ZkController.createClusterZkNodes(zkClient);

@@ -1246,9 +1246,9 @@ public class OverseerTest extends SolrTestCaseJ4 {
           ZkStateReader.ROLES_PROP, "",
           ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
       queue.offer(Utils.toJSON(m));
 
       overseerClient = electNewOverseer(server.getZkAddress());
 
       //submit to proper queue
       queue = overseers.get(0).getStateUpdateQueue();
       m = new ZkNodeProps(Overseer.QUEUE_OPERATION, OverseerAction.STATE.toLower(),

@@ -1260,11 +1260,11 @@ public class OverseerTest extends SolrTestCaseJ4 {
           ZkStateReader.ROLES_PROP, "",
           ZkStateReader.STATE_PROP, Replica.State.RECOVERING.toString());
       queue.offer(Utils.toJSON(m));
 
       reader.waitForState(COLLECTION, 1000, TimeUnit.MILLISECONDS,
           (liveNodes, collectionState) -> collectionState != null && collectionState.getSlice("shard1") != null
               && collectionState.getSlice("shard1").getReplicas().size() == 3);
 
       assertNotNull(reader.getClusterState().getCollection(COLLECTION).getSlice("shard1"));
       assertEquals(3, reader.getClusterState().getCollection(COLLECTION).getSlice("shard1").getReplicasMap().size());
     } finally {

@@ -1392,7 +1392,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       client.close();
     }
   }
 
   private int getClusterStateVersion(SolrZkClient controllerClient)
       throws KeeperException, InterruptedException {
     return controllerClient.exists(ZkStateReader.CLUSTER_STATE, null, false).getVersion();

@@ -1430,7 +1430,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
 
   private ZkController createMockZkController(String zkAddress, SolrZkClient zkClient, ZkStateReader reader) throws InterruptedException, NoSuchFieldException, SecurityException {
     ZkController zkController = mock(ZkController.class);
 
     if (zkClient == null) {
       SolrZkClient newZkClient = new SolrZkClient(server.getZkAddress(), AbstractZkTestCase.TIMEOUT);
       Mockito.doAnswer(

@@ -1443,7 +1443,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
     } else {
       doNothing().when(zkController).close();
     }
 
     CoreContainer mockAlwaysUpCoreContainer = mock(CoreContainer.class,
         Mockito.withSettings().defaultAnswer(Mockito.CALLS_REAL_METHODS));
     when(mockAlwaysUpCoreContainer.isShutDown()).thenReturn(testDone); // Allow retry on session expiry

@@ -1455,7 +1455,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
     when(zkController.getZkStateReader()).thenReturn(reader);
 
     when(zkController.getLeaderProps(anyString(), anyString(), anyInt())).thenCallRealMethod();
     when(zkController.getLeaderProps(anyString(), anyString(), anyInt(), anyBoolean())).thenCallRealMethod();
     doReturn(getCloudDataProvider(zkAddress, zkClient, reader))
         .when(zkController).getSolrCloudManager();
     return zkController;

@@ -1559,7 +1559,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
         ZkStateReader.CORE_NODE_NAME_PROP, "core_node"+N);
 
     q.offer(Utils.toJSON(m));
 
     {
       String shard = "shard"+ss;
       zkStateReader.waitForState(COLLECTION, 15000, TimeUnit.MILLISECONDS, (liveNodes, collectionState) -> collectionState != null && (collectionState.getSlice(shard) == null || collectionState.getSlice(shard).getReplicasMap().get("core_node"+N) == null));

@@ -1582,7 +1582,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
       close(zkStateReader);
     }
   }
 
   @Test
   public void testLatchWatcher() throws InterruptedException {
     OverseerTaskQueue.LatchWatcher latch1 = new OverseerTaskQueue.LatchWatcher();

@@ -1596,7 +1596,7 @@ public class OverseerTest extends SolrTestCaseJ4 {
     latch1.await(10000);// Expecting no wait
     after = System.nanoTime();
     assertTrue(TimeUnit.NANOSECONDS.toMillis(after-before) < 1000);
 
     final AtomicBoolean expectedEventProcessed = new AtomicBoolean(false);
     final AtomicBoolean doneWaiting = new AtomicBoolean(false);
     final OverseerTaskQueue.LatchWatcher latch2 = new OverseerTaskQueue.LatchWatcher(Event.EventType.NodeCreated);
@@ -61,7 +61,7 @@ public class RollingRestartTest extends AbstractFullDistribZkTestBase {
     assertNotNull(leader);
     log.info("Current overseer leader = {}", leader);
 
-    cloudClient.getZkStateReader().getZkClient().printLayoutToStdOut();
+    cloudClient.getZkStateReader().getZkClient().printLayoutToStream(System.out);
 
     int numDesignateOverseers = TEST_NIGHTLY ? 16 : 2;
     numDesignateOverseers = Math.max(getShardCount(), numDesignateOverseers);

@@ -78,7 +78,7 @@ public class RollingRestartTest extends AbstractFullDistribZkTestBase {
 
     waitUntilOverseerDesignateIsLeader(cloudClient.getZkStateReader().getZkClient(), designates, MAX_WAIT_TIME);
 
-    cloudClient.getZkStateReader().getZkClient().printLayoutToStdOut();
+    cloudClient.getZkStateReader().getZkClient().printLayoutToStream(System.out);
 
     boolean sawLiveDesignate = false;
     int numRestarts = 1 + random().nextInt(TEST_NIGHTLY ? 12 : 2);

@@ -111,20 +111,20 @@ public class RollingRestartTest extends AbstractFullDistribZkTestBase {
             "/overseer_elect/election"));
         fail("No overseer leader found after restart #" + (i + 1) + ": " + leader);
       }
 
       cloudClient.getZkStateReader().updateLiveNodes();
       sawLiveDesignate = CollectionUtils.intersection(cloudClient.getZkStateReader().getClusterState().getLiveNodes(), designates).size() > 0;
 
       }
     }
 
     assertTrue("Test may not be working if we never saw a live designate", sawLiveDesignate);
 
     leader = OverseerCollectionConfigSetProcessor.getLeaderNode(cloudClient.getZkStateReader().getZkClient());
     assertNotNull(leader);
     log.info("Current overseer leader (after restart) = {}", leader);
 
-    cloudClient.getZkStateReader().getZkClient().printLayoutToStdOut();
+    cloudClient.getZkStateReader().getZkClient().printLayoutToStream(System.out);
   }
 
   static boolean waitUntilOverseerDesignateIsLeader(SolrZkClient testZkClient, List<String> overseerDesignates, long timeoutInNanos) throws KeeperException, InterruptedException {
@@ -70,12 +70,12 @@ import com.carrotsearch.randomizedtesting.annotations.Repeat;
 @Slow
 @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
 public class TestPullReplica extends SolrCloudTestCase {
 
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
   private String collectionName = null;
   private final static int REPLICATION_TIMEOUT_SECS = 30;
 
   private String suggestedCollectionName() {
     return (getTestClass().getSimpleName().replace("Test", "") + "_" + getSaferTestName().split(" ")[0]).replaceAll("(.)(\\p{Upper})", "$1_$2").toLowerCase(Locale.ROOT);
   }

@@ -85,8 +85,8 @@ public class TestPullReplica extends SolrCloudTestCase {
     // cloudSolrClientMaxStaleRetries
     System.setProperty("cloudSolrClientMaxStaleRetries", "1");
     System.setProperty("zkReaderGetLeaderRetryTimeoutMs", "1000");
 
     configureCluster(2) // 2 + random().nextInt(3)
         .addConfig("conf", configset("cloud-minimal"))
         .configure();
     Boolean useLegacyCloud = rarely();

@@ -95,18 +95,18 @@ public class TestPullReplica extends SolrCloudTestCase {
     CollectionAdminResponse response = clusterPropRequest.process(cluster.getSolrClient());
     assertEquals(0, response.getStatus());
   }
 
   @AfterClass
   public static void tearDownCluster() {
     System.clearProperty("cloudSolrClientMaxStaleRetries");
     System.clearProperty("zkReaderGetLeaderRetryTimeoutMs");
     TestInjection.reset();
   }
 
   @Override
   public void setUp() throws Exception {
     super.setUp();
 
     collectionName = suggestedCollectionName();
     expectThrows(SolrException.class, () -> getCollectionState(collectionName));
   }

@@ -127,7 +127,7 @@ public class TestPullReplica extends SolrCloudTestCase {
     }
     super.tearDown();
   }
 
   @Repeat(iterations=2) // 2 times to make sure cleanup is complete and we can create the same collection
   // commented out on: 17-Feb-2019 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // 21-May-2018
   public void testCreateDelete() throws Exception {

@@ -181,7 +181,7 @@ public class TestPullReplica extends SolrCloudTestCase {
         // read-only replicas can never become leaders
         assertFalse(s.getLeader().getType() == Replica.Type.PULL);
         List<String> shardElectionNodes = cluster.getZkClient().getChildren(ZkStateReader.getShardLeadersElectPath(collectionName, s.getName()), null, true);
         assertEquals("Unexpected election nodes for Shard: " + s.getName() + ": " + Arrays.toString(shardElectionNodes.toArray()),
             1, shardElectionNodes.size());
       }
       assertUlogPresence(docCollection);

@@ -196,10 +196,10 @@ public class TestPullReplica extends SolrCloudTestCase {
         }
       }
     } finally {
-      zkClient().printLayoutToStdOut();
+      zkClient().printLayoutToStream(System.out);
     }
   }
 
   /**
    * Asserts that Update logs don't exist for replicas of type {@link org.apache.solr.common.cloud.Replica.Type#PULL}
    */

@@ -221,7 +221,7 @@ public class TestPullReplica extends SolrCloudTestCase {
       }
     }
   }
 
   @SuppressWarnings("unchecked")
   // 12-Jun-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028")
   public void testAddDocs() throws Exception {

@@ -232,19 +232,19 @@ public class TestPullReplica extends SolrCloudTestCase {
     waitForState("Expected collection to be created with 1 shard and " + (numPullReplicas + 1) + " replicas", collectionName, clusterShape(1, numPullReplicas + 1));
     DocCollection docCollection = assertNumberOfReplicas(1, 0, numPullReplicas, false, true);
     assertEquals(1, docCollection.getSlices().size());
 
     boolean reloaded = false;
     int numDocs = 0;
     while (true) {
       numDocs++;
       cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", String.valueOf(numDocs), "foo", "bar"));
       cluster.getSolrClient().commit(collectionName);
 
       Slice s = docCollection.getSlices().iterator().next();
       try (HttpSolrClient leaderClient = getHttpSolrClient(s.getLeader().getCoreUrl())) {
         assertEquals(numDocs, leaderClient.query(new SolrQuery("*:*")).getResults().getNumFound());
       }
 
       TimeOut t = new TimeOut(REPLICATION_TIMEOUT_SECS, TimeUnit.SECONDS, TimeSource.NANO_TIME);
       for (Replica r:s.getReplicas(EnumSet.of(Replica.Type.PULL))) {
         //TODO: assert replication < REPLICATION_TIMEOUT_SECS

@@ -266,7 +266,7 @@ public class TestPullReplica extends SolrCloudTestCase {
             "qt", "/admin/plugins",
             "stats", "true");
         QueryResponse statsResponse = pullReplicaClient.query(req);
         assertEquals("Replicas shouldn't process the add document request: " + statsResponse,
             0L, ((Map<String, Object>)((NamedList<Object>)statsResponse.getResponse()).findRecursive("plugins", "UPDATE", "updateHandler", "stats")).get("UPDATE.updateHandler.adds"));
       }
     }

@@ -282,7 +282,7 @@ public class TestPullReplica extends SolrCloudTestCase {
     }
     assertUlogPresence(docCollection);
   }
 
   public void testAddRemovePullReplica() throws Exception {
     CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1, 0, 0)
         .setMaxShardsPerNode(100)
@ -290,23 +290,23 @@ public class TestPullReplica extends SolrCloudTestCase {
|
|||
waitForState("Expected collection to be created with 2 shards and 1 replica each", collectionName, clusterShape(2, 2));
|
||||
DocCollection docCollection = assertNumberOfReplicas(2, 0, 0, false, true);
|
||||
assertEquals(2, docCollection.getSlices().size());
|
||||
|
||||
|
||||
addReplicaToShard("shard1", Replica.Type.PULL);
|
||||
docCollection = assertNumberOfReplicas(2, 0, 1, true, false);
|
||||
addReplicaToShard("shard2", Replica.Type.PULL);
|
||||
docCollection = assertNumberOfReplicas(2, 0, 2, true, false);
|
||||
|
||||
|
||||
waitForState("Expecting collection to have 2 shards and 2 replica each", collectionName, clusterShape(2, 4));
|
||||
|
||||
|
||||
//Delete pull replica from shard1
|
||||
CollectionAdminRequest.deleteReplica(
|
||||
collectionName,
|
||||
"shard1",
|
||||
collectionName,
|
||||
"shard1",
|
||||
docCollection.getSlice("shard1").getReplicas(EnumSet.of(Replica.Type.PULL)).get(0).getName())
|
||||
.process(cluster.getSolrClient());
|
||||
assertNumberOfReplicas(2, 0, 1, true, true);
|
||||
}
|
||||
|
||||
|
||||
public void testRemoveAllWriterReplicas() throws Exception {
|
||||
doTestNoLeader(true);
|
||||
}
|
||||
|
@ -316,14 +316,14 @@ public class TestPullReplica extends SolrCloudTestCase {
|
|||
public void testKillLeader() throws Exception {
|
||||
doTestNoLeader(false);
|
||||
}
|
||||
|
||||
|
||||
@Ignore("Ignore until I figure out a way to reliably record state transitions")
|
||||
public void testPullReplicaStates() throws Exception {
|
||||
// Validate that pull replicas go through the correct states when starting, stopping, reconnecting
|
||||
CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1, 0, 0)
|
||||
.setMaxShardsPerNode(100)
|
||||
.process(cluster.getSolrClient());
|
||||
// cluster.getSolrClient().getZkStateReader().registerCore(collectionName); //TODO: Is this needed?
|
||||
// cluster.getSolrClient().getZkStateReader().registerCore(collectionName); //TODO: Is this needed?
|
||||
waitForState("Replica not added", collectionName, activeReplicaCount(1, 0, 0));
|
||||
addDocs(500);
|
||||
List<Replica.State> statesSeen = new ArrayList<>(3);
|
||||
|
@ -339,14 +339,14 @@ public class TestPullReplica extends SolrCloudTestCase {
|
|||
});
|
||||
CollectionAdminRequest.addReplicaToShard(collectionName, "shard1", Replica.Type.PULL).process(cluster.getSolrClient());
|
||||
waitForState("Replica not added", collectionName, activeReplicaCount(1, 0, 1));
|
||||
zkClient().printLayoutToStdOut();
|
||||
zkClient().printLayoutToStream(System.out);
|
||||
log.info("Saw states: " + Arrays.toString(statesSeen.toArray()));
|
||||
assertEquals("Expecting DOWN->RECOVERING->ACTIVE but saw: " + Arrays.toString(statesSeen.toArray()), 3, statesSeen.size());
|
||||
assertEquals("Expecting DOWN->RECOVERING->ACTIVE but saw: " + Arrays.toString(statesSeen.toArray()), Replica.State.DOWN, statesSeen.get(0));
|
||||
assertEquals("Expecting DOWN->RECOVERING->ACTIVE but saw: " + Arrays.toString(statesSeen.toArray()), Replica.State.RECOVERING, statesSeen.get(0));
|
||||
assertEquals("Expecting DOWN->RECOVERING->ACTIVE but saw: " + Arrays.toString(statesSeen.toArray()), Replica.State.ACTIVE, statesSeen.get(0));
|
||||
}
|
||||
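
[Editor's note] The hunk above shows the pattern this commit applies throughout: SolrZkClient's printLayoutToStdOut() hard-codes System.out, which the new forbidden-apis sysout check flags, while printLayoutToStream(System.out) takes the destination as a parameter so the test opts into stdout explicitly. A minimal sketch of that refactor, with an illustrative class name (this is not the Solr class itself):

    import java.io.PrintStream;

    // Illustrative stand-in only -- LayoutDumper is hypothetical, not SolrZkClient.
    class LayoutDumper {

      // Old shape: the System.out reference is baked into the API, so the
      // method body itself trips the forbidden-apis sysout signatures.
      //   void printLayoutToStdOut() { printLayoutToStream(System.out); }

      // New shape: the caller supplies the stream; tests pass System.out
      // explicitly, production code can pass anything else.
      void printLayoutToStream(PrintStream out) {
        out.println("/ (2)");
        out.println(" /configs (1)");
        out.println(" /collections (1)");
      }
    }
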

  public void testRealTimeGet() throws SolrServerException, IOException, KeeperException, InterruptedException {
    // should be redirected to Replica.Type.NRT
    int numReplicas = random().nextBoolean()?1:2;
@@ -389,7 +389,7 @@ public class TestPullReplica extends SolrCloudTestCase {
      id++;
    }
  }

  /*
   * validate that replication still happens on a new leader
   */
@@ -399,7 +399,7 @@ public class TestPullReplica extends SolrCloudTestCase {
      .process(cluster.getSolrClient());
    waitForState("Expected collection to be created with 1 shard and 2 replicas", collectionName, clusterShape(1, 2));
    DocCollection docCollection = assertNumberOfReplicas(1, 0, 1, false, true);

    // Add a document and commit
    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "1", "foo", "bar"));
    cluster.getSolrClient().commit(collectionName);
@@ -407,16 +407,16 @@ public class TestPullReplica extends SolrCloudTestCase {
    try (HttpSolrClient leaderClient = getHttpSolrClient(s.getLeader().getCoreUrl())) {
      assertEquals(1, leaderClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    }

    waitForNumDocsInAllReplicas(1, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)));

    // Delete leader replica from shard1
    ignoreException("No registered leader was found"); //These are expected
    JettySolrRunner leaderJetty = null;
    if (removeReplica) {
      CollectionAdminRequest.deleteReplica(
          collectionName,
          "shard1",
          s.getLeader().getName())
        .process(cluster.getSolrClient());
    } else {
@@ -424,15 +424,15 @@ public class TestPullReplica extends SolrCloudTestCase {
      leaderJetty.stop();
      waitForState("Leader replica not removed", collectionName, clusterShape(1, 1));
      // Wait for cluster state to be updated
      waitForState("Replica state not updated in cluster state",
          collectionName, clusterStateReflectsActiveAndDownReplicas());
    }
    docCollection = assertNumberOfReplicas(0, 0, 1, true, true);

    // Check that there is no leader for the shard
    Replica leader = docCollection.getSlice("shard1").getLeader();
    assertTrue(leader == null || !leader.isActive(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes()));

    // Pull replica on the other hand should be active
    Replica pullReplica = docCollection.getSlice("shard1").getReplicas(EnumSet.of(Replica.Type.PULL)).get(0);
    assertTrue(pullReplica.isActive(cluster.getSolrClient().getZkStateReader().getClusterState().getLiveNodes()));
@@ -442,7 +442,7 @@ public class TestPullReplica extends SolrCloudTestCase {
      highestTerm = zkShardTerms.getHighestTerm();
    }
    // add document, this should fail since there is no leader. Pull replica should not accept the update
    expectThrows(SolrException.class, () ->
        cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "2", "foo", "zoo"))
    );
    if (removeReplica) {
@@ -450,10 +450,10 @@ public class TestPullReplica extends SolrCloudTestCase {
        assertEquals(highestTerm, zkShardTerms.getHighestTerm());
      }
    }

    // Also fails if I send the update to the pull replica explicitly
    try (HttpSolrClient pullReplicaClient = getHttpSolrClient(docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)).get(0).getCoreUrl())) {
      expectThrows(SolrException.class, () ->
          cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "2", "foo", "zoo"))
      );
    }
@@ -462,7 +462,7 @@ public class TestPullReplica extends SolrCloudTestCase {
        assertEquals(highestTerm, zkShardTerms.getHighestTerm());
      }
    }

    // Queries should still work
    waitForNumDocsInAllReplicas(1, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)));
    // Add nrt replica back. Since there is no nrt now, new nrt will have no docs. There will be data loss, since the it will become the leader
@@ -487,7 +487,7 @@ public class TestPullReplica extends SolrCloudTestCase {
      // Pull replicas will replicate the empty index if a new replica was added and becomes leader
      waitForNumDocsInAllReplicas(0, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)));
    }

    // add docs agin
    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "2", "foo", "zoo"));
    s = docCollection.getSlices().iterator().next();
@@ -498,49 +498,49 @@ public class TestPullReplica extends SolrCloudTestCase {
    waitForNumDocsInAllReplicas(1, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)), "id:2");
    waitForNumDocsInAllReplicas(1, docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)));
  }

  public void testKillPullReplica() throws Exception {
    CollectionAdminRequest.createCollection(collectionName, "conf", 1, 1, 0, 1)
      .setMaxShardsPerNode(100)
      .process(cluster.getSolrClient());
//    cluster.getSolrClient().getZkStateReader().registerCore(collectionName); //TODO: Is this needed?
    waitForState("Expected collection to be created with 1 shard and 2 replicas", collectionName, clusterShape(1, 2));
    DocCollection docCollection = assertNumberOfReplicas(1, 0, 1, false, true);
    assertEquals(1, docCollection.getSlices().size());

    waitForNumDocsInAllActiveReplicas(0);
    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "1", "foo", "bar"));
    cluster.getSolrClient().commit(collectionName);
    waitForNumDocsInAllActiveReplicas(1);

    JettySolrRunner pullReplicaJetty = cluster.getReplicaJetty(docCollection.getSlice("shard1").getReplicas(EnumSet.of(Replica.Type.PULL)).get(0));
    pullReplicaJetty.stop();
    waitForState("Replica not removed", collectionName, activeReplicaCount(1, 0, 0));
    // Also wait for the replica to be placed in state="down"
    waitForState("Didn't update state", collectionName, clusterStateReflectsActiveAndDownReplicas());

    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "2", "foo", "bar"));
    cluster.getSolrClient().commit(collectionName);
    waitForNumDocsInAllActiveReplicas(2);

    pullReplicaJetty.start();
    waitForState("Replica not added", collectionName, activeReplicaCount(1, 0, 1));
    waitForNumDocsInAllActiveReplicas(2);
  }

  public void testSearchWhileReplicationHappens() {

  }

  private void waitForNumDocsInAllActiveReplicas(int numDocs) throws IOException, SolrServerException, InterruptedException {
    DocCollection docCollection = getCollectionState(collectionName);
    waitForNumDocsInAllReplicas(numDocs, docCollection.getReplicas().stream().filter(r -> r.getState() == Replica.State.ACTIVE).collect(Collectors.toList()));
  }

  private void waitForNumDocsInAllReplicas(int numDocs, Collection<Replica> replicas) throws IOException, SolrServerException, InterruptedException {
    waitForNumDocsInAllReplicas(numDocs, replicas, "*:*");
  }

  private void waitForNumDocsInAllReplicas(int numDocs, Collection<Replica> replicas, String query) throws IOException, SolrServerException, InterruptedException {
    TimeOut t = new TimeOut(REPLICATION_TIMEOUT_SECS, TimeUnit.SECONDS, TimeSource.NANO_TIME);
    for (Replica r:replicas) {
@@ -561,7 +561,7 @@ public class TestPullReplica extends SolrCloudTestCase {
      }
    }
  }

  private void waitForDeletion(String collection) throws InterruptedException, KeeperException {
    TimeOut t = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
    while (cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection(collection)) {
@@ -575,25 +575,25 @@ public class TestPullReplica extends SolrCloudTestCase {
      } catch(SolrException e) {
        return;
      }

    }
  }

  private DocCollection assertNumberOfReplicas(int numNrtReplicas, int numTlogReplicas, int numPullReplicas, boolean updateCollection, boolean activeOnly) throws KeeperException, InterruptedException {
    if (updateCollection) {
      cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collectionName);
    }
    DocCollection docCollection = getCollectionState(collectionName);
    assertNotNull(docCollection);
    assertEquals("Unexpected number of writer replicas: " + docCollection, numNrtReplicas,
        docCollection.getReplicas(EnumSet.of(Replica.Type.NRT)).stream().filter(r->!activeOnly || r.getState() == Replica.State.ACTIVE).count());
    assertEquals("Unexpected number of pull replicas: " + docCollection, numPullReplicas,
        docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)).stream().filter(r->!activeOnly || r.getState() == Replica.State.ACTIVE).count());
    assertEquals("Unexpected number of active replicas: " + docCollection, numTlogReplicas,
        docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)).stream().filter(r->!activeOnly || r.getState() == Replica.State.ACTIVE).count());
    return docCollection;
  }

  /*
   * passes only if all replicas are active or down, and the "liveNodes" reflect the same status
   */
@@ -613,8 +613,8 @@ public class TestPullReplica extends SolrCloudTestCase {
      return true;
    };
  }

  private CollectionStatePredicate activeReplicaCount(int numNrtReplicas, int numTlogReplicas, int numPullReplicas) {
    return (liveNodes, collectionState) -> {
      int nrtFound = 0, tlogFound = 0, pullFound = 0;
@@ -641,7 +641,7 @@ public class TestPullReplica extends SolrCloudTestCase {
      return numNrtReplicas == nrtFound && numTlogReplicas == tlogFound && numPullReplicas == pullFound;
    };
  }

  private void addDocs(int numDocs) throws SolrServerException, IOException {
    List<SolrInputDocument> docs = new ArrayList<>(numDocs);
    for (int i = 0; i < numDocs; i++) {
@@ -650,7 +650,7 @@ public class TestPullReplica extends SolrCloudTestCase {
    cluster.getSolrClient().add(collectionName, docs);
    cluster.getSolrClient().commit(collectionName);
  }

  private void addReplicaToShard(String shardName, Replica.Type type) throws ClientProtocolException, IOException, SolrServerException {
    switch (random().nextInt(3)) {
      case 0: // Add replica with SolrJ

@@ -78,12 +78,12 @@ import org.slf4j.LoggerFactory;
@Slow
@AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/SOLR-12313")
public class TestTlogReplica extends SolrCloudTestCase {

  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  private String collectionName = null;
  private final static int REPLICATION_TIMEOUT_SECS = 10;

  private String suggestedCollectionName() {
    return (getTestClass().getSimpleName().replace("Test", "") + "_" + getSaferTestName().split(" ")[0]).replaceAll("(.)(\\p{Upper})", "$1_$2").toLowerCase(Locale.ROOT);
  }
@@ -99,12 +99,12 @@ public class TestTlogReplica extends SolrCloudTestCase {
    CollectionAdminResponse response = clusterPropRequest.process(cluster.getSolrClient());
    assertEquals(0, response.getStatus());
  }

  @AfterClass
  public static void tearDownCluster() {
    TestInjection.reset();
  }

  @Override
  public void setUp() throws Exception {
    super.setUp();
@@ -127,7 +127,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
    }
    super.tearDown();
  }

  /**
   * Asserts that Update logs exist for replicas of type {@link org.apache.solr.common.cloud.Replica.Type#NRT}, but not
   * for replicas of type {@link org.apache.solr.common.cloud.Replica.Type#PULL}
@@ -139,7 +139,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
      try {
        core = cluster.getReplicaJetty(r).getCoreContainer().getCore(r.getCoreName());
        assertNotNull(core);
        assertTrue("Update log should exist for replicas of type Append",
            new java.io.File(core.getUlogDir()).exists());
      } finally {
        core.close();
@@ -147,7 +147,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
      }
    }
  }

  @Repeat(iterations=2) // 2 times to make sure cleanup is complete and we can create the same collection
  // commented out on: 17-Feb-2019 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 09-Aug-2018
  public void testCreateDelete() throws Exception {
@@ -188,7 +188,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
          cluster.waitForActiveCollection(collectionName, 2, 8);
          break;
      }

      boolean reloaded = false;
      while (true) {
        DocCollection docCollection = getCollectionState(collectionName);
@@ -206,7 +206,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
      for (Slice s:docCollection.getSlices()) {
        assertTrue(s.getLeader().getType() == Replica.Type.TLOG);
        List<String> shardElectionNodes = cluster.getZkClient().getChildren(ZkStateReader.getShardLeadersElectPath(collectionName, s.getName()), null, true);
        assertEquals("Unexpected election nodes for Shard: " + s.getName() + ": " + Arrays.toString(shardElectionNodes.toArray()),
            4, shardElectionNodes.size());
      }
      assertUlogPresence(docCollection);
@@ -222,24 +222,24 @@ public class TestTlogReplica extends SolrCloudTestCase {
        }
      }
    } finally {
-     zkClient().printLayoutToStdOut();
+     zkClient().printLayoutToStream(System.out);
    }
  }

  @SuppressWarnings("unchecked")
  public void testAddDocs() throws Exception {
    int numTlogReplicas = 1 + random().nextInt(3);
    DocCollection docCollection = createAndWaitForCollection(1, 0, numTlogReplicas, 0);
    assertEquals(1, docCollection.getSlices().size());

    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "1", "foo", "bar"));
    cluster.getSolrClient().commit(collectionName);

    Slice s = docCollection.getSlices().iterator().next();
    try (HttpSolrClient leaderClient = getHttpSolrClient(s.getLeader().getCoreUrl())) {
      assertEquals(1, leaderClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    }

    TimeOut t = new TimeOut(REPLICATION_TIMEOUT_SECS, TimeUnit.SECONDS, TimeSource.NANO_TIME);
    for (Replica r:s.getReplicas(EnumSet.of(Replica.Type.TLOG))) {
      //TODO: assert replication < REPLICATION_TIMEOUT_SECS
@@ -253,7 +253,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
            "qt", "/admin/plugins",
            "stats", "true");
        QueryResponse statsResponse = tlogReplicaClient.query(req);
        assertEquals("Append replicas should recive all updates. Replica: " + r + ", response: " + statsResponse,
            1L, ((Map<String, Object>)((NamedList<Object>)statsResponse.getResponse()).findRecursive("plugins", "UPDATE", "updateHandler", "stats")).get("UPDATE.updateHandler.cumulativeAdds.count"));
        break;
      } catch (AssertionError e) {
@@ -268,27 +268,27 @@ public class TestTlogReplica extends SolrCloudTestCase {
    }
    assertUlogPresence(docCollection);
  }

  public void testAddRemoveTlogReplica() throws Exception {
    DocCollection docCollection = createAndWaitForCollection(2, 0, 1, 0);
    assertEquals(2, docCollection.getSlices().size());

    addReplicaToShard("shard1", Replica.Type.TLOG);
    docCollection = assertNumberOfReplicas(0, 3, 0, true, false);
    addReplicaToShard("shard2", Replica.Type.TLOG);
    docCollection = assertNumberOfReplicas(0, 4, 0, true, false);

    waitForState("Expecting collection to have 2 shards and 2 replica each", collectionName, clusterShape(2, 4));

    //Delete tlog replica from shard1
    CollectionAdminRequest.deleteReplica(
        collectionName,
        "shard1",
        docCollection.getSlice("shard1").getReplicas(EnumSet.of(Replica.Type.TLOG)).get(0).getName())
      .process(cluster.getSolrClient());
    assertNumberOfReplicas(0, 3, 0, true, true);
  }

  private void addReplicaToShard(String shardName, Replica.Type type) throws ClientProtocolException, IOException, SolrServerException {
    switch (random().nextInt(3)) {
      case 0: // Add replica with SolrJ
@@ -296,8 +296,8 @@ public class TestTlogReplica extends SolrCloudTestCase {
        assertEquals("Unexpected response status: " + response.getStatus(), 0, response.getStatus());
        break;
      case 1: // Add replica with V1 API
        String url = String.format(Locale.ROOT, "%s/admin/collections?action=ADDREPLICA&collection=%s&shard=%s&type=%s",
            cluster.getRandomJetty(random()).getBaseUrl(),
            collectionName,
            shardName,
            type);
@@ -306,10 +306,10 @@ public class TestTlogReplica extends SolrCloudTestCase {
        assertEquals(200, httpResponse.getStatusLine().getStatusCode());
        break;
      case 2:// Add replica with V2 API
        url = String.format(Locale.ROOT, "%s/____v2/c/%s/shards",
            cluster.getRandomJetty(random()).getBaseUrl(),
            collectionName);
        String requestBody = String.format(Locale.ROOT, "{add-replica:{shard:%s, type:%s}}",
            shardName,
            type);
        HttpPost addReplicaPost = new HttpPost(url);
@@ -320,15 +320,15 @@ public class TestTlogReplica extends SolrCloudTestCase {
        break;
    }
  }
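
[Editor's note] addReplicaToShard above randomly exercises three equivalent routes: SolrJ, the V1 collections API, and the V2 /____v2 endpoint. For reference, a hedged standalone sketch of the V2 call assembled only from the URL and body patterns visible in the test; the base URL, collection, and shard below are placeholder values, not taken from the cluster:

    import org.apache.http.client.methods.HttpPost;
    import org.apache.http.entity.StringEntity;
    import org.apache.http.impl.client.CloseableHttpClient;
    import org.apache.http.impl.client.HttpClients;

    // Hypothetical driver for the V2 add-replica request the test issues.
    public class AddReplicaV2Example {
      public static void main(String[] args) throws Exception {
        String url = "http://localhost:8983/solr/____v2/c/tlog_replica_test/shards"; // assumed host/collection
        HttpPost post = new HttpPost(url);
        post.setEntity(new StringEntity("{add-replica:{shard:shard1, type:TLOG}}"));
        post.setHeader("Content-Type", "application/json");
        try (CloseableHttpClient http = HttpClients.createDefault()) {
          int status = http.execute(post).getStatusLine().getStatusCode();
          if (status != 200) throw new IllegalStateException("ADDREPLICA failed: " + status);
        }
      }
    }
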
  public void testRemoveLeader() throws Exception {
    doReplaceLeader(true);
  }

  public void testKillLeader() throws Exception {
    doReplaceLeader(false);
  }

  public void testRealTimeGet() throws SolrServerException, IOException, KeeperException, InterruptedException {
    // should be redirected to Replica.Type.REALTIME
    int numReplicas = random().nextBoolean()?1:2;
@@ -373,13 +373,13 @@ public class TestTlogReplica extends SolrCloudTestCase {
      id++;
    }
  }

  /*
   * validate leader election and that replication still happens on a new leader
   */
  private void doReplaceLeader(boolean removeReplica) throws Exception {
    DocCollection docCollection = createAndWaitForCollection(1, 0, 2, 0);

    // Add a document and commit
    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "1", "foo", "bar"));
    cluster.getSolrClient().commit(collectionName);
@@ -387,15 +387,15 @@ public class TestTlogReplica extends SolrCloudTestCase {
    try (HttpSolrClient leaderClient = getHttpSolrClient(s.getLeader().getCoreUrl())) {
      assertEquals(1, leaderClient.query(new SolrQuery("*:*")).getResults().getNumFound());
    }

    waitForNumDocsInAllReplicas(1, docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)), REPLICATION_TIMEOUT_SECS);

    // Delete leader replica from shard1
    JettySolrRunner leaderJetty = null;
    if (removeReplica) {
      CollectionAdminRequest.deleteReplica(
          collectionName,
          "shard1",
          s.getLeader().getName())
        .process(cluster.getSolrClient());
    } else {
@@ -403,11 +403,11 @@ public class TestTlogReplica extends SolrCloudTestCase {
      leaderJetty.stop();
      waitForState("Leader replica not removed", collectionName, clusterShape(1, 1));
      // Wait for cluster state to be updated
      waitForState("Replica state not updated in cluster state",
          collectionName, clusterStateReflectsActiveAndDownReplicas());
    }
    docCollection = assertNumberOfReplicas(0, 1, 0, true, true);

    // Wait until a new leader is elected
    TimeOut t = new TimeOut(30, TimeUnit.SECONDS, TimeSource.NANO_TIME);
    while (!t.hasTimedOut()) {
@@ -419,11 +419,11 @@ public class TestTlogReplica extends SolrCloudTestCase {
      Thread.sleep(500);
    }
    assertFalse("Timeout waiting for a new leader to be elected", t.hasTimedOut());

    // There is a new leader, I should be able to add and commit
    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "2", "foo", "zoo"));
    cluster.getSolrClient().commit(collectionName);

    // Queries should still work
    waitForNumDocsInAllReplicas(2, docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)), REPLICATION_TIMEOUT_SECS);
    // Start back the node
@@ -436,25 +436,25 @@ public class TestTlogReplica extends SolrCloudTestCase {
    // added replica should replicate from the leader
    waitForNumDocsInAllReplicas(2, docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)), REPLICATION_TIMEOUT_SECS);
  }

  public void testKillTlogReplica() throws Exception {
    DocCollection docCollection = createAndWaitForCollection(1, 0, 2, 0);

    waitForNumDocsInAllActiveReplicas(0);
    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "1", "foo", "bar"));
    cluster.getSolrClient().commit(collectionName);
    waitForNumDocsInAllActiveReplicas(1);

    JettySolrRunner pullReplicaJetty = cluster.getReplicaJetty(docCollection.getSlice("shard1").getReplicas(EnumSet.of(Replica.Type.TLOG)).get(0));
    pullReplicaJetty.stop();
    waitForState("Replica not removed", collectionName, activeReplicaCount(0, 1, 0));
//    // Also wait for the replica to be placed in state="down"
//    waitForState("Didn't update state", collectionName, clusterStateReflectsActiveAndDownReplicas());

    cluster.getSolrClient().add(collectionName, new SolrInputDocument("id", "2", "foo", "bar"));
    cluster.getSolrClient().commit(collectionName);
    waitForNumDocsInAllActiveReplicas(2);

    pullReplicaJetty.start();
    waitForState("Replica not added", collectionName, activeReplicaCount(0, 2, 0));
    waitForNumDocsInAllActiveReplicas(2);
@@ -464,7 +464,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
  // Removed BadApple on 2018-05-21
  public void testOnlyLeaderIndexes() throws Exception {
    createAndWaitForCollection(1, 0, 2, 0);

    CloudSolrClient cloudClient = cluster.getSolrClient();
    new UpdateRequest()
        .add(sdoc("id", "1"))
@@ -527,11 +527,11 @@ public class TestTlogReplica extends SolrCloudTestCase {
    checkRTG(120,150, cluster.getJettySolrRunners());
    waitForReplicasCatchUp(20);
  }

  @SuppressWarnings("unchecked")
  public void testRecovery() throws Exception {
    createAndWaitForCollection(1, 0, 2, 0);

    CloudSolrClient cloudClient = cluster.getSolrClient();
    new UpdateRequest()
        .add(sdoc("id", "3"))
@@ -551,7 +551,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
    // We skip peerSync, so replica will always trigger commit on leader
    // We query only the non-leader replicas, since we haven't opened a new searcher on the leader yet
    waitForNumDocsInAllReplicas(4, getNonLeaderReplias(collectionName), 10); //timeout for stale collection state

    // If I add the doc immediately, the leader fails to communicate with the follower with broken pipe.
    // Options are, wait or retry...
    for (int i = 0; i < 3; i++) {
@@ -616,12 +616,12 @@ public class TestTlogReplica extends SolrCloudTestCase {
      iwRef.decref();
    }
  }

  private List<Replica> getNonLeaderReplias(String collectionName) {
    return getCollectionState(collectionName).getReplicas().stream().filter(
        (r)-> !r.getBool("leader", false)).collect(Collectors.toList());
  }

  public void testDeleteById() throws Exception{
    createAndWaitForCollection(1,0,2,0);
    CloudSolrClient cloudClient = cluster.getSolrClient();
@@ -644,7 +644,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
    }
    assertFalse("Doc1 is deleted but it's still exist", successs);
  }

  public void testBasicLeaderElection() throws Exception {
    createAndWaitForCollection(1,0,2,0);
    CloudSolrClient cloudClient = cluster.getSolrClient();
@@ -669,7 +669,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
        .commit(cloudClient, collectionName);
    waitForNumDocsInAllActiveReplicas(4, 0);
  }

  public void testOutOfOrderDBQWithInPlaceUpdates() throws Exception {
    createAndWaitForCollection(1,0,2,0);
    assertFalse(getSolrCore(true).get(0).getLatestSchema().getField("inplace_updatable_int").indexed());
@@ -703,7 +703,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
    SolrDocument doc = cluster.getSolrClient().getById(collectionName,"1");
    assertNotNull(doc.get("title_s"));
  }

  private UpdateRequest simulatedUpdateRequest(Long prevVersion, Object... fields) throws SolrServerException, IOException {
    SolrInputDocument doc = sdoc(fields);

@@ -747,20 +747,20 @@ public class TestTlogReplica extends SolrCloudTestCase {
        collectionName, clusterShape(numShards, numShards * numReplicasPerShard));
    return assertNumberOfReplicas(numNrtReplicas*numShards, numTlogReplicas*numShards, numPullReplicas*numShards, false, true);
  }

  private void waitForNumDocsInAllActiveReplicas(int numDocs) throws IOException, SolrServerException, InterruptedException {
    waitForNumDocsInAllActiveReplicas(numDocs, REPLICATION_TIMEOUT_SECS);
  }

  private void waitForNumDocsInAllActiveReplicas(int numDocs, int timeout) throws IOException, SolrServerException, InterruptedException {
    DocCollection docCollection = getCollectionState(collectionName);
    waitForNumDocsInAllReplicas(numDocs, docCollection.getReplicas().stream().filter(r -> r.getState() == Replica.State.ACTIVE).collect(Collectors.toList()), timeout);
  }

  private void waitForNumDocsInAllReplicas(int numDocs, Collection<Replica> replicas, int timeout) throws IOException, SolrServerException, InterruptedException {
    waitForNumDocsInAllReplicas(numDocs, replicas, "*:*", timeout);
  }

  private void waitForNumDocsInAllReplicas(int numDocs, Collection<Replica> replicas, String query, int timeout) throws IOException, SolrServerException, InterruptedException {
    TimeOut t = new TimeOut(timeout, TimeUnit.SECONDS, TimeSource.NANO_TIME);
    for (Replica r:replicas) {
@@ -784,7 +784,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
      }
    }
  }

  private void waitForDeletion(String collection) throws InterruptedException, KeeperException {
    TimeOut t = new TimeOut(10, TimeUnit.SECONDS, TimeSource.NANO_TIME);
    while (cluster.getSolrClient().getZkStateReader().getClusterState().hasCollection(collection)) {
@@ -797,25 +797,25 @@ public class TestTlogReplica extends SolrCloudTestCase {
      } catch(SolrException e) {
        return;
      }

    }
  }

  private DocCollection assertNumberOfReplicas(int numNrtReplicas, int numTlogReplicas, int numPullReplicas, boolean updateCollection, boolean activeOnly) throws KeeperException, InterruptedException {
    if (updateCollection) {
      cluster.getSolrClient().getZkStateReader().forceUpdateCollection(collectionName);
    }
    DocCollection docCollection = getCollectionState(collectionName);
    assertNotNull(docCollection);
    assertEquals("Unexpected number of nrt replicas: " + docCollection, numNrtReplicas,
        docCollection.getReplicas(EnumSet.of(Replica.Type.NRT)).stream().filter(r->!activeOnly || r.getState() == Replica.State.ACTIVE).count());
    assertEquals("Unexpected number of pull replicas: " + docCollection, numPullReplicas,
        docCollection.getReplicas(EnumSet.of(Replica.Type.PULL)).stream().filter(r->!activeOnly || r.getState() == Replica.State.ACTIVE).count());
    assertEquals("Unexpected number of tlog replicas: " + docCollection, numTlogReplicas,
        docCollection.getReplicas(EnumSet.of(Replica.Type.TLOG)).stream().filter(r->!activeOnly || r.getState() == Replica.State.ACTIVE).count());
    return docCollection;
  }

  /*
   * passes only if all replicas are active or down, and the "liveNodes" reflect the same status
   */
@@ -835,8 +835,8 @@ public class TestTlogReplica extends SolrCloudTestCase {
      return true;
    };
  }

  private CollectionStatePredicate activeReplicaCount(int numNrtReplicas, int numTlogReplicas, int numPullReplicas) {
    return (liveNodes, collectionState) -> {
      int nrtFound = 0, tlogFound = 0, pullFound = 0;
@@ -863,7 +863,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
      return numNrtReplicas == nrtFound && numTlogReplicas == tlogFound && numPullReplicas == pullFound;
    };
  }

  private List<SolrCore> getSolrCore(boolean isLeader) {
    List<SolrCore> rs = new ArrayList<>();

@@ -885,7 +885,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
    }
    return rs;
  }

  private void checkRTG(int from, int to, List<JettySolrRunner> solrRunners) throws Exception{
    for (JettySolrRunner solrRunner: solrRunners) {
      try (SolrClient client = solrRunner.newClient()) {
@@ -900,7 +900,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
      }
    }
  }

  private List<JettySolrRunner> getSolrRunner(boolean isLeader) {
    List<JettySolrRunner> rs = new ArrayList<>();
    CloudSolrClient cloudClient = cluster.getSolrClient();
@@ -920,7 +920,7 @@ public class TestTlogReplica extends SolrCloudTestCase {
    }
    return rs;
  }

  private void waitForReplicasCatchUp(int numTry) throws IOException, InterruptedException {
    String leaderTimeCommit = getSolrCore(true).get(0).getDeletionPolicy().getLatestCommit().getUserData().get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
    if (leaderTimeCommit == null) return;

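
[Editor's note] The wait helpers that close this file (waitForNumDocsInAllReplicas, waitForDeletion, waitForReplicasCatchUp) all follow the same poll-until-deadline idiom built on org.apache.solr.util.TimeOut. A simplified, self-contained sketch of that idiom; the helper below is a stand-in, not the Solr class:

    import java.util.concurrent.TimeUnit;
    import java.util.function.BooleanSupplier;

    // Stand-in for the TimeOut-based wait loops in these tests: poll a
    // condition until it holds or the deadline passes, sleeping between checks.
    final class PollUntil {
      static boolean waitFor(long timeoutSeconds, BooleanSupplier condition) throws InterruptedException {
        final long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(timeoutSeconds);
        while (System.nanoTime() < deadline) {
          if (condition.getAsBoolean()) {
            return true;             // condition met before the timeout
          }
          Thread.sleep(200);         // the tests sleep 100-500ms between polls
        }
        return condition.getAsBoolean(); // one final check at the deadline
      }
    }
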
@@ -55,25 +55,25 @@ public class ZkCLITest extends SolrTestCaseJ4 {
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  protected ZkTestServer zkServer;

  protected String zkDir;

  private String solrHome;

  private SolrZkClient zkClient;

  protected static final String SOLR_HOME = SolrTestCaseJ4.TEST_HOME();

  @BeforeClass
  public static void beforeClass() {
    System.setProperty("solrcloud.skip.autorecovery", "true");
  }

  @AfterClass
  public static void afterClass() throws InterruptedException {
    System.clearProperty("solrcloud.skip.autorecovery");
  }

  @Override
  public void setUp() throws Exception {
    super.setUp();
@@ -94,13 +94,13 @@ public class ZkCLITest extends SolrTestCaseJ4 {
    zkClient.makePath("/solr", false, true);
    zkClient.close();

    this.zkClient = new SolrZkClient(zkServer.getZkAddress(),
        AbstractZkTestCase.TIMEOUT);

    log.info("####SETUP_END " + getTestName());
  }

  @Test
  public void testCmdConstants() throws Exception {
    assertEquals("upconfig", ZkCLI.UPCONFIG);
@@ -113,12 +113,12 @@ public class ZkCLITest extends SolrTestCaseJ4 {
  public void testBootstrapWithChroot() throws Exception {
    String chroot = "/foo/bar";
    assertFalse(zkClient.exists(chroot, true));

    String[] args = new String[] {"-zkhost", zkServer.getZkAddress() + chroot,
        "-cmd", "bootstrap", "-solrhome", this.solrHome};

    ZkCLI.main(args);

    assertTrue(zkClient.exists(chroot + ZkConfigManager.CONFIGS_ZKNODE
        + "/collection1", true));
  }
@@ -211,7 +211,7 @@ public class ZkCLITest extends SolrTestCaseJ4 {
    ZkCLI.setStdout(myOut);

    ZkCLI.main(args);

    final String standardOutput = byteStream.toString(StandardCharsets.UTF_8.name());
    String separator = System.lineSeparator();
    assertEquals("/test (1)" + separator + " /test/path (0)" + separator + separator, standardOutput);
@@ -220,7 +220,7 @@ public class ZkCLITest extends SolrTestCaseJ4 {
  @Test
  public void testUpConfigLinkConfigClearZk() throws Exception {
    File tmpDir = createTempDir().toFile();

    // test upconfig
    String confsetname = "confsetone";
    final String[] upconfigArgs;
@@ -240,21 +240,21 @@ public class ZkCLITest extends SolrTestCaseJ4 {
          "-confname", confsetname};
    }
    ZkCLI.main(upconfigArgs);

    assertTrue(zkClient.exists(ZkConfigManager.CONFIGS_ZKNODE + "/" + confsetname, true));

    // print help
    // ZkCLI.main(new String[0]);

    // test linkconfig
    String[] args = new String[] {"-zkhost", zkServer.getZkAddress(), "-cmd",
        "linkconfig", "-collection", "collection1", "-confname", confsetname};
    ZkCLI.main(args);

    ZkNodeProps collectionProps = ZkNodeProps.load(zkClient.getData(ZkStateReader.COLLECTIONS_ZKNODE + "/collection1", null, null, true));
    assertTrue(collectionProps.containsKey("configName"));
    assertEquals(confsetname, collectionProps.getStr("configName"));

    // test down config
    File confDir = new File(tmpDir,
        "solrtest-confdropspot-" + this.getClass().getName() + "-" + System.nanoTime());
@@ -263,11 +263,11 @@ public class ZkCLITest extends SolrTestCaseJ4 {
    args = new String[] {"-zkhost", zkServer.getZkAddress(), "-cmd",
        "downconfig", "-confdir", confDir.getAbsolutePath(), "-confname", confsetname};
    ZkCLI.main(args);

    File[] files = confDir.listFiles();
    List<String> zkFiles = zkClient.getChildren(ZkConfigManager.CONFIGS_ZKNODE + "/" + confsetname, null, true);
    assertEquals(files.length, zkFiles.size());

    File sourceConfDir = new File(ExternalPaths.TECHPRODUCTS_CONFIGSET);
    // filter out all directories starting with . (e.g. .svn)
    Collection<File> sourceFiles = FileUtils.listFiles(sourceConfDir, TrueFileFilter.INSTANCE, new RegexFileFilter("[^\\.].*"));
@@ -282,8 +282,8 @@ public class ZkCLITest extends SolrTestCaseJ4 {
      assertTrue(relativePathofFile+" content changed",FileUtils.contentEquals(sourceFile,downloadedFile));
      }
    }

    // test reset zk
    args = new String[] {"-zkhost", zkServer.getZkAddress(), "-cmd",
        "clear", "/"};
@@ -291,7 +291,7 @@ public class ZkCLITest extends SolrTestCaseJ4 {

    assertEquals(0, zkClient.getChildren("/", null, true).size());
  }

  @Test
  public void testGet() throws Exception {
    String getNode = "/getNode";
@@ -305,7 +305,7 @@ public class ZkCLITest extends SolrTestCaseJ4 {
  @Test
  public void testGetFile() throws Exception {
    File tmpDir = createTempDir().toFile();

    String getNode = "/getFileNode";
    byte [] data = "getFileNode-data".getBytes(StandardCharsets.UTF_8);
    this.zkClient.create(getNode, data, CreateMode.PERSISTENT, true);
@@ -353,7 +353,7 @@ public class ZkCLITest extends SolrTestCaseJ4 {
    assertNull(properties.getClusterProperty("urlScheme", (String) null));
  }

  @Test
  public void testUpdateAcls() throws Exception {
    try {
@@ -369,7 +369,7 @@ public class ZkCLITest extends SolrTestCaseJ4 {
      System.clearProperty(VMParamsAllAndReadonlyDigestZkACLProvider.DEFAULT_DIGEST_READONLY_USERNAME_VM_PARAM_NAME);
      System.clearProperty(VMParamsAllAndReadonlyDigestZkACLProvider.DEFAULT_DIGEST_READONLY_PASSWORD_VM_PARAM_NAME);
    }

    boolean excepted = false;
    try (SolrZkClient zkClient = new SolrZkClient(zkServer.getZkAddress(), AbstractDistribZkTestBase.DEFAULT_CONNECTION_TIMEOUT)) {
      zkClient.getData("/", null, null, true);
@@ -385,10 +385,10 @@ public class ZkCLITest extends SolrTestCaseJ4 {
    zkServer.shutdown();
    super.tearDown();
  }

  private void printLayout(String zkHost) throws Exception {
    SolrZkClient zkClient = new SolrZkClient(zkHost, AbstractZkTestCase.TIMEOUT);
-   zkClient.printLayoutToStdOut();
+   zkClient.printLayoutToStream(System.out);
    zkClient.close();
  }
}

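
[Editor's note] The upconfig/linkconfig/downconfig round trip above drives ZkCLI.main directly, and the printLayout helper picks up the same printLayoutToStream(System.out) substitution as the other tests. Assembled only from the flags this test exercises, a downconfig invocation looks like the sketch below; the ZooKeeper address and output directory are placeholders (the test uses zkServer.getZkAddress() and createTempDir()):

    import org.apache.solr.cloud.ZkCLI;

    // Hedged example: pull the "confsetone" configset out of ZooKeeper into a
    // local directory, mirroring the downconfig step of the test above.
    public class DownconfigExample {
      public static void main(String[] unused) throws Exception {
        String[] args = new String[] {
            "-zkhost", "localhost:2181",       // placeholder address
            "-cmd", "downconfig",
            "-confdir", "/tmp/confsetone-out", // placeholder output dir
            "-confname", "confsetone"};
        ZkCLI.main(args);
      }
    }
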
@@ -44,30 +44,30 @@ import org.slf4j.LoggerFactory;
 * useful for blocking traffic on a specified port.
 */
public class SocketProxy {

  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  public static final int ACCEPT_TIMEOUT_MILLIS = 100;

  // should be as large as the HttpShardHandlerFactory socket timeout ... or larger?
  public static final int PUMP_SOCKET_TIMEOUT_MS = 100 * 1000;

  private URI proxyUrl;
  private URI target;

  private Acceptor acceptor;
  private ServerSocket serverSocket;

  private CountDownLatch closed = new CountDownLatch(1);

  public List<Bridge> connections = new LinkedList<Bridge>();

  private final int listenPort;

  private int receiveBufferSize = -1;

  private boolean pauseAtStart = false;

  private int acceptBacklog = 50;

  private boolean usesSSL;
@@ -75,11 +75,11 @@ public class SocketProxy {
  public SocketProxy() throws Exception {
    this(0, false);
  }

  public SocketProxy( boolean useSSL) throws Exception {
    this(0, useSSL);
  }

  public SocketProxy(int port, boolean useSSL) throws Exception {
    int listenPort = port;
    this.usesSSL = useSSL;
@@ -91,27 +91,27 @@ public class SocketProxy {
    serverSocket.bind(new InetSocketAddress(listenPort), acceptBacklog);
    this.listenPort = serverSocket.getLocalPort();
  }

  public void open(URI uri) throws Exception {
    target = uri;
    proxyUrl = urlFromSocket(target, serverSocket);
    doOpen();
  }

  public String toString() {
    return "SocketyProxy: port="+listenPort+"; target="+target;
  }

  public void setReceiveBufferSize(int receiveBufferSize) {
    this.receiveBufferSize = receiveBufferSize;
  }

  public void setTarget(URI tcpBrokerUri) {
    target = tcpBrokerUri;
  }

  private void doOpen() throws Exception {

    acceptor = new Acceptor(serverSocket, target);
    if (pauseAtStart) {
      acceptor.pause();
@@ -120,29 +120,29 @@ public class SocketProxy {
        + serverSocket.getLocalPort()).start();
    closed = new CountDownLatch(1);
  }

  public int getListenPort() {
    return listenPort;
  }

  private ServerSocket createServerSocket(boolean useSSL) throws Exception {
    if (useSSL) {
      return SSLServerSocketFactory.getDefault().createServerSocket();
    }
    return new ServerSocket();
  }

  private Socket createSocket(boolean useSSL) throws Exception {
    if (useSSL) {
      return SSLSocketFactory.getDefault().createSocket();
    }
    return new Socket();
  }

  public URI getUrl() {
    return proxyUrl;
  }

  /*
   * close all proxy connections and acceptor
   */
@@ -158,7 +158,7 @@ public class SocketProxy {
    acceptor.close();
    closed.countDown();
  }

  /*
   * close all proxy receive connections, leaving acceptor open
   */
@@ -172,12 +172,12 @@ public class SocketProxy {
      halfCloseConnection(con);
    }
  }

  public boolean waitUntilClosed(long timeoutSeconds)
      throws InterruptedException {
    return closed.await(timeoutSeconds, TimeUnit.SECONDS);
  }

  /*
   * called after a close to restart the acceptor on the same port
   */
@@ -198,7 +198,7 @@ public class SocketProxy {
      log.debug("exception on reopen url:" + getUrl(), e);
    }
  }

  /*
   * pause accepting new connections and data transfer through existing proxy
   * connections. All sockets remain open
@@ -212,7 +212,7 @@ public class SocketProxy {
      }
    }
  }

  /*
   * continue after pause
   */
@@ -225,7 +225,7 @@ public class SocketProxy {
    }
    acceptor.goOn();
  }

  private void closeConnection(Bridge c) {
    try {
      c.close();
@@ -233,7 +233,7 @@ public class SocketProxy {
      log.debug("exception on close of: " + c, e);
    }
  }

  private void halfCloseConnection(Bridge c) {
    try {
      c.halfClose();
@@ -241,38 +241,38 @@ public class SocketProxy {
      log.debug("exception on half close of: " + c, e);
    }
  }

  public boolean isPauseAtStart() {
    return pauseAtStart;
  }

  public void setPauseAtStart(boolean pauseAtStart) {
    this.pauseAtStart = pauseAtStart;
  }

  public int getAcceptBacklog() {
    return acceptBacklog;
  }

  public void setAcceptBacklog(int acceptBacklog) {
    this.acceptBacklog = acceptBacklog;
  }

  private URI urlFromSocket(URI uri, ServerSocket serverSocket)
      throws Exception {
    int listenPort = serverSocket.getLocalPort();

    return new URI(uri.getScheme(), uri.getUserInfo(), uri.getHost(),
        listenPort, uri.getPath(), uri.getQuery(), uri.getFragment());
  }

  public class Bridge {

    private Socket receiveSocket;
    private Socket sendSocket;
    private Pump requestThread;
    private Pump responseThread;

    public Bridge(Socket socket, URI target) throws Exception {
      receiveSocket = socket;
      sendSocket = createSocket(usesSSL);
@@ -285,17 +285,17 @@ public class SocketProxy {
      log.info("proxy connection " + sendSocket + ", receiveBufferSize="
          + sendSocket.getReceiveBufferSize());
    }

    public void goOn() {
      responseThread.goOn();
      requestThread.goOn();
    }

    public void pause() {
      requestThread.pause();
      responseThread.pause();
    }

    public void close() throws Exception {
      synchronized (connections) {
        connections.remove(this);
@@ -303,24 +303,24 @@ public class SocketProxy {
      receiveSocket.close();
      sendSocket.close();
    }

    public void halfClose() throws Exception {
      receiveSocket.close();
    }

    private void linkWithThreads(Socket source, Socket dest) {
      requestThread = new Pump("Request", source, dest);
      requestThread.start();
      responseThread = new Pump("Response", dest, source);
      responseThread.start();
    }

    public class Pump extends Thread {

      protected Socket src;
      private Socket destination;
      private AtomicReference<CountDownLatch> pause = new AtomicReference<CountDownLatch>();

      public Pump(String kind, Socket source, Socket dest) {
        super("SocketProxy-"+kind+"-" + source.getPort() + ":"
            + dest.getPort());
@@ -328,15 +328,15 @@ public class SocketProxy {
        destination = dest;
        pause.set(new CountDownLatch(0));
      }

      public void pause() {
        pause.set(new CountDownLatch(1));
      }

      public void goOn() {
        pause.get().countDown();
      }

      public void run() {
        byte[] buf = new byte[1024];

@@ -400,32 +400,28 @@ public class SocketProxy {
      }
    }
  }

  public class Acceptor implements Runnable {

    private ServerSocket socket;
    private URI target;
    private AtomicReference<CountDownLatch> pause = new AtomicReference<CountDownLatch>();

-   public Acceptor(ServerSocket serverSocket, URI uri) {
+   public Acceptor(ServerSocket serverSocket, URI uri) throws SocketException {
      socket = serverSocket;
      target = uri;
      pause.set(new CountDownLatch(0));
-     try {
-       socket.setSoTimeout(ACCEPT_TIMEOUT_MILLIS);
-     } catch (SocketException e) {
-       e.printStackTrace();
-     }
+     socket.setSoTimeout(ACCEPT_TIMEOUT_MILLIS);
    }

    public void pause() {
      pause.set(new CountDownLatch(1));
    }

    public void goOn() {
      pause.get().countDown();
    }

    public void run() {
      try {
        while (!socket.isClosed()) {
@@ -447,7 +443,7 @@ public class SocketProxy {
        log.debug("acceptor: finished for reason: " + e.getLocalizedMessage());
      }
    }

    public void close() {
      try {
        socket.close();
@@ -456,5 +452,5 @@ public class SocketProxy {
      } catch (IOException ignored) {}
    }
  }

}

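
[Editor's note] The one behavioral change in SocketProxy is in Acceptor's constructor: instead of catching SocketException only to call e.printStackTrace() (which writes to System.err and is typically covered by the same bundled forbidden-apis jdk-system-out signatures as System.out), the constructor now declares the exception and lets the caller handle setup failure. A minimal sketch of the pattern, with a hypothetical class name:

    import java.net.ServerSocket;
    import java.net.SocketException;

    // Illustrative stand-in for the Acceptor change: propagate, don't print.
    class AcceptorSketch {
      private final ServerSocket socket;

      // Before: try { socket.setSoTimeout(timeout); }
      //         catch (SocketException e) { e.printStackTrace(); }  // swallowed
      // After: declare the exception so callers must deal with it.
      AcceptorSketch(ServerSocket serverSocket, int acceptTimeoutMillis) throws SocketException {
        this.socket = serverSocket;
        this.socket.setSoTimeout(acceptTimeoutMillis);
      }
    }
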
@ -41,7 +41,7 @@ import org.slf4j.Logger;
|
|||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/**
|
||||
*
|
||||
*
|
||||
*
|
||||
* @since solr 1.3
|
||||
*/
|
||||
|
@ -76,13 +76,13 @@ public class XMLResponseParser extends ResponseParser
|
|||
}
|
||||
|
||||
public XMLResponseParser() {}
|
||||
|
||||
|
||||
@Override
|
||||
public String getWriterType()
|
||||
{
|
||||
return "xml";
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public String getContentType() {
|
||||
return XML_CONTENT_TYPE;
|
||||
|
@ -97,7 +97,7 @@ public class XMLResponseParser extends ResponseParser
|
|||
throw new SolrException( SolrException.ErrorCode.SERVER_ERROR, "parsing error", e);
|
||||
}
|
||||
|
||||
return processResponse(parser);
|
||||
return processResponse(parser);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -120,9 +120,9 @@ public class XMLResponseParser extends ResponseParser
|
|||
{
|
||||
try {
|
||||
NamedList<Object> response = null;
|
||||
for (int event = parser.next();
|
||||
for (int event = parser.next();
|
||||
event != XMLStreamConstants.END_DOCUMENT;
|
||||
event = parser.next())
|
||||
event = parser.next())
|
||||
{
|
||||
switch (event) {
|
||||
case XMLStreamConstants.START_ELEMENT:
|
||||
|
@ -130,7 +130,7 @@ public class XMLResponseParser extends ResponseParser
|
|||
if( response != null ) {
|
||||
throw new Exception( "already read the response!" );
|
||||
}
|
||||
|
||||
|
||||
// only top-level element is "response
|
||||
String name = parser.getLocalName();
|
||||
if( name.equals( "response" ) || name.equals( "result" ) ) {
|
||||
|
@ -144,8 +144,8 @@ public class XMLResponseParser extends ResponseParser
|
|||
"not:"+parser.getLocalName() );
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return response;
|
||||
}
|
||||
catch( Exception ex ) {
|
||||
|
@ -159,7 +159,7 @@ public class XMLResponseParser extends ResponseParser
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
protected enum KnownType {
|
||||
STR (true) { @Override public String read( String txt ) { return txt; } },
|
||||
INT (true) { @Override public Integer read( String txt ) { return Integer.valueOf(txt); } },
|
||||
|
@ -168,33 +168,33 @@ public class XMLResponseParser extends ResponseParser
|
|||
LONG (true) { @Override public Long read( String txt ) { return Long.valueOf(txt); } },
|
||||
BOOL (true) { @Override public Boolean read( String txt ) { return Boolean.valueOf(txt); } },
|
||||
NULL (true) { @Override public Object read( String txt ) { return null; } },
|
||||
DATE (true) {
|
||||
@Override
|
||||
public Date read( String txt ) {
|
||||
DATE (true) {
|
||||
@Override
|
||||
public Date read( String txt ) {
|
||||
try {
|
||||
return new Date(Instant.parse(txt).toEpochMilli());
|
||||
}
|
||||
catch( Exception ex ) {
|
||||
ex.printStackTrace();
|
||||
log.info(ex.getMessage(),ex);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
|
||||
ARR (false) { @Override public Object read( String txt ) { return null; } },
|
||||
LST (false) { @Override public Object read( String txt ) { return null; } },
|
||||
RESULT (false) { @Override public Object read( String txt ) { return null; } },
|
||||
DOC (false) { @Override public Object read( String txt ) { return null; } };
|
||||
|
||||
|
||||
final boolean isLeaf;
|
||||
|
||||
|
||||
KnownType( boolean isLeaf )
|
||||
{
|
||||
this.isLeaf = isLeaf;
|
||||
}
|
||||
|
||||
|
||||
public abstract Object read( String txt );
|
||||
|
||||
|
||||
public static KnownType get( String v )
|
||||
{
|
||||
     if( v != null ) {
@@ -206,7 +206,7 @@ public class XMLResponseParser extends ResponseParser
         return null;
       }
     };
-    
+
   protected NamedList<Object> readNamedList( XMLStreamReader parser ) throws XMLStreamException
   {
     if( XMLStreamConstants.START_ELEMENT != parser.getEventType() ) {
@@ -217,10 +217,10 @@ public class XMLResponseParser extends ResponseParser
     NamedList<Object> nl = new SimpleOrderedMap<>();
     KnownType type = null;
     String name = null;
-    
+
     // just eat up the events...
     int depth = 0;
-    while( true ) 
+    while( true )
     {
       switch (parser.next()) {
       case XMLStreamConstants.START_ELEMENT:
@@ -230,7 +230,7 @@ public class XMLResponseParser extends ResponseParser
         if( type == null ) {
           throw new RuntimeException( "this must be known type! not: "+parser.getLocalName() );
         }
-        
+
         name = null;
         int cnt = parser.getAttributeCount();
         for( int i=0; i<cnt; i++ ) {
@@ -245,7 +245,7 @@ public class XMLResponseParser extends ResponseParser
           throw new XMLStreamException( "requires 'name' attribute: "+parser.getLocalName(), parser.getLocation() );
         }
         **/
-        
+
         if( !type.isLeaf ) {
           switch( type ) {
           case LST: nl.add( name, readNamedList( parser ) ); depth--; continue;
@@ -265,7 +265,7 @@ public class XMLResponseParser extends ResponseParser
             throw new XMLStreamException( "branch element not handled!", parser.getLocation() );
           }
           break;
-        
+
       case XMLStreamConstants.END_ELEMENT:
         if( --depth < 0 ) {
           return nl;
@@ -291,14 +291,14 @@ public class XMLResponseParser extends ResponseParser
     if( !"arr".equals( parser.getLocalName().toLowerCase(Locale.ROOT) ) ) {
       throw new RuntimeException( "must be 'arr', not: "+parser.getLocalName() );
     }
-    
+
     StringBuilder builder = new StringBuilder();
     KnownType type = null;

     List<Object> vals = new ArrayList<>();

     int depth = 0;
-    while( true ) 
+    while( true )
     {
       switch (parser.next()) {
       case XMLStreamConstants.START_ELEMENT:
@@ -318,7 +318,7 @@ public class XMLResponseParser extends ResponseParser
         type = t;

         builder.setLength( 0 ); // reset the text
-        
+
         if( !type.isLeaf ) {
           switch( type ) {
           case LST: vals.add( readNamedList( parser ) ); depth--; continue;
@@ -338,7 +338,7 @@ public class XMLResponseParser extends ResponseParser
             throw new XMLStreamException( "branch element not handled!", parser.getLocation() );
           }
           break;
-        
+
       case XMLStreamConstants.END_ELEMENT:
         if( --depth < 0 ) {
           return vals; // the last element is itself
@@ -359,7 +359,7 @@ public class XMLResponseParser extends ResponseParser
       }
     }
   }
-  
+
   protected SolrDocumentList readDocuments( XMLStreamReader parser ) throws XMLStreamException
   {
     SolrDocumentList docs = new SolrDocumentList();
@@ -378,7 +378,7 @@ public class XMLResponseParser extends ResponseParser
       docs.setMaxScore( Float.parseFloat( v ) );
     }
   }
-  
+
   // Read through each document
   int event;
   while( true ) {
@@ -408,10 +408,10 @@ public class XMLResponseParser extends ResponseParser
     StringBuilder builder = new StringBuilder();
     KnownType type = null;
     String name = null;
-    
+
     // just eat up the events...
     int depth = 0;
-    while( true ) 
+    while( true )
     {
       switch (parser.next()) {
       case XMLStreamConstants.START_ELEMENT:
@@ -421,7 +421,7 @@ public class XMLResponseParser extends ResponseParser
         if( type == null ) {
           throw new RuntimeException( "this must be known type! not: "+parser.getLocalName() );
         }
-        
+
         if ( type == KnownType.DOC) {
           doc.addChildDocument(readDocument(parser));
           depth--; // (nested) readDocument clears out the (nested) 'endElement'
@@ -429,7 +429,7 @@ public class XMLResponseParser extends ResponseParser
         }

         // other then nested documents, all other possible nested elements require a name...
-        
+
         name = null;
         int cnt = parser.getAttributeCount();
         for( int i=0; i<cnt; i++ ) {
@@ -438,11 +438,11 @@ public class XMLResponseParser extends ResponseParser
             break;
           }
         }
-        
+
         if( name == null ) {
           throw new XMLStreamException( "requires 'name' attribute: "+parser.getLocalName(), parser.getLocation() );
         }
-        
+
         // Handle multi-valued fields
         if( type == KnownType.ARR ) {
           for( Object val : readArray( parser ) ) {
@@ -451,14 +451,12 @@ public class XMLResponseParser extends ResponseParser
           depth--; // the array reading clears out the 'endElement'
         } else if( type == KnownType.LST ) {
           doc.addField( name, readNamedList( parser ) );
-          depth--; 
+          depth--;
         } else if( !type.isLeaf ) {
-          System.out.println("nbot leaf!:" + type);
-          
           throw new XMLStreamException( "must be value or array", parser.getLocation() );
         }
         break;
-        
+
       case XMLStreamConstants.END_ELEMENT:
         if( --depth < 0 ) {
           return doc;
@@ -480,5 +478,5 @@ public class XMLResponseParser extends ResponseParser
     }
   }

-  
+
 }
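Every hunk above follows the same StAX pattern: readNamedList, readArray, and readDocument each keep a depth counter that is incremented on START_ELEMENT and decremented on END_ELEMENT, returning once the counter drops below zero, i.e. once the element the method was entered on is closed. A minimal, self-contained sketch of that loop (the class name and sample XML below are illustrative, not from the patch):

import java.io.StringReader;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.XMLStreamConstants;
import javax.xml.stream.XMLStreamException;
import javax.xml.stream.XMLStreamReader;

public class DepthLoopSketch {
  public static void main(String[] args) throws XMLStreamException {
    String xml = "<lst name=\"params\"><str name=\"q\">*:*</str></lst>";
    XMLStreamReader parser =
        XMLInputFactory.newInstance().createXMLStreamReader(new StringReader(xml));
    parser.nextTag(); // position on the opening <lst>, as the callers above do
    int depth = 0;
    while (true) {
      switch (parser.next()) {
        case XMLStreamConstants.START_ELEMENT:
          depth++; // entered a child element
          break;
        case XMLStreamConstants.END_ELEMENT:
          if (--depth < 0) {
            return; // the element we started in is now closed
          }
          break;
      }
    }
  }
}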
@@ -239,7 +239,6 @@ public class ZplotStream extends TupleStream implements Expressible {
         while(it.hasNext()) {
           values.add((Long)it.next());
         }
-        System.out.println(values);
         int[] x = new int[values.size()];
         double[] y = new double[values.size()];
         for(int i=0; i<values.size(); i++) {
@@ -17,10 +17,13 @@

 package org.apache.solr.client.solrj.response.json;

+import java.lang.invoke.MethodHandles;
 import java.util.List;
 import java.util.Map;

 import org.apache.solr.common.util.NamedList;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 /**
  * Represents the result of a "heatmap" JSON facet.
@@ -29,6 +32,8 @@ import org.apache.solr.common.util.NamedList;
  * itself in one of two forms.
  */
 public class HeatmapJsonFacet {
+  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
   private int gridLevel;
   private int columns;
   private int rows;
@@ -47,9 +52,9 @@ public class HeatmapJsonFacet {
     maxX = (double) heatmapNL.get("maxX");
     minY = (double) heatmapNL.get("minY");
     maxY = (double) heatmapNL.get("maxY");
-    System.out.println("Rows is: " + rows);
-    System.out.println("Cols is " + columns);
-    System.out.println("Whole deal is: " + heatmapNL);
+    log.debug("Rows is: {}", rows);
+    log.debug("Cols is {}", columns);
+    log.debug("Whole deal is: {}", heatmapNL);

     if (heatmapNL.get("counts_ints2D") == null) {
       countEncodedAsBase64PNG = (String) heatmapNL.get("counts_png");
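The replacement idiom here is the interesting part of the hunk: slf4j's {} placeholders defer string building until the debug level is actually enabled, whereas the removed System.out.println calls always paid the concatenation cost and escaped the logging configuration entirely. A standalone sketch of the same idiom (the class name is hypothetical; the logger field is exactly the one the patch adds):

import java.lang.invoke.MethodHandles;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class HeatmapLoggingSketch {
  // MethodHandles.lookup().lookupClass() resolves to the enclosing class, so
  // this field can be copied between classes without editing a class literal.
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  public static void main(String[] args) {
    int rows = 4;
    int columns = 8;
    log.debug("Rows is: {}", rows);   // no-op unless DEBUG is enabled
    log.debug("Cols is {}", columns); // arguments are boxed, but never formatted
  }
}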
@@ -148,7 +148,7 @@ public class SolrZkClient implements Closeable {
     this.zkClientTimeout = zkClientTimeout;
     // we must retry at least as long as the session timeout
     zkCmdExecutor = new ZkCmdExecutor(zkClientTimeout, new IsClosed() {
-      
+
       @Override
       public boolean isClosed() {
         return SolrZkClient.this.isClosed();
@@ -156,7 +156,7 @@ public class SolrZkClient implements Closeable {
     });
     connManager = new ConnectionManager("ZooKeeperConnection Watcher:"
         + zkServerAddress, this, zkServerAddress, strat, onReconnect, beforeReconnect, new IsClosed() {
-      
+
       @Override
       public boolean isClosed() {
         return SolrZkClient.this.isClosed();
@@ -487,7 +487,7 @@ public class SolrZkClient implements Closeable {
       Watcher watcher, boolean retryOnConnLoss) throws KeeperException, InterruptedException {
     makePath(path, data, createMode, watcher, true, retryOnConnLoss, 0);
   }
-  
+
   /**
    * Creates the path in ZooKeeper, creating each node as necessary.
    *
@@ -506,7 +506,7 @@ public class SolrZkClient implements Closeable {
    *
    * e.g. If <code>path=/solr/group/node</code> and none of the nodes, solr,
    * group, node exist, each will be created.
-   * 
+   *
    * skipPathParts will force the call to fail if the first skipPathParts do not exist already.
    *
    * Note: retryOnConnLoss is only respected for the final node - nodes
@@ -551,7 +551,7 @@ public class SolrZkClient implements Closeable {
     } catch (NoAuthException e) {
       // in auth cases, we may not have permission for an earlier part of a path, which is fine
       if (i == paths.length - 1 || !exists(currentPath, retryOnConnLoss)) {
-        
+
         throw e;
       }
     } catch (NodeExistsException e) {
@@ -647,13 +647,6 @@ public class SolrZkClient implements Closeable {

   }

-  /**
-   * Prints current ZooKeeper layout to stdout.
-   */
-  public void printLayoutToStdOut() throws KeeperException,
-      InterruptedException {
-    printLayoutToStream(System.out);
-  }
   public void printLayoutToStream(PrintStream out) throws KeeperException,
       InterruptedException {
     StringBuilder sb = new StringBuilder();
@@ -824,7 +817,7 @@ public class SolrZkClient implements Closeable {
     ZkMaintenanceUtils.downConfig(this, confName, confPath);
   }

-  public void zkTransfer(String src, Boolean srcIsZk, 
+  public void zkTransfer(String src, Boolean srcIsZk,
       String dst, Boolean dstIsZk,
       Boolean recurse) throws SolrServerException, KeeperException, InterruptedException, IOException {
     ZkMaintenanceUtils.zkTransfer(this, src, srcIsZk, dst, dstIsZk, recurse);
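printLayoutToStdOut() is deleted outright rather than kept as a deprecated wrapper, so every caller now names its stream explicitly; the test hunks below pass System.out, which remains permitted in test code. As a sketch of the flexibility this buys, a caller could route the same dump through a logger instead (the helper below is hypothetical and assumes a connected SolrZkClient):

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;
import org.apache.solr.common.cloud.SolrZkClient;

class ZkLayoutDump {
  /** Captures the ZooKeeper layout as a String so it can go to slf4j, not stdout. */
  static String capture(SolrZkClient zkClient) throws Exception {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    // printLayoutToStream is the surviving API from the hunk above
    zkClient.printLayoutToStream(new PrintStream(buf, true, StandardCharsets.UTF_8.name()));
    return buf.toString(StandardCharsets.UTF_8.name());
  }
}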
@@ -776,15 +776,6 @@ public class FastJavaBinDecoder implements DataEntry.FastDecoder {
     }
   }

-  public static void main(String[] args) {
-    for (int i = 0; i < lower5BitTags.length; i++) {
-      Tag tag = lower5BitTags[i];
-      if (tag == null) continue;
-      System.out.println(tag.name() + " : " + tag.code + (tag.isLower5Bits ? " lower" : " upper"));
-    }
-  }
-
-
   private static void addObj(DataEntry e) {
     if (e.type().isContainer) {
       Object ctx = e.type() == DataEntry.Type.KEYVAL_ITER ?
@@ -42,7 +42,7 @@
   <compile srcdir="${src.dir}" destdir="${build.dir}/classes/java">
     <classpath refid="test.base.classpath"/>
   </compile>
-  
+
   <!-- Copy the resources folder (if existent) -->
   <copy todir="${build.dir}/classes/java">
     <fileset dir="${resources.dir}" erroronmissingdir="no"/>
@@ -51,7 +51,7 @@

   <!-- redefine the forbidden apis for tests, as we check ourselves -->
   <target name="-check-forbidden-tests" depends="-init-forbidden-apis,compile-core">
-    <forbidden-apis suppressAnnotation="**.SuppressForbidden" signaturesFile="${common.dir}/tools/forbiddenApis/tests.txt" classpathref="forbidden-apis.allclasses.classpath"> 
+    <forbidden-apis suppressAnnotation="**.SuppressForbidden" signaturesFile="${common.dir}/tools/forbiddenApis/tests.txt" classpathref="forbidden-apis.allclasses.classpath">
      <fileset dir="${build.dir}/classes/java"/>
    </forbidden-apis>
  </target>
@@ -62,14 +62,14 @@
     depends="compile-core,jar-test-framework,lucene-javadocs,javadocs-test-framework,define-lucene-javadoc-url,check-javadocs-uptodate" unless="javadocs-uptodate-${name}">
     <sequential>
       <mkdir dir="${javadoc.dir}/${name}"/>
-      <!-- NOTE: explicitly not using solr-invoke-javadoc, or attempting to 
-           link to lucene-test-framework because if we did javadoc would 
-           attempt to link class refs in in org.apache.lucene, causing 
-           broken links. (either broken links to things like "Directory" if 
-           lucene-test-framework was first, or broken links to things like 
+      <!-- NOTE: explicitly not using solr-invoke-javadoc, or attempting to
+           link to lucene-test-framework because if we did javadoc would
+           attempt to link class refs in in org.apache.lucene, causing
+           broken links. (either broken links to things like "Directory" if
+           lucene-test-framework was first, or broken links to things like
            LuceneTestCase if lucene-core was first)
       -->
-      <invoke-javadoc destdir="${javadoc.dir}/${name}" 
+      <invoke-javadoc destdir="${javadoc.dir}/${name}"
                       title="${Name} ${version} Test Framework API">
         <sources>
           <link offline="true" href="${javadoc.link.junit}"
@@ -116,5 +116,6 @@
     </copy>
   </target>

+  <target name="-check-forbidden-sysout"/>
 </project>
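The empty -check-forbidden-sysout target is the hook that lets this module opt out of the new solr-core sysout check, while code that must print to the console can opt out class-by-class via the annotation named in suppressAnnotation above. A sketch of the latter, assuming Solr's org.apache.solr.common.util.SuppressForbidden (any annotation whose name matches the **.SuppressForbidden pattern would do; the class below is hypothetical):

import org.apache.solr.common.util.SuppressForbidden;

public class ConsoleToolSketch {
  @SuppressForbidden(reason = "command-line tool prints its usage to the console by design")
  public static void main(String[] args) {
    // allowed only because of the annotation above
    System.out.println("usage: ConsoleToolSketch <options>");
  }
}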
@@ -46,7 +46,7 @@ import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static java.util.concurrent.TimeUnit.SECONDS;

 public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTestCase {
-  
+
   private static final String REMOVE_VERSION_FIELD = "remove.version.field";
   private static final String ENABLE_UPDATE_LOG = "enable.update.log";
   private static final String ZK_HOST = "zkHost";
@@ -66,12 +66,12 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
   @Override
   public void distribSetUp() throws Exception {
     super.distribSetUp();
-    
+
     String zkDir = testDir.getAbsolutePath() + File.separator
         + "zookeeper/server1/data";
     zkServer = new ZkTestServer(zkDir);
     zkServer.run();
-    
+
     System.setProperty(ZK_HOST, zkServer.getZkAddress());
     System.setProperty(ENABLE_UPDATE_LOG, "true");
     System.setProperty(REMOVE_VERSION_FIELD, "true");
@@ -86,15 +86,15 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
     System.setProperty("solr.test.sys.prop1", "propone");
     System.setProperty("solr.test.sys.prop2", "proptwo");
   }
-  
+
   protected String getCloudSolrConfig() {
     return "solrconfig-tlog.xml";
   }
-  
+
   protected String getCloudSchemaFile() {
     return getSchemaFile();
   }
-  
+
   @Override
   protected void createServers(int numShards) throws Exception {
     // give everyone there own solrhome
@@ -110,7 +110,7 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
         .createCollection("control_collection", 1, 1)
         .setCreateNodeSet(controlJetty.getNodeName())
         .process(controlClient).isSuccess());
-    
+
     ZkStateReader zkStateReader = jettys.get(0).getCoreContainer().getZkController()
         .getZkStateReader();

@@ -132,17 +132,17 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
     shards = sb.toString();

   }
-  
+
   protected void waitForRecoveriesToFinish(String collection, ZkStateReader zkStateReader, boolean verbose)
       throws Exception {
     waitForRecoveriesToFinish(collection, zkStateReader, verbose, true);
   }
-  
+
   protected void waitForRecoveriesToFinish(String collection, ZkStateReader zkStateReader, boolean verbose, boolean failOnTimeout)
       throws Exception {
     waitForRecoveriesToFinish(collection, zkStateReader, verbose, failOnTimeout, 330);
   }
-  
+
   public static void waitForRecoveriesToFinish(String collection,
       ZkStateReader zkStateReader, boolean verbose, boolean failOnTimeout, long timeoutSeconds)
       throws Exception {
@@ -191,7 +191,7 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
       });
     } catch (TimeoutException | InterruptedException e) {
       Diagnostics.logThreadDumps("Gave up waiting for recovery to finish. THREAD DUMP:");
-      zkStateReader.getZkClient().printLayoutToStdOut();
+      zkStateReader.getZkClient().printLayoutToStream(System.out);
       fail("There are still nodes recoverying - waited for " + timeoutSeconds + " seconds");
     }

@@ -211,7 +211,7 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
     });
     log.info("Collection has disappeared - collection: " + collection);
   }
-  
+
   static void waitForNewLeader(CloudSolrClient cloudClient, String shardName, Replica oldLeader, TimeOut timeOut)
       throws Exception {
     log.info("Will wait for a node to become leader for {} secs", timeOut.timeLeft(SECONDS));
@@ -229,17 +229,17 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes

       if (timeOut.hasTimedOut()) {
         Diagnostics.logThreadDumps("Could not find new leader in specified timeout");
-        zkStateReader.getZkClient().printLayoutToStdOut();
+        zkStateReader.getZkClient().printLayoutToStream(System.out);
         fail("Could not find new leader even after waiting for " + timeOut.timeElapsed(MILLISECONDS) + "ms");
       }

       Thread.sleep(100);
     }
-    
+
     zkStateReader.waitForState("collection1", timeOut.timeLeft(SECONDS), TimeUnit.SECONDS, (liveNodes, docCollection) -> {
       if (docCollection == null)
         return false;
-      
+
       Slice slice = docCollection.getSlice(shardName);
       if (slice != null && slice.getLeader() != null && !slice.getLeader().equals(oldLeader) && slice.getLeader().getState() == Replica.State.ACTIVE) {
         log.info("Old leader {}, new leader {}. New leader got elected in {} ms", oldLeader, slice.getLeader(), timeOut.timeElapsed(MILLISECONDS) );
@@ -256,7 +256,7 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
         && collectionState.getSlice(shard).getReplicasMap().get(coreNodeName) != null
         && collectionState.getSlice(shard).getReplicasMap().get(coreNodeName).getState() == expectedState);
   }
-  
+
   protected static void assertAllActive(String collection, ZkStateReader zkStateReader)
       throws KeeperException, InterruptedException {

@@ -266,7 +266,7 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
     if (docCollection == null || docCollection.getSlices() == null) {
       throw new IllegalArgumentException("Cannot find collection:" + collection);
     }
-    
+
     Map<String,Slice> slices = docCollection.getSlicesMap();
     for (Map.Entry<String,Slice> entry : slices.entrySet()) {
       Slice slice = entry.getValue();
@@ -282,7 +282,7 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes
       }
     }
   }
-  
+
   @Override
   public void distribTearDown() throws Exception {
     resetExceptionIgnores();
@@ -309,10 +309,10 @@ public abstract class AbstractDistribZkTestBase extends BaseDistributedSearchTes

     }
   }
-  
+
   protected void printLayout() throws Exception {
     SolrZkClient zkClient = new SolrZkClient(zkServer.getZkHost(), AbstractZkTestCase.TIMEOUT);
-    zkClient.printLayoutToStdOut();
+    zkClient.printLayoutToStream(System.out);
     zkClient.close();
   }

@@ -73,7 +73,7 @@ import java.util.concurrent.TimeUnit;
 public class ZkTestServer {

   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
-  
+
   public static File SOLRHOME;
   static {
     try {
@@ -84,10 +84,10 @@ public class ZkTestServer {
       // must override getSolrHome
     }
   }
-  
+
   public static final int TIMEOUT = 45000;
   public static final int TICK_TIME = 1000;
-  
+
   protected final ZKServerMain zkServer = new ZKServerMain();

   private volatile String zkDir;
@@ -95,12 +95,12 @@ public class ZkTestServer {
   private volatile int clientPort;

   private volatile Thread zooThread;
-  
+
   private volatile int theTickTime = TICK_TIME;
   // SOLR-12101 - provide defaults to avoid max timeout 20 enforced by our server instance when tick time is 1000
   private volatile int maxSessionTimeout = 90000;
   private volatile int minSessionTimeout = 3000;
-  
+
   protected volatile SolrZkClient rootClient;
   protected volatile SolrZkClient chRootClient;

@@ -435,7 +435,7 @@ public class ZkTestServer {
       log.info("Overriding limiter action to: {}", limiterAction);
       getLimiter().setAction(LimitViolationAction.valueOf(limiterAction));
     }
-    
+
     ObjectReleaseTracker.track(this);
   }

@@ -446,12 +446,12 @@ public class ZkTestServer {
       log.error("error making rootClient, trying one more time", e);
       rootClient = new SolrZkClient(getZkHost(), TIMEOUT, 30000);
     }
-    
+
     if (solrFormat) {
       tryCleanSolrZkNode();
       makeSolrZkNode();
     }
-    
+
     chRootClient = new SolrZkClient(getZkAddress(), AbstractZkTestCase.TIMEOUT, 30000);
   }

@@ -491,7 +491,7 @@ public class ZkTestServer {
   public int getPort() {
     return zkServer.getLocalPort();
   }
-  
+
   public void expire(final long sessionId) {
     zkServer.zooKeeperServer.expire(new Session() {
       @Override
@@ -519,7 +519,7 @@ public class ZkTestServer {
     this.zkDb = zkDb;
     zkServer.zooKeeperServer.setZKDatabase(zkDb);
   }
-  
+
   public void run() throws InterruptedException, IOException {
     run(true);
   }
@@ -614,11 +614,11 @@ public class ZkTestServer {
     } catch (Exception e) {
       log.error("Exception shutting down ZooKeeper Test Server",e);
     }
-    
+
     if (zkDb != null) {
       zkDb.close();
     }
-    
+
     while (true) {
       try {
         zooThread.join();
@@ -635,7 +635,7 @@ public class ZkTestServer {
     }
     ObjectReleaseTracker.release(this);
   }
-  
+
   public static boolean waitForServerDown(String hp, long timeoutMs) {
     final TimeOut timeout = new TimeOut(timeoutMs, TimeUnit.MILLISECONDS, TimeSource.NANO_TIME);
     while (true) {
@@ -645,7 +645,7 @@ public class ZkTestServer {
       } catch (IOException e) {
         return true;
       }
-      
+
       if (timeout.hasTimedOut()) {
         throw new RuntimeException("Time out waiting for ZooKeeper shutdown!");
       }
@@ -656,7 +656,7 @@ public class ZkTestServer {
       }
     }
   }
-  
+
   public static boolean waitForServerUp(String hp, long timeoutMs) {
     final TimeOut timeout = new TimeOut(timeoutMs, TimeUnit.MILLISECONDS, TimeSource.NANO_TIME);
     while (true) {
@@ -667,7 +667,7 @@ public class ZkTestServer {
       } catch (IOException e) {
         e.printStackTrace();
       }
-      
+
       if (timeout.hasTimedOut()) {
         throw new RuntimeException("Time out waiting for ZooKeeper to startup!");
       }
@@ -678,7 +678,7 @@ public class ZkTestServer {
       }
     }
   }
-  
+
   public static class HostPort {
     String host;
     int port;
@@ -725,7 +725,7 @@ public class ZkTestServer {
       }
     }
   }
-  
+
   public static List<HostPort> parseHostPortList(String hplist) {
     log.info("parse host and port list: " + hplist);
     ArrayList<HostPort> alist = new ArrayList<>();
@@ -778,7 +778,7 @@ public class ZkTestServer {
   public void setMinSessionTimeout(int minSessionTimeout) {
     this.minSessionTimeout = minSessionTimeout;
   }
-  
+
   void buildZooKeeper(String config,
       String schema) throws Exception {
     buildZooKeeper(SOLRHOME, config, schema);
@@ -802,15 +802,15 @@ public class ZkTestServer {
     log.info("put " + file.getAbsolutePath() + " to " + destPath);
     zkClient.makePath(destPath, file, false, true);
   }
-  
+
   // static to share with distrib test
   public void buildZooKeeper(File solrhome, String config, String schema) throws Exception {

     Map<String,Object> props = new HashMap<>();
     props.put("configName", "conf1");
     final ZkNodeProps zkProps = new ZkNodeProps(props);
-    
-    
+
+
     List<Op> ops = new ArrayList<>(2);
     String path = "/collections";
     ops.add(Op.create(path, null, chRootClient.getZkACLProvider().getACLsToAdd(path), CreateMode.PERSISTENT));
@@ -845,23 +845,23 @@ public class ZkTestServer {
     putConfig("conf1", chRootClient, solrhome, "old_synonyms.txt");
     putConfig("conf1", chRootClient, solrhome, "synonyms.txt");
   }
-  
+
   public void makeSolrZkNode() throws Exception {
     rootClient.makePath("/solr", false, true);
   }
-  
+
   public void tryCleanSolrZkNode() throws Exception {
     tryCleanPath("/solr");
   }
-  
+
   void tryCleanPath(String path) throws Exception {
     if (rootClient.exists(path, true)) {
       rootClient.clean(path);
     }
   }
-  
+
   protected void printLayout() throws Exception {
-    rootClient.printLayoutToStdOut();
+    rootClient.printLayoutToStream(System.out);
   }

   public SolrZkClient getZkClient() {