Merge remote-tracking branch 'origin/master' into gradle-master

Dawid Weiss 2019-12-03 18:45:12 +01:00
commit 7c26c6de02
52 changed files with 605 additions and 391 deletions

View File

@ -1,4 +1,6 @@
<!--
_(If you are a project committer then you may remove some/all of the following template.)_
Before creating a pull request, please file an issue in the ASF Jira system for Lucene or Solr:
* https://issues.apache.org/jira/projects/LUCENE

View File

@ -93,6 +93,8 @@ Improvements
that match a class of terms to pass a ByteRunAutomaton matching that class
back to the visitor. (Alan Woodward, David Smiley)
* LUCENE-9073: IntervalQuery to respond field on toString() and explain() (Mikhail Khludnev)
Optimizations
* LUCENE-8928: When building a kd-tree for dimensions n > 2, compute exact bounds for an inner node every N splits
@ -249,7 +251,7 @@ Optimizations
non-competitive hits. (Adrien Grand)
* LUCENE-8935: BooleanQuery with no scoring clause can now early terminate the query when
the total hits is not requested.
the total hits is not requested. (Jim Ferenczi)
* LUCENE-8941: Matches on wildcard queries will defer building their full
disjunction until a MatchesIterator is pulled (Alan Woodward)
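
A rough sketch of what "total hits is not requested" means for a caller: by giving the collector a totalHitsThreshold, a filter-only BooleanQuery can stop early instead of visiting every match. Lucene 8.x collector APIs and a hypothetical index directory are assumed here; this is an illustration, not part of the commit.

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.Directory;

public class EarlyTerminationSketch {
  static TopDocs firstTen(Directory dir) throws Exception {
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      // A BooleanQuery with no scoring clause: FILTER only.
      BooleanQuery query = new BooleanQuery.Builder()
          .add(new TermQuery(new Term("tag", "lucene")), BooleanClause.Occur.FILTER)
          .build();
      // numHits = 10, totalHitsThreshold = 10: the exact total hit count is not
      // requested beyond the first 10, which is what allows early termination.
      TopScoreDocCollector collector = TopScoreDocCollector.create(10, 10);
      searcher.search(query, collector);
      return collector.topDocs();
    }
  }
}
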

View File

@ -97,6 +97,9 @@ public final class IntervalQuery extends Query {
}
private IntervalQuery(String field, IntervalsSource intervalsSource, IntervalScoreFunction scoreFunction) {
Objects.requireNonNull(field, "null field isn't accepted");
Objects.requireNonNull(intervalsSource, "null intervalsSource isn't accepted");
Objects.requireNonNull(scoreFunction, "null scoreFunction isn't accepted");
this.field = field;
this.intervalsSource = intervalsSource;
this.scoreFunction = scoreFunction;
@ -111,7 +114,7 @@ public final class IntervalQuery extends Query {
@Override
public String toString(String field) {
return intervalsSource.toString();
return (!getField().equals(field) ? getField() + ":" : "") + intervalsSource.toString();
}
@Override
@ -158,7 +161,7 @@ public final class IntervalQuery extends Query {
int newDoc = scorer.iterator().advance(doc);
if (newDoc == doc) {
float freq = scorer.freq();
return scoreFunction.explain(intervalsSource.toString(), boost, freq);
return scoreFunction.explain(IntervalQuery.this.toString(), boost, freq);
}
}
return Explanation.noMatch("no matching intervals");
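
A rough usage sketch of the toString() change above, assuming the 8.x package location org.apache.lucene.queries.intervals and a hypothetical "body" field: the field name is now printed unless it matches the default field passed to toString(String), and explain() reports the full query string.

import org.apache.lucene.queries.intervals.IntervalQuery;
import org.apache.lucene.queries.intervals.Intervals;

public class IntervalToStringSketch {
  public static void main(String[] args) {
    IntervalQuery q = new IntervalQuery("body", Intervals.term("w1"));
    // Query.toString() uses an empty default field, so the field is printed:
    System.out.println(q.toString());        // e.g. "body:w1"
    // Passing the query's own field suppresses the prefix:
    System.out.println(q.toString("body"));  // e.g. "w1"
  }
}
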

View File

@ -82,7 +82,7 @@ abstract class IntervalScoreFunction {
"Saturation function on interval frequency, computed as w * S / (S + k) from:",
Explanation.match(weight, "w, weight of this function"),
Explanation.match(pivot, "k, pivot feature value that would give a score contribution equal to w/2"),
Explanation.match(sloppyFreq, "S, the sloppy frequency of the interval " + interval));
Explanation.match(sloppyFreq, "S, the sloppy frequency of the interval query " + interval));
}
@Override
@ -136,7 +136,7 @@ abstract class IntervalScoreFunction {
Explanation.match(weight, "w, weight of this function"),
Explanation.match(pivot, "k, pivot feature value that would give a score contribution equal to w/2"),
Explanation.match(a, "a, exponent, higher values make the function grow slower before k and faster after k"),
Explanation.match(sloppyFreq, "S, the sloppy frequency of the interval " + interval));
Explanation.match(sloppyFreq, "S, the sloppy frequency of the interval query " + interval));
}
@Override

View File

@ -26,6 +26,7 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.CheckHits;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
@ -126,6 +127,25 @@ public class TestIntervalQuery extends LuceneTestCase {
checkHits(q, new int[]{1, 3, 5});
}
public void testFieldInToString() throws IOException {
final IntervalQuery fieldW1 = new IntervalQuery(field, Intervals.term("w1"));
assertTrue(fieldW1.toString().contains(field));
final String field2 = field + "2";
final IntervalQuery f2w1 = new IntervalQuery(field2, Intervals.term("w1"));
assertTrue(f2w1.toString().contains(field2 + ":"));
assertFalse("suppress default field", f2w1.toString(field2).contains(field2));
final Explanation explain = searcher.explain(new IntervalQuery(field,
Intervals.ordered(Intervals.term("w1"), Intervals.term("w2"))), searcher.search(fieldW1, 1).scoreDocs[0].doc);
assertTrue(explain.toString().contains(field));
}
public void testNullConstructorArgs() throws IOException {
expectThrows(NullPointerException.class, () -> new IntervalQuery(null, Intervals.term("z")));
expectThrows(NullPointerException.class, () -> new IntervalQuery("field", null));
}
public void testNotWithinQuery() throws IOException {
Query q = new IntervalQuery(field, Intervals.notWithin(Intervals.term("w1"), 1, Intervals.term("w2")));
checkHits(q, new int[]{ 1, 2, 3 });

View File

@ -15,13 +15,7 @@
* limitations under the License.
*/
// Policy file to prevent tests from writing outside the test sandbox directory
// (must be given as a sysprop: tests.sandbox.dir)
// This policy also disallows stuff like listening on network ports of interfaces
// different than 127.0.0.1.
// PLEASE NOTE: You may need to enable other permissions when new tests are added,
// everything not allowed here is forbidden!
// Policy file for solr tests. Please keep minimal and avoid wildcards.
grant {
// permissions for file access, write access only to sandbox:
@ -45,27 +39,94 @@ grant {
// Basic permissions needed for Lucene to work:
permission java.util.PropertyPermission "*", "read,write";
permission java.lang.reflect.ReflectPermission "*";
permission java.lang.RuntimePermission "*";
// needed by gson serialization of junit4 runner: TODO clean that up
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
permission java.lang.RuntimePermission "accessDeclaredMembers";
// needed by junit4 runner to capture sysout/syserr:
permission java.lang.RuntimePermission "setIO";
// needed by randomized runner to catch failures from other threads:
permission java.lang.RuntimePermission "setDefaultUncaughtExceptionHandler";
// needed by randomized runner getTopThreadGroup:
permission java.lang.RuntimePermission "modifyThreadGroup";
// needed by tests e.g. shutting down executors:
permission java.lang.RuntimePermission "modifyThread";
// needed for tons of test hacks etc
permission java.lang.RuntimePermission "getStackTrace";
// needed for mock filesystems in tests
permission java.lang.RuntimePermission "fileSystemProvider";
// needed for test of IOUtils.spins (maybe it can be avoided)
permission java.lang.RuntimePermission "getFileStoreAttributes";
// analyzers/uima: needed by lucene expressions' JavascriptCompiler
permission java.lang.RuntimePermission "createClassLoader";
// needed to test unmap hack on platforms that support it
permission java.lang.RuntimePermission "accessClassInPackage.sun.misc";
// needed by jacoco to dump coverage
permission java.lang.RuntimePermission "shutdownHooks";
// needed by org.apache.logging.log4j
permission java.lang.RuntimePermission "getenv.*";
permission java.lang.RuntimePermission "getClassLoader";
permission java.lang.RuntimePermission "setContextClassLoader";
permission java.lang.RuntimePermission "getStackWalkerWithClassReference";
// needed by bytebuddy
permission java.lang.RuntimePermission "defineClass";
// needed by mockito
permission java.lang.RuntimePermission "accessClassInPackage.sun.reflect";
permission java.lang.RuntimePermission "reflectionFactoryAccess";
// needed by SolrResourceLoader
permission java.lang.RuntimePermission "closeClassLoader";
// needed by HttpSolrClient
permission java.lang.RuntimePermission "getFileSystemAttributes";
// needed by hadoop auth (TODO: there is a cleaner way to handle this)
permission java.lang.RuntimePermission "loadLibrary.jaas";
// needed by hadoop hdfs
permission java.lang.RuntimePermission "readFileDescriptor";
permission java.lang.RuntimePermission "writeFileDescriptor";
// needed by hadoop http
permission java.lang.RuntimePermission "getProtectionDomain";
// These two *have* to be spelled out separately
permission java.lang.management.ManagementPermission "control";
permission java.lang.management.ManagementPermission "monitor";
// Solr needs those:
permission java.net.NetPermission "*";
permission java.sql.SQLPermission "*";
// needed by hadoop htrace
permission java.net.NetPermission "getNetworkInformation";
// needed by DIH
permission java.sql.SQLPermission "deregisterDriver";
permission java.util.logging.LoggingPermission "control";
permission javax.management.MBeanPermission "*", "*";
permission javax.management.MBeanServerPermission "*";
permission javax.management.MBeanTrustPermission "*";
permission javax.security.auth.AuthPermission "*";
// needed by solr mbeans feature/tests
// TODO: can we remove wildcard for class names/members?
permission javax.management.MBeanPermission "*", "getAttribute";
permission javax.management.MBeanPermission "*", "getMBeanInfo";
permission javax.management.MBeanPermission "*", "queryMBeans";
permission javax.management.MBeanPermission "*", "queryNames";
permission javax.management.MBeanPermission "*", "registerMBean";
permission javax.management.MBeanPermission "*", "unregisterMBean";
permission javax.management.MBeanServerPermission "createMBeanServer";
permission javax.management.MBeanServerPermission "findMBeanServer";
permission javax.management.MBeanServerPermission "releaseMBeanServer";
permission javax.management.MBeanTrustPermission "register";
// needed by hadoop auth
permission javax.security.auth.AuthPermission "getSubject";
permission javax.security.auth.AuthPermission "modifyPrincipals";
permission javax.security.auth.AuthPermission "doAs";
permission javax.security.auth.AuthPermission "getLoginConfiguration";
permission javax.security.auth.AuthPermission "setLoginConfiguration";
permission javax.security.auth.AuthPermission "modifyPrivateCredentials";
permission javax.security.auth.PrivateCredentialPermission "org.apache.hadoop.security.Credentials * \"*\"", "read";
permission java.security.SecurityPermission "*";
// needed by hadoop security
permission java.security.SecurityPermission "putProviderProperty.SaslPlainServer";
permission java.security.SecurityPermission "insertProvider";
permission javax.xml.bind.JAXBPermission "setDatatypeConverter";
// SSL related properties for Solr tests
permission javax.net.ssl.SSLPermission "*";
permission javax.net.ssl.SSLPermission "setDefaultSSLContext";
// SASL/Kerberos related properties for Solr tests
permission javax.security.auth.PrivateCredentialPermission "javax.security.auth.kerberos.KerberosTicket * \"*\"", "read";

View File

@ -110,6 +110,10 @@ Upgrade Notes
* SOLR-13817: Deprecate legacy SolrCache implementations. Users are encouraged to transition their
configurations to use org.apache.solr.search.CaffeineCache instead. (ab)
* SOLR-13982: Some security-related http headers such as Content-Security-Policy are now set. If you have custom html served
up by Solr's http server that contains inline javascript, it will no longer execute in modern browsers. You can fix your JS
code to not run inline anymore, or edit etc/jetty.xml and weaken the CSP, or remove/alter the headers with a reverse proxy. (rmuir)
New Features
---------------------
* SOLR-13821: A Package store to store and load package artifacts (noble, Ishan Chattopadhyaya)
@ -215,6 +219,10 @@ Other Changes
* SOLR-12193: Move some log messages to TRACE level (gezapeti, janhoy)
* SOLR-13885: Typos in the documentation. (KoenDG via Cassandra Targett)
* SOLR-13992: Code refactored to have collection name, slice name in Replica, Slice (noble)
================== 8.3.1 ==================
Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release.

View File

@ -24,6 +24,7 @@ import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.analytics.function.ReductionCollectionManager;
import org.apache.solr.analytics.value.constant.ConstantValue;
import org.apache.solr.schema.IndexSchema;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@ -55,6 +56,11 @@ public class ExpressionFactoryTest extends SolrTestCaseJ4 {
indexSchema = h.getCore().getLatestSchema();
}
@AfterClass
public static void cleanUp() throws Exception {
indexSchema = null;
}
private ExpressionFactory getExpressionFactory() {
ExpressionFactory fact = new ExpressionFactory(indexSchema);
fact.startRequest();

View File

@ -228,6 +228,9 @@ public class AbstractAnalyticsFieldTest extends SolrTestCaseJ4 {
ref.decref();
ref = null;
}
indexSchema = null;
searcher = null;
ref = null;
}
protected <T> void checkSingleFieldValues(Map<String,T> expected, Map<String,T> found, Set<String> missing) {

View File

@ -296,7 +296,7 @@ class ExclusiveSliceProperty {
if (newSlice != null) {
replica = newSlice.getReplica(replicaName);
} else {
newSlice = new Slice(origSlice.getName(), origSlice.getReplicasCopy(), origSlice.shallowCopy());
newSlice = new Slice(origSlice.getName(), origSlice.getReplicasCopy(), origSlice.shallowCopy(), origSlice.collection);
changedSlices.put(origSlice.getName(), newSlice);
replica = newSlice.getReplica(replicaName);
}

View File

@ -20,6 +20,7 @@ package org.apache.solr.cloud;
import java.lang.invoke.MethodHandles;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.solr.client.solrj.cloud.ShardTerms;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.CoreDescriptor;
import org.apache.solr.core.SolrCore;
@ -44,7 +45,7 @@ public class RecoveringCoreTermWatcher implements ZkShardTerms.CoreTermWatcher {
}
@Override
public boolean onTermChanged(ZkShardTerms.Terms terms) {
public boolean onTermChanged(ShardTerms terms) {
if (coreContainer.isShutDown()) return false;
try (SolrCore solrCore = coreContainer.getCore(coreDescriptor.getName())) {

View File

@ -18,15 +18,14 @@
package org.apache.solr.cloud;
import java.lang.invoke.MethodHandles;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.solr.client.solrj.cloud.ShardTerms;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkStateReader;
@ -74,14 +73,12 @@ public class ZkShardTerms implements AutoCloseable{
private final Set<CoreTermWatcher> listeners = new HashSet<>();
private final AtomicBoolean isClosed = new AtomicBoolean(false);
private static final String RECOVERING_TERM_SUFFIX = "_recovering";
private Terms terms;
private ShardTerms terms;
// Listener of a core for shard's term change events
interface CoreTermWatcher {
// return true if the listener wants to be triggered the next time
boolean onTermChanged(Terms terms);
boolean onTermChanged(ShardTerms terms);
}
public ZkShardTerms(String collection, String shard, SolrZkClient zkClient) {
@ -103,12 +100,15 @@ public class ZkShardTerms implements AutoCloseable{
public void ensureTermsIsHigher(String leader, Set<String> replicasNeedingRecovery) {
if (replicasNeedingRecovery.isEmpty()) return;
Terms newTerms;
ShardTerms newTerms;
while( (newTerms = terms.increaseTerms(leader, replicasNeedingRecovery)) != null) {
if (forceSaveTerms(newTerms)) return;
}
}
public ShardTerms getShardTerms() {
return terms;
}
/**
* Can this replica become leader?
* @param coreNodeName of the replica
@ -148,7 +148,7 @@ public class ZkShardTerms implements AutoCloseable{
// package private for testing, only used by tests
Map<String, Long> getTerms() {
synchronized (writingLock) {
return new HashMap<>(terms.values);
return terms.getTerms();
}
}
@ -178,7 +178,7 @@ public class ZkShardTerms implements AutoCloseable{
// package private for testing, only used by tests
// return true if this object should not be reused
boolean removeTerm(String coreNodeName) {
Terms newTerms;
ShardTerms newTerms;
while ( (newTerms = terms.removeTerm(coreNodeName)) != null) {
try {
if (saveTerms(newTerms)) return false;
@ -195,7 +195,7 @@ public class ZkShardTerms implements AutoCloseable{
* @param coreNodeName of the replica
*/
void registerTerm(String coreNodeName) {
Terms newTerms;
ShardTerms newTerms;
while ( (newTerms = terms.registerTerm(coreNodeName)) != null) {
if (forceSaveTerms(newTerms)) break;
}
@ -207,14 +207,14 @@ public class ZkShardTerms implements AutoCloseable{
* @param coreNodeName of the replica
*/
public void setTermEqualsToLeader(String coreNodeName) {
Terms newTerms;
ShardTerms newTerms;
while ( (newTerms = terms.setTermEqualsToLeader(coreNodeName)) != null) {
if (forceSaveTerms(newTerms)) break;
}
}
public void setTermToZero(String coreNodeName) {
Terms newTerms;
ShardTerms newTerms;
while ( (newTerms = terms.setTermToZero(coreNodeName)) != null) {
if (forceSaveTerms(newTerms)) break;
}
@ -224,7 +224,7 @@ public class ZkShardTerms implements AutoCloseable{
* Mark {@code coreNodeName} as recovering
*/
public void startRecovering(String coreNodeName) {
Terms newTerms;
ShardTerms newTerms;
while ( (newTerms = terms.startRecovering(coreNodeName)) != null) {
if (forceSaveTerms(newTerms)) break;
}
@ -234,27 +234,22 @@ public class ZkShardTerms implements AutoCloseable{
* Mark {@code coreNodeName} as finished recovering
*/
public void doneRecovering(String coreNodeName) {
Terms newTerms;
ShardTerms newTerms;
while ( (newTerms = terms.doneRecovering(coreNodeName)) != null) {
if (forceSaveTerms(newTerms)) break;
}
}
public boolean isRecovering(String name) {
return terms.values.containsKey(recoveringTerm(name));
return terms.isRecovering(name);
}
public static String recoveringTerm(String coreNodeName) {
return coreNodeName + RECOVERING_TERM_SUFFIX;
}
/**
* When first updates come in, all replicas have some data now,
* so we must switch from term 0 (registered) to 1 (have some data)
*/
public void ensureHighestTermsAreNotZero() {
Terms newTerms;
ShardTerms newTerms;
while ( (newTerms = terms.ensureHighestTermsAreNotZero()) != null) {
if (forceSaveTerms(newTerms)) break;
}
@ -282,7 +277,7 @@ public class ZkShardTerms implements AutoCloseable{
* @param newTerms to be set
* @return true if terms is saved successfully to ZK, false if otherwise
*/
private boolean forceSaveTerms(Terms newTerms) {
private boolean forceSaveTerms(ShardTerms newTerms) {
try {
return saveTerms(newTerms);
} catch (KeeperException.NoNodeException e) {
@ -297,11 +292,11 @@ public class ZkShardTerms implements AutoCloseable{
* @return true if terms is saved successfully to ZK, false if otherwise
* @throws KeeperException.NoNodeException if the corresponding ZK term node is not created
*/
private boolean saveTerms(Terms newTerms) throws KeeperException.NoNodeException {
byte[] znodeData = Utils.toJSON(newTerms.values);
private boolean saveTerms(ShardTerms newTerms) throws KeeperException.NoNodeException {
byte[] znodeData = Utils.toJSON(newTerms);
try {
Stat stat = zkClient.setData(znodePath, znodeData, newTerms.version, true);
setNewTerms(new Terms(newTerms.values, stat.getVersion()));
Stat stat = zkClient.setData(znodePath, znodeData, newTerms.getVersion(), true);
setNewTerms(new ShardTerms(newTerms, stat.getVersion()));
log.info("Successful update of terms at {} to {}", znodePath, newTerms);
return true;
} catch (KeeperException.BadVersionException e) {
@ -344,11 +339,11 @@ public class ZkShardTerms implements AutoCloseable{
* Fetch latest terms from ZK
*/
public void refreshTerms() {
Terms newTerms;
ShardTerms newTerms;
try {
Stat stat = new Stat();
byte[] data = zkClient.getData(znodePath, null, stat, true);
newTerms = new Terms((Map<String, Long>) Utils.fromJSON(data), stat.getVersion());
newTerms = new ShardTerms((Map<String, Long>) Utils.fromJSON(data), stat.getVersion());
} catch (KeeperException e) {
Thread.interrupted();
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error updating shard term for collection: " + collection, e);
@ -411,10 +406,10 @@ public class ZkShardTerms implements AutoCloseable{
* Atomically update {@link ZkShardTerms#terms} and call listeners
* @param newTerms to be set
*/
private void setNewTerms(Terms newTerms) {
private void setNewTerms(ShardTerms newTerms) {
boolean isChanged = false;
synchronized (writingLock) {
if (terms == null || newTerms.version > terms.version) {
if (terms == null || newTerms.getVersion() > terms.getVersion()) {
terms = newTerms;
isChanged = true;
}
@ -422,211 +417,9 @@ public class ZkShardTerms implements AutoCloseable{
if (isChanged) onTermUpdates(newTerms);
}
private void onTermUpdates(Terms newTerms) {
private void onTermUpdates(ShardTerms newTerms) {
synchronized (listeners) {
listeners.removeIf(coreTermWatcher -> !coreTermWatcher.onTermChanged(newTerms));
}
}
/**
* Hold values of terms, this class is immutable
*/
static class Terms {
private final Map<String, Long> values;
private final long maxTerm;
// ZK node version
private final int version;
public Terms () {
this(new HashMap<>(), 0);
}
public Terms(Map<String, Long> values, int version) {
this.values = values;
this.version = version;
if (values.isEmpty()) this.maxTerm = 0;
else this.maxTerm = Collections.max(values.values());
}
/**
* Can {@code coreNodeName} become leader?
* @param coreNodeName of the replica
* @return true if {@code coreNodeName} can become leader, false if otherwise
*/
boolean canBecomeLeader(String coreNodeName) {
return haveHighestTermValue(coreNodeName) && !values.containsKey(recoveringTerm(coreNodeName));
}
/**
* Is {@code coreNodeName}'s term highest?
* @param coreNodeName of the replica
* @return true if term of {@code coreNodeName} is highest
*/
boolean haveHighestTermValue(String coreNodeName) {
if (values.isEmpty()) return true;
long maxTerm = Collections.max(values.values());
return values.getOrDefault(coreNodeName, 0L) == maxTerm;
}
Long getTerm(String coreNodeName) {
return values.get(coreNodeName);
}
/**
* Return a new {@link Terms} in which term of {@code leader} is higher than {@code replicasNeedingRecovery}
* @param leader coreNodeName of leader
* @param replicasNeedingRecovery set of replicas in which their terms should be lower than leader's term
* @return null if term of {@code leader} is already higher than {@code replicasNeedingRecovery}
*/
Terms increaseTerms(String leader, Set<String> replicasNeedingRecovery) {
if (!values.containsKey(leader)) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Can not find leader's term " + leader);
}
boolean changed = false;
boolean foundReplicasInLowerTerms = false;
HashMap<String, Long> newValues = new HashMap<>(values);
long leaderTerm = newValues.get(leader);
for (Map.Entry<String, Long> entry : newValues.entrySet()) {
String key = entry.getKey();
if (replicasNeedingRecovery.contains(key)) foundReplicasInLowerTerms = true;
if (Objects.equals(entry.getValue(), leaderTerm)) {
if(skipIncreaseTermOf(key, replicasNeedingRecovery)) {
changed = true;
} else {
newValues.put(key, leaderTerm+1);
}
}
}
// We should skip the optimization if there are no replicasNeedingRecovery present in local terms,
// this may indicate that the current value is stale
if (!changed && foundReplicasInLowerTerms) return null;
return new Terms(newValues, version);
}
private boolean skipIncreaseTermOf(String key, Set<String> replicasNeedingRecovery) {
if (key.endsWith(RECOVERING_TERM_SUFFIX)) {
key = key.substring(0, key.length() - RECOVERING_TERM_SUFFIX.length());
}
return replicasNeedingRecovery.contains(key);
}
/**
* Return a new {@link Terms} in which highest terms are not zero
* @return null if highest terms are already larger than zero
*/
Terms ensureHighestTermsAreNotZero() {
if (maxTerm > 0) return null;
else {
HashMap<String, Long> newValues = new HashMap<>(values);
for (String replica : values.keySet()) {
newValues.put(replica, 1L);
}
return new Terms(newValues, version);
}
}
/**
* Return a new {@link Terms} in which terms for the {@code coreNodeName} are removed
* @param coreNodeName of the replica
* @return null if no term exists for {@code coreNodeName}
*/
Terms removeTerm(String coreNodeName) {
if (!values.containsKey(recoveringTerm(coreNodeName)) && !values.containsKey(coreNodeName)) {
return null;
}
HashMap<String, Long> newValues = new HashMap<>(values);
newValues.remove(coreNodeName);
newValues.remove(recoveringTerm(coreNodeName));
return new Terms(newValues, version);
}
/**
* Return a new {@link Terms} in which the associate term of {@code coreNodeName} is not null
* @param coreNodeName of the replica
* @return null if a term for {@code coreNodeName} already exists
*/
Terms registerTerm(String coreNodeName) {
if (values.containsKey(coreNodeName)) return null;
HashMap<String, Long> newValues = new HashMap<>(values);
newValues.put(coreNodeName, 0L);
return new Terms(newValues, version);
}
Terms setTermToZero(String coreNodeName) {
if (values.getOrDefault(coreNodeName, -1L) == 0) {
return null;
}
HashMap<String, Long> newValues = new HashMap<>(values);
newValues.put(coreNodeName, 0L);
return new Terms(newValues, version);
}
/**
* Return a new {@link Terms} in which the term of {@code coreNodeName} is max
* @param coreNodeName of the replica
* @return null if term of {@code coreNodeName} is already maximum
*/
Terms setTermEqualsToLeader(String coreNodeName) {
long maxTerm = getMaxTerm();
if (values.get(coreNodeName) == maxTerm) return null;
HashMap<String, Long> newValues = new HashMap<>(values);
newValues.put(coreNodeName, maxTerm);
newValues.remove(recoveringTerm(coreNodeName));
return new Terms(newValues, version);
}
long getMaxTerm() {
return maxTerm;
}
/**
* Mark {@code coreNodeName} as recovering
* @param coreNodeName of the replica
* @return null if {@code coreNodeName} is already marked as recovering
*/
Terms startRecovering(String coreNodeName) {
long maxTerm = getMaxTerm();
if (values.get(coreNodeName) == maxTerm)
return null;
HashMap<String, Long> newValues = new HashMap<>(values);
if (!newValues.containsKey(recoveringTerm(coreNodeName))) {
long currentTerm = newValues.getOrDefault(coreNodeName, 0L);
// by keeping old term, we will have more information in leader election
newValues.put(recoveringTerm(coreNodeName), currentTerm);
}
newValues.put(coreNodeName, maxTerm);
return new Terms(newValues, version);
}
/**
* Mark {@code coreNodeName} as finished recovering
* @param coreNodeName of the replica
* @return null if {@code coreNodeName} has already finished recovering
*/
Terms doneRecovering(String coreNodeName) {
if (!values.containsKey(recoveringTerm(coreNodeName))) {
return null;
}
HashMap<String, Long> newValues = new HashMap<>(values);
newValues.remove(recoveringTerm(coreNodeName));
return new Terms(newValues, version);
}
@Override
public String toString() {
return "Terms{" +
"values=" + values +
", version=" + version +
'}';
}
}
}
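
A rough sketch of the optimistic retry pattern that ZkShardTerms keeps after this refactor, using only ShardTerms methods visible in this diff (increaseTerms, getTerm, the Map/version constructor); the readFromZk and writeToZk callbacks stand in for the real zkClient.getData/setData calls and are assumptions of this illustration.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Predicate;
import java.util.function.Supplier;

import org.apache.solr.client.solrj.cloud.ShardTerms;

public class ShardTermsSketch {

  static ShardTerms ensureLeaderTermHigher(Supplier<ShardTerms> readFromZk,
                                           Predicate<ShardTerms> writeToZk) {
    ShardTerms current = readFromZk.get();
    ShardTerms newTerms;
    // increaseTerms returns null once the leader's term is already higher than the
    // recovering replica's; otherwise it returns a new immutable ShardTerms to persist.
    while ((newTerms = current.increaseTerms("leader", Collections.singleton("replica"))) != null) {
      if (writeToZk.test(newTerms)) {
        return newTerms;            // saved against the expected ZK node version
      }
      current = readFromZk.get();   // version conflict: refresh and retry
    }
    return current;
  }

  public static void main(String[] args) {
    Map<String, Long> values = new HashMap<>();
    values.put("leader", 0L);
    values.put("replica", 0L);
    ShardTerms initial = new ShardTerms(values, 0);
    ShardTerms result = ensureLeaderTermHigher(() -> initial, t -> true); // pretend the ZK write succeeds
    System.out.println(result.getTerm("leader")); // expected: 1
  }
}
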

View File

@ -80,9 +80,9 @@ import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
import static org.apache.solr.common.params.CollectionAdminParams.ALIAS;
import static org.apache.solr.common.params.CollectionAdminParams.COLL_CONF;
import static org.apache.solr.common.params.CollectionAdminParams.COLOCATED_WITH;
import static org.apache.solr.common.params.CollectionAdminParams.ALIAS;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.MODIFYCOLLECTION;
import static org.apache.solr.common.params.CommonAdminParams.ASYNC;

View File

@ -18,20 +18,6 @@
package org.apache.solr.cloud.api.collections;
import static org.apache.solr.common.cloud.DocCollection.STATE_FORMAT;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_TYPE;
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
import static org.apache.solr.common.params.CommonParams.NAME;
import java.lang.invoke.MethodHandles;
import java.net.URI;
import java.util.ArrayList;
@ -78,6 +64,20 @@ import org.apache.solr.handler.component.ShardHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.solr.common.cloud.DocCollection.STATE_FORMAT;
import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE;
import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR;
import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_TYPE;
import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP;
import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD;
import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
import static org.apache.solr.common.params.CommonParams.NAME;
public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@ -199,7 +199,7 @@ public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd {
Map<String, Slice> newSlices = new LinkedHashMap<>(backupSlices.size());
for (Slice backupSlice : backupSlices) {
newSlices.put(backupSlice.getName(),
new Slice(backupSlice.getName(), Collections.emptyMap(), backupSlice.getProperties()));
new Slice(backupSlice.getName(), Collections.emptyMap(), backupSlice.getProperties(), restoreCollectionName));
}
propMap.put(OverseerCollectionMessageHandler.SHARDS_PROP, newSlices);
}

View File

@ -2386,7 +2386,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
props.put(ZkStateReader.CORE_NAME_PROP, ri.getCore());
props.put(ZkStateReader.REPLICA_TYPE, ri.getType().toString());
props.put(ZkStateReader.STATE_PROP, ri.getState().toString());
Replica r = new Replica(ri.getName(), props);
Replica r = new Replica(ri.getName(), props, ri.getCollection(), ri.getShard());
collMap.computeIfAbsent(ri.getCollection(), c -> new HashMap<>())
.computeIfAbsent(ri.getShard(), s -> new HashMap<>())
.put(ri.getName(), r);
@ -2410,7 +2410,7 @@ public class SimClusterStateProvider implements ClusterStateProvider {
Map<String, Slice> slices = new HashMap<>();
shards.forEach((s, replicas) -> {
Map<String, Object> sliceProps = sliceProperties.computeIfAbsent(coll, c -> new ConcurrentHashMap<>()).computeIfAbsent(s, sl -> new ConcurrentHashMap<>());
Slice slice = new Slice(s, replicas, sliceProps);
Slice slice = new Slice(s, replicas, sliceProps, coll);
slices.put(s, slice);
});
Map<String, Object> collProps = collProperties.computeIfAbsent(coll, c -> new ConcurrentHashMap<>());

View File

@ -68,7 +68,7 @@ public class ClusterStateMutator {
Map<String, Slice> slices;
if (messageShardsObj instanceof Map) { // we are being explicitly told the slice data (e.g. coll restore)
slices = Slice.loadAllFromMap((Map<String, Object>)messageShardsObj);
slices = Slice.loadAllFromMap(cName, (Map<String, Object>)messageShardsObj);
} else {
List<String> shardNames = new ArrayList<>();
@ -89,7 +89,7 @@ public class ClusterStateMutator {
Map<String, Object> sliceProps = new LinkedHashMap<>(1);
sliceProps.put(Slice.RANGE, ranges == null ? null : ranges.get(i));
slices.put(sliceName, new Slice(sliceName, null, sliceProps));
slices.put(sliceName, new Slice(sliceName, null, sliceProps, cName));
}
}

View File

@ -77,7 +77,7 @@ public class CollectionMutator {
if (shardParentNode != null) {
sliceProps.put("shard_parent_node", shardParentNode);
}
collection = updateSlice(collectionName, collection, new Slice(shardId, replicas, sliceProps));
collection = updateSlice(collectionName, collection, new Slice(shardId, replicas, sliceProps, collectionName));
return new ZkWriteCommand(collectionName, collection);
} else {
log.error("Unable to create Shard: " + shardId + " because it already exists in collection: " + collectionName);

View File

@ -65,13 +65,13 @@ public class NodeMutator {
log.debug("Update replica state for " + replica + " to " + Replica.State.DOWN.toString());
Map<String, Object> props = replica.shallowCopy();
props.put(ZkStateReader.STATE_PROP, Replica.State.DOWN.toString());
Replica newReplica = new Replica(replica.getName(), props);
Replica newReplica = new Replica(replica.getName(), props, collection, slice.getName());
newReplicas.put(replica.getName(), newReplica);
needToUpdateCollection = true;
}
}
Slice newSlice = new Slice(slice.getName(), newReplicas, slice.shallowCopy());
Slice newSlice = new Slice(slice.getName(), newReplicas, slice.shallowCopy(), collection);
slicesCopy.put(slice.getName(), newSlice);
}

View File

@ -72,7 +72,7 @@ public class ReplicaMutator {
Map<String, Object> replicaProps = new LinkedHashMap<>(replica.getProperties());
replicaProps.put(key, value);
return new Replica(replica.getName(), replicaProps);
return new Replica(replica.getName(), replicaProps, replica.getCollection(), replica.getSlice());
}
protected Replica unsetProperty(Replica replica, String key) {
@ -81,7 +81,7 @@ public class ReplicaMutator {
if (!replica.containsKey(key)) return replica;
Map<String, Object> replicaProps = new LinkedHashMap<>(replica.getProperties());
replicaProps.remove(key);
return new Replica(replica.getName(), replicaProps);
return new Replica(replica.getName(), replicaProps, replica.getCollection(), replica.getSlice());
}
protected Replica setLeader(Replica replica) {
@ -144,10 +144,11 @@ public class ReplicaMutator {
}
log.info("Setting property {} with value {} for collection {}", property, propVal, collectionName);
log.debug("Full message: {}", message);
if (StringUtils.equalsIgnoreCase(replica.getStr(property), propVal)) return ZkStateWriter.NO_OP; // already the value we're going to set
if (StringUtils.equalsIgnoreCase(replica.getStr(property), propVal))
return ZkStateWriter.NO_OP; // already the value we're going to set
// OK, there's no way we won't change the cluster state now
Map<String,Replica> replicas = collection.getSlice(sliceName).getReplicasCopy();
Map<String, Replica> replicas = collection.getSlice(sliceName).getReplicasCopy();
if (isUnique == false) {
replicas.get(replicaName).getProperties().put(property, propVal);
} else { // Set prop for this replica, but remove it for all others.
@ -159,7 +160,7 @@ public class ReplicaMutator {
}
}
}
Slice newSlice = new Slice(sliceName, replicas, collection.getSlice(sliceName).shallowCopy());
Slice newSlice = new Slice(sliceName, replicas, collection.getSlice(sliceName).shallowCopy(), collectionName);
DocCollection newCollection = CollectionMutator.updateSlice(collectionName, collection,
newSlice);
return new ZkWriteCommand(collectionName, newCollection);
@ -326,8 +327,8 @@ public class ReplicaMutator {
String shardParent = (String) replicaProps.remove(ZkStateReader.SHARD_PARENT_PROP);
Replica replica = new Replica(coreNodeName, replicaProps);
Replica replica = new Replica(coreNodeName, replicaProps, collectionName, sliceName);
log.debug("Will update state for replica: {}", replica);
Map<String, Object> sliceProps = null;
@ -347,7 +348,7 @@ public class ReplicaMutator {
sliceProps.put(Slice.PARENT, shardParent);
}
replicas.put(replica.getName(), replica);
slice = new Slice(sliceName, replicas, sliceProps);
slice = new Slice(sliceName, replicas, sliceProps, collectionName);
DocCollection newCollection = CollectionMutator.updateSlice(collectionName, collection, slice);
log.debug("Collection is now: {}", newCollection);
@ -424,10 +425,10 @@ public class ReplicaMutator {
String parentSliceName = (String) sliceProps.remove(Slice.PARENT);
// now lets see if the parent leader is still the same or else there's a chance of data loss
// see SOLR-9438 for details
String shardParentZkSession = (String) sliceProps.remove("shard_parent_zk_session");
String shardParentZkSession = (String) sliceProps.remove("shard_parent_zk_session");
String shardParentNode = (String) sliceProps.remove("shard_parent_node");
boolean isLeaderSame = true;
if (shardParentNode != null && shardParentZkSession != null) {
if (shardParentNode != null && shardParentZkSession != null) {
log.info("Checking whether sub-shard leader node is still the same one at {} with ZK session id {}", shardParentNode, shardParentZkSession);
try {
VersionedData leaderZnode = null;
@ -437,7 +438,7 @@ public class ReplicaMutator {
} catch (NoSuchElementException e) {
// ignore
}
if (leaderZnode == null) {
if (leaderZnode == null) {
log.error("The shard leader node: {} is not live anymore!", shardParentNode);
isLeaderSame = false;
} else if (!shardParentZkSession.equals(leaderZnode.getOwner())) {
@ -471,7 +472,7 @@ public class ReplicaMutator {
log.info("TIMINGS Sub-shard " + subShardSlice.getName() + " not available: " + subShardSlice);
}
}
} else {
} else {
// we must mark the shard split as failed by switching sub-shards to recovery_failed state
propMap.put(sliceName, Slice.State.RECOVERY_FAILED.toString());
for (Slice subShardSlice : subShardSlices) {

View File

@ -80,7 +80,7 @@ public class SliceMutator {
ZkStateReader.BASE_URL_PROP, message.getStr(ZkStateReader.BASE_URL_PROP),
ZkStateReader.STATE_PROP, message.getStr(ZkStateReader.STATE_PROP),
ZkStateReader.NODE_NAME_PROP, message.getStr(ZkStateReader.NODE_NAME_PROP),
ZkStateReader.REPLICA_TYPE, message.get(ZkStateReader.REPLICA_TYPE)));
ZkStateReader.REPLICA_TYPE, message.get(ZkStateReader.REPLICA_TYPE)), coll, slice);
return new ZkWriteCommand(coll, updateReplica(collection, sl, replica.getName(), replica));
}
@ -103,7 +103,7 @@ public class SliceMutator {
if (replica != null && (baseUrl == null || baseUrl.equals(replica.getBaseUrl()))) {
Map<String, Replica> newReplicas = slice.getReplicasCopy();
newReplicas.remove(cnn);
slice = new Slice(slice.getName(), newReplicas, slice.getProperties());
slice = new Slice(slice.getName(), newReplicas, slice.getProperties(), collection);
}
newSlices.put(slice.getName(), slice);
}
@ -150,7 +150,7 @@ public class SliceMutator {
Map<String, Object> newSliceProps = slice.shallowCopy();
newSliceProps.put(Slice.REPLICAS, newReplicas);
slice = new Slice(slice.getName(), newReplicas, slice.getProperties());
slice = new Slice(slice.getName(), newReplicas, slice.getProperties(), collectionName);
return new ZkWriteCommand(collectionName, CollectionMutator.updateSlice(collectionName, coll, slice));
}
@ -180,7 +180,7 @@ public class SliceMutator {
props.put(ZkStateReader.STATE_PROP, message.getStr(key));
// we need to use epoch time so that it's comparable across Overseer restarts
props.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props);
Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props, collectionName);
slicesCopy.put(slice.getName(), newSlice);
}
@ -224,7 +224,7 @@ public class SliceMutator {
Map<String, Object> props = slice.shallowCopy();
props.put("routingRules", routingRules);
Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props);
Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props, collectionName);
return new ZkWriteCommand(collectionName,
CollectionMutator.updateSlice(collectionName, collection, newSlice));
}
@ -249,7 +249,7 @@ public class SliceMutator {
routingRules.remove(routeKeyStr); // no rules left
Map<String, Object> props = slice.shallowCopy();
props.put("routingRules", routingRules);
Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props);
Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props, collectionName);
return new ZkWriteCommand(collectionName,
CollectionMutator.updateSlice(collectionName, collection, newSlice));
}
@ -264,7 +264,7 @@ public class SliceMutator {
} else {
replicasCopy.put(replica.getName(), replica);
}
Slice newSlice = new Slice(slice.getName(), replicasCopy, slice.getProperties());
Slice newSlice = new Slice(slice.getName(), replicasCopy, slice.getProperties(), collection.getName());
log.debug("Old Slice: {}", slice);
log.debug("New Slice: {}", newSlice);
return CollectionMutator.updateSlice(collection.getName(), collection, newSlice);

View File

@ -112,6 +112,8 @@ public class ClusterStateMockUtil {
collectionProps.put(ZkStateReader.REPLICATION_FACTOR, Integer.toString(replicationFactor));
Map<String,DocCollection> collectionStates = new HashMap<>();
DocCollection docCollection = null;
String collName = null;
String sliceName = null;
Slice slice = null;
int replicaCount = 1;
@ -121,12 +123,13 @@ public class ClusterStateMockUtil {
switch (m.group(1)) {
case "c":
slices = new HashMap<>();
docCollection = new DocCollection("collection" + (collectionStates.size() + 1), slices, collectionProps, null);
docCollection = new DocCollection(collName = "collection" + (collectionStates.size() + 1), slices, collectionProps, null);
collectionStates.put(docCollection.getName(), docCollection);
break;
case "s":
replicas = new HashMap<>();
slice = new Slice("slice" + (slices.size() + 1), replicas, null);
if (collName == null) collName = "collection" + (collectionStates.size() + 1);
slice = new Slice(sliceName = "slice" + (slices.size() + 1), replicas, null, collName);
slices.put(slice.getName(), slice);
break;
case "r":
@ -168,8 +171,9 @@ public class ClusterStateMockUtil {
replicaPropMap.put(ZkStateReader.NODE_NAME_PROP, nodeName);
replicaPropMap.put(ZkStateReader.BASE_URL_PROP, "http://baseUrl" + node);
replicaPropMap.put(ZkStateReader.STATE_PROP, state.toString());
replica = new Replica(replicaName, replicaPropMap);
if (collName == null) collName = "collection" + (collectionStates.size() + 1);
if (sliceName == null) sliceName = "slice" + (slices.size() + 1);
replica = new Replica(replicaName, replicaPropMap, collName, sliceName);
replicas.put(replica.getName(), replica);
break;

View File

@ -25,8 +25,8 @@ import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.util.Utils;
import org.junit.Test;
@ -45,11 +45,11 @@ public class ClusterStateTest extends SolrTestCaseJ4 {
props.put("prop1", "value");
props.put("prop2", "value2");
Replica replica = new Replica("node1", props);
Replica replica = new Replica("node1", props, "collection1", "shard1");
sliceToProps.put("node1", replica);
Slice slice = new Slice("shard1", sliceToProps, null);
Slice slice = new Slice("shard1", sliceToProps, null, "collection1");
slices.put("shard1", slice);
Slice slice2 = new Slice("shard2", sliceToProps, null);
Slice slice2 = new Slice("shard2", sliceToProps, null, "collection1");
slices.put("shard2", slice2);
collectionStates.put("collection1", new DocCollection("collection1", slices, null, DocRouter.DEFAULT));
collectionStates.put("collection2", new DocCollection("collection2", slices, null, DocRouter.DEFAULT));

View File

@ -309,12 +309,12 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 {
String slice = replica.getStr(ZkStateReader.SHARD_ID_PROP);
if (!slices.containsKey(slice)) slices.put(slice, new HashMap<>());
String replicaName = replica.getStr(ZkStateReader.CORE_NAME_PROP);
slices.get(slice).put(replicaName, new Replica(replicaName, replica.getProperties()));
slices.get(slice).put(replicaName, new Replica(replicaName, replica.getProperties(), docCollection.getName(), slice));
}
Map<String, Slice> slicesMap = new HashMap<>();
for (Map.Entry<String, Map<String, Replica>> entry : slices.entrySet()) {
slicesMap.put(entry.getKey(), new Slice(entry.getKey(), entry.getValue(), null));
slicesMap.put(entry.getKey(), new Slice(entry.getKey(), entry.getValue(), null, docCollection.getName()));
}
return docCollection.copyWithSlices(slicesMap);

View File

@ -16,6 +16,11 @@
*/
package org.apache.solr.cloud;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
@ -25,11 +30,6 @@ import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.util.Utils;
import org.junit.Test;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
public class SliceStateTest extends SolrTestCaseJ4 {
@Test
@ -42,9 +42,9 @@ public class SliceStateTest extends SolrTestCaseJ4 {
Map<String, Replica> sliceToProps = new HashMap<>();
Map<String, Object> props = new HashMap<>();
Replica replica = new Replica("node1", props);
Replica replica = new Replica("node1", props, "collection1", "shard1");
sliceToProps.put("node1", replica);
Slice slice = new Slice("shard1", sliceToProps, null);
Slice slice = new Slice("shard1", sliceToProps, null, "collection1");
assertSame("Default state not set to active", Slice.State.ACTIVE, slice.getState());
slices.put("shard1", slice);
collectionStates.put("collection1", new DocCollection("collection1", slices, null, DocRouter.DEFAULT));

View File

@ -273,7 +273,7 @@ public class TestHashPartitioner extends SolrTestCaseJ4 {
Map<String,Slice> slices = new HashMap<>();
for (int i=0; i<ranges.size(); i++) {
Range range = ranges.get(i);
Slice slice = new Slice("shard"+(i+1), null, map("range",range));
Slice slice = new Slice("shard"+(i+1), null, map("range",range), "collections1");
slices.put(slice.getName(), slice);
}

View File

@ -32,6 +32,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.cloud.ShardTerms;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.common.util.TimeSource;
import org.apache.solr.util.TimeOut;
@ -267,7 +268,7 @@ public class ZkShardTermsTest extends SolrCloudTestCase {
public void testEnsureTermsIsHigher() {
Map<String, Long> map = new HashMap<>();
map.put("leader", 0L);
ZkShardTerms.Terms terms = new ZkShardTerms.Terms(map, 0);
ShardTerms terms = new ShardTerms(map, 0);
terms = terms.increaseTerms("leader", Collections.singleton("replica"));
assertEquals(1L, terms.getTerm("leader").longValue());
}

View File

@ -16,13 +16,6 @@
*/
package org.apache.solr.cloud.api.collections;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
@ -53,6 +46,13 @@ import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class AssignTest extends SolrTestCaseJ4 {
@Override
@ -147,8 +147,8 @@ public class AssignTest extends SolrTestCaseJ4 {
// TODO: fix this to be independent of ZK
ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
Map<String, Slice> slices = new HashMap<>();
slices.put("shard1", new Slice("shard1", new HashMap<>(), null));
slices.put("shard2", new Slice("shard2", new HashMap<>(), null));
slices.put("shard1", new Slice("shard1", new HashMap<>(), null, "collection1"));
slices.put("shard2", new Slice("shard2", new HashMap<>(), null, "collection1"));
DocCollection docCollection = new DocCollection("collection1", slices, null, DocRouter.DEFAULT);
assertEquals("Core name pattern changed", "collection1_shard1_replica_n1", Assign.buildSolrCoreName(stateManager, docCollection, "shard1", Replica.Type.NRT));

View File

@ -151,7 +151,7 @@ public class CoreSorterTest extends SolrTestCaseJ4 {
}
public Replica getReplica(String node) {
return new Replica(replicaName, Utils.makeMap("core", replicaName, "node_name", node));
return new Replica(replicaName, Utils.makeMap("core", replicaName, "node_name", node), cd.getCollectionName(), cd.getShardId());
}
public boolean equals(String coll, String slice) {

View File

@ -82,13 +82,52 @@
</New>
<!-- =========================================================== -->
<!-- RewriteHandle to redirect root to Solr -->
<!-- RewriteHandle to set headers, redirect root to Solr -->
<!-- =========================================================== -->
<New id="RewriteHandler" class="org.eclipse.jetty.rewrite.handler.RewriteHandler">
<Set name="rewriteRequestURI">true</Set>
<Set name="rewritePathInfo">false</Set>
<Set name="originalPathAttribute">requestedPath</Set>
<!-- security-related headers -->
<Call name="addRule">
<Arg>
<New class="org.eclipse.jetty.rewrite.handler.HeaderPatternRule">
<Set name="pattern">*</Set>
<Set name="name">Content-Security-Policy</Set>
<Set name="value">default-src 'none'; base-uri 'none'; form-action 'self'; frame-ancestors 'none'; style-src 'self' 'unsafe-inline'; script-src 'self' 'unsafe-eval'; img-src 'self'; media-src 'self'; font-src 'self'; connect-src 'self';</Set>
</New>
</Arg>
</Call>
<Call name="addRule">
<Arg>
<New class="org.eclipse.jetty.rewrite.handler.HeaderPatternRule">
<Set name="pattern">*</Set>
<Set name="name">X-Content-Type-Options</Set>
<Set name="value">nosniff</Set>
</New>
</Arg>
</Call>
<Call name="addRule">
<Arg>
<New class="org.eclipse.jetty.rewrite.handler.HeaderPatternRule">
<Set name="pattern">*</Set>
<Set name="name">X-Frame-Options</Set>
<Set name="value">SAMEORIGIN</Set>
</New>
</Arg>
</Call>
<Call name="addRule">
<Arg>
<New class="org.eclipse.jetty.rewrite.handler.HeaderPatternRule">
<Set name="pattern">*</Set>
<Set name="name">X-XSS-Protection</Set>
<Set name="value">1; mode=block</Set>
</New>
</Arg>
</Call>
<!-- redirect root to solr -->
<Call name="addRule">
<Arg>
<New class="org.eclipse.jetty.rewrite.handler.RedirectRegexRule">

View File

@ -124,7 +124,7 @@ RAUP first reads TRA configuration from the alias properties when it is initiali
within the target collection.
* If it belongs in the current collection (which is usually the case if processing events as they occur), the document
passes through to DUP. DUP does it's normal collection-level processing that may involve routing the document
passes through to DUP. DUP does its normal collection-level processing that may involve routing the document
to another shard & replica.
* If the timestamp on the document is more recent than the most recent TRA segment, then a new collection needs to be

View File

@ -178,7 +178,7 @@ You can refer <<solr-tutorial.adoc#exercise-1,Solr Tutorial>> for an extensive w
If you want to configure an external ZooKeeper ensemble to avoid using the embedded single-instance ZooKeeper that runs in the same JVM as the Solr node, you need to make a few tweaks to the steps listed above, as follows.
* When creating the security group, instead of opening port `9983` for ZooKeeper, you'll open `2181` (or whatever port you are using for ZooKeeper: it's default is 2181).
* When creating the security group, instead of opening port `9983` for ZooKeeper, you'll open `2181` (or whatever port you are using for ZooKeeper: its default is 2181).
* When configuring the number of instances to launch, choose to open 3 instances instead of 2.
* When modifying the `/etc/hosts` on each machine, add a third line for the 3rd instance and give it a recognizable name:
+

View File

@ -276,7 +276,7 @@ fq=quantity_in_stock:[5 TO *]
fq={!frange l=10 u=100}mul(popularity,price)
fq={!frange cost=200 l=0}pow(mul(sum(1, query('tag:smartphone')), div(1,avg_rating)), 2.3)
These are the same filters run w/o caching. The simple range query on the `quantity_in_stock` field will be run in parallel with the main query like a traditional lucene filter, while the 2 `frange` filters will only be checked against each document has already matched the main query and the `quantity_in_stock` range query -- first the simpler `mul(popularity,price)` will be checked (because of it's implicit `cost=100`) and only if it matches will the final very complex filter (with it's higher `cost=200`) be checked.
These are the same filters run w/o caching. The simple range query on the `quantity_in_stock` field will be run in parallel with the main query like a traditional lucene filter, while the 2 `frange` filters will only be checked against each document has already matched the main query and the `quantity_in_stock` range query -- first the simpler `mul(popularity,price)` will be checked (because of its implicit `cost=100`) and only if it matches will the final very complex filter (with its higher `cost=200`) be checked.
[source,text]
q=some keywords

View File

@ -170,7 +170,7 @@ The "alternate" fallback options are more primitive.
<<The Original Highlighter,Original Highlighter>>:: (`hl.method=original`, the default)
+
The Original Highlighter, sometimes called the "Standard Highlighter" or "Default Highlighter", is Lucene's original highlighter, a venerable option with a high degree of customization options.
It's query accuracy is good enough for most needs, although it's not quite as good/perfect as the Unified Highlighter.
Its query accuracy is good enough for most needs, although it's not quite as good/perfect as the Unified Highlighter.
+
The Original Highlighter will normally analyze stored text on the fly in order to highlight. It will use full term vectors if available.
+

View File

@ -909,7 +909,7 @@ ____
The `relatedness(...)` function is used to "score" these relationships, relative to "Foreground" and "Background" sets of documents, specified in the function params as queries.
Unlike most aggregation functions, the `relatedness(...)` function is aware of whether and how it's used in <<nested-facets,Nested Facets>>. It evaluates the query defining the current bucket _independently_ from it's parent/ancestor buckets, and intersects those documents with a "Foreground Set" defined by the foreground query _combined with the ancestor buckets_. The result is then compared to a similar intersection done against the "Background Set" (defined exclusively by background query) to see if there is a positive, or negative, correlation between the current bucket and the Foreground Set, relative to the Background Set.
Unlike most aggregation functions, the `relatedness(...)` function is aware of whether and how it's used in <<nested-facets,Nested Facets>>. It evaluates the query defining the current bucket _independently_ from its parent/ancestor buckets, and intersects those documents with a "Foreground Set" defined by the foreground query _combined with the ancestor buckets_. The result is then compared to a similar intersection done against the "Background Set" (defined exclusively by background query) to see if there is a positive, or negative, correlation between the current bucket and the Foreground Set, relative to the Background Set.
NOTE: While it's very common to define the Background Set as `\*:*`, or some other super-set of the Foreground Query, it is not strictly required. The `relatedness(...)` function can be used to compare the statistical relatedness of sets of documents to orthogonal foreground/background queries.

View File

@ -235,7 +235,7 @@ Example:
A `graph` domain change option works similarly to the `join` domain option, but can do traversal multiple hops `from` the existing domain `to` other documents.
This works very similar to the <<other-parsers.adoc#graph-query-parser,Graph Query Parser>>, supporting all of it's optional parameters, and has the same limitations when dealing with multi-shard collections.
This works very similar to the <<other-parsers.adoc#graph-query-parser,Graph Query Parser>>, supporting all of its optional parameters, and has the same limitations when dealing with multi-shard collections.
Example:
[source,json]

View File

@@ -73,7 +73,7 @@ Several changes have been made to configsets that ship with Solr; not only their
* The `data_driven_configset` and `basic_configset` have been removed, and replaced by the `_default` configset. The `sample_techproducts_configset` also remains, and is designed for use with the example documents shipped with Solr in the `example/exampledocs` directory.
* When creating a new collection, if you do not specify a configset, the `_default` will be used.
** If you use SolrCloud, the `_default` configset will be automatically uploaded to ZooKeeper.
** If you use standalone mode, the instanceDir will be created automatically, using the `_default` configset as it's basis.
** If you use standalone mode, the instanceDir will be created automatically, using the `_default` configset as its basis.
=== Schemaless Improvements

View File

@@ -92,7 +92,7 @@ When building the Guide, the `solr-root-path` attribute will be automatically se
In order for editors (such as ATOM) to be able to offer "live preview" of the `*.adoc` files using these includes, the `solr-root-path` attribute must also be set as a document level attribute in each file, with the correct relative path.
For example, `using-solrj.adoc` sets `solr-root-path` in it's header, along with an `example-source-dir` attribute (that depends on `solr-root-path`) in order to reduce redundancy in the many `include::` directives it specifies...
For example, `using-solrj.adoc` sets `solr-root-path` in its header, along with an `example-source-dir` attribute (that depends on `solr-root-path`) in order to reduce redundancy in the many `include::` directives it specifies...
[source]
--

View File

@@ -390,7 +390,7 @@ The graph is built according to linkages between documents based on the terms fo
Supported field types are point fields with docValues enabled, or string fields with `indexed=true` or `docValues=true`.
TIP: For string fields which are `indexed=false` and `docValues=true`, please refer to the javadocs for {lucene-javadocs}sandbox/org/apache/lucene/search/DocValuesTermsQuery.html[`DocValuesTermsQuery`]
for it's performance characteristics so `indexed=true` will perform better for most use-cases.
for its performance characteristics; `indexed=true` will perform better for most use-cases.
=== Graph Query Parameters

View File

@@ -46,7 +46,7 @@ These issues are not a problem for most users. However, some use cases would per
Solr accomplishes this by allowing you to set the replica type when creating a new collection or when adding a replica. The available types are:
* *NRT*: This is the default. A NRT replica (NRT = NearRealTime) maintains a transaction log and writes new documents to it's indexes locally. Any replica of this type is eligible to become a leader. Traditionally, this was the only type supported by Solr.
* *NRT*: This is the default. A NRT replica (NRT = NearRealTime) maintains a transaction log and writes new documents to its indexes locally. Any replica of this type is eligible to become a leader. Traditionally, this was the only type supported by Solr.
* *TLOG*: This type of replica maintains a transaction log but does not index document changes locally. This type helps speed up indexing since no commits need to occur in the replicas. When this type of replica needs to update its index, it does so by replicating the index from the leader. This type of replica is also eligible to become a shard leader; it would do so by first processing its transaction log. If it does become a leader, it will behave the same as if it was a NRT type of replica.
* *PULL*: This type of replica does not maintain a transaction log nor index document changes locally. It only replicates the index from the shard leader. It is not eligible to become a shard leader and doesn't participate in shard leader election at all.
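For illustration, a collection mixing the three types can be created through SolrJ's Collections API wrapper; the collection name, configset, and ZooKeeper address below are assumptions:

[source,java]
----
import java.util.Collections;
import java.util.Optional;

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;

public class ReplicaTypesSketch {
  public static void main(String[] args) throws Exception {
    try (CloudSolrClient client = new CloudSolrClient.Builder(
        Collections.singletonList("localhost:2181"), Optional.empty()).build()) {
      // One shard with one NRT, one TLOG and one PULL replica.
      CollectionAdminRequest
          .createCollection("replica_types_demo", "_default", 1, 1, 1, 1)
          .process(client);
    }
  }
}
----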

View File

@@ -415,7 +415,7 @@ The format, either `ints2D` (default) or `png`.
[TIP]
====
You'll experiment with different `distErrPct` values (probably 0.10 - 0.20) with various input geometries till the default size is what you're looking for. The specific details of how it's computed isn't important. For high-detail grids used in point-plotting (loosely one cell per pixel), set `distErr` to be the number of decimal-degrees of several pixels or so of the map being displayed. Also, you probably don't want to use a geohash-based grid because the cell orientation between grid levels flip-flops between being square and rectangle. Quad is consistent and has more levels, albeit at the expense of a larger index.
You'll want to experiment with different `distErrPct` values (probably 0.10 - 0.20) with various input geometries until the default size is what you're looking for. The specific details of how it's computed aren't important. For high-detail grids used in point-plotting (loosely one cell per pixel), set `distErr` to be the number of decimal-degrees of several pixels or so of the map being displayed. Also, you probably don't want to use a geohash-based grid because the cell orientation between grid levels flip-flops between being square and rectangular. Quad is consistent and has more levels, albeit at the expense of a larger index.
====
Here's some sample output in JSON (with "..." inserted for brevity):

View File

@@ -925,7 +925,7 @@ merge(
== null
The null expression is a useful utility function for understanding bottlenecks when performing parallel relational algebra (joins, intersections, rollups etc.). The null function reads all the tuples from an underlying stream and returns a single tuple with the count and processing time. Because the null stream adds minimal overhead of it's own, it can be used to isolate the performance of Solr's /export handler. If the /export handlers performance is not the bottleneck, then the bottleneck is likely occurring in the workers where the stream decorators are running.
The null expression is a useful utility function for understanding bottlenecks when performing parallel relational algebra (joins, intersections, rollups, etc.). The null function reads all the tuples from an underlying stream and returns a single tuple with the count and processing time. Because the null stream adds minimal overhead of its own, it can be used to isolate the performance of Solr's /export handler. If the /export handler's performance is not the bottleneck, then the bottleneck is likely occurring in the workers where the stream decorators are running.
The null expression can be wrapped by the parallel function and sent to worker nodes. In this scenario each worker will return one tuple with the count of tuples processed on the worker and the timing information for that worker. This gives valuable information such as:
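A SolrJ sketch of that technique, assuming a hypothetical `logs` collection with an `id` field; the `null(...)` expression is sent to the `/stream` handler and only the summary tuple(s) come back:

[source,java]
----
import org.apache.solr.client.solrj.io.Tuple;
import org.apache.solr.client.solrj.io.stream.SolrStream;
import org.apache.solr.common.params.ModifiableSolrParams;

public class NullStreamSketch {
  public static void main(String[] args) throws Exception {
    // The null stream swallows every tuple from the wrapped search and returns a
    // single tuple with the count and processing time.
    String expr = "null(search(logs, q=\"*:*\", fl=\"id\", sort=\"id asc\", qt=\"/export\"))";
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("expr", expr);
    params.set("qt", "/stream");
    SolrStream stream = new SolrStream("http://localhost:8983/solr/logs", params);
    try {
      stream.open();
      int summaries = 0;
      for (Tuple t = stream.read(); !t.EOF; t = stream.read()) {
        summaries++; // one summary tuple per stream (or per worker when wrapped in parallel())
      }
      System.out.println("summary tuples: " + summaries);
    } finally {
      stream.close();
    }
  }
}
----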
@@ -1080,7 +1080,7 @@ plist(tuple(a=search(collection1, q="*:*", fl="id, prod_ss", sort="id asc")),
== priority
The `priority` function is a simple priority scheduler for the <<executor>> function. The `executor` function doesn't directly have a concept of task prioritization; instead it simply executes tasks in the order that they are read from it's underlying stream. The `priority` function provides the ability to schedule a higher priority task ahead of lower priority tasks that were submitted earlier.
The `priority` function is a simple priority scheduler for the <<executor>> function. The `executor` function doesn't directly have a concept of task prioritization; instead it simply executes tasks in the order that they are read from its underlying stream. The `priority` function provides the ability to schedule a higher priority task ahead of lower priority tasks that were submitted earlier.
The `priority` function wraps two <<stream-source-reference.adoc#topic,topics>> that are both emitting tuples that contain streaming expressions to execute. The first topic is considered the higher priority task queue.

View File

@@ -26,7 +26,7 @@ If you are unfamiliar with JMX, you may find the following overview useful: htt
JMX support is configured by defining a metrics reporter, as described in the section <<metrics-reporting.adoc#jmx-reporter,JMX Reporter>>.
If you have an existing MBean server running in Solr's JVM, or if you start Solr with the system property `-Dcom.sun.management.jmxremote`, Solr will automatically identify it's location on startup even if you have not defined a reporter explicitly in `solr.xml`. You can also define the location of the MBean server with parameters defined in the reporter definition.
If you have an existing MBean server running in Solr's JVM, or if you start Solr with the system property `-Dcom.sun.management.jmxremote`, Solr will automatically identify its location on startup even if you have not defined a reporter explicitly in `solr.xml`. You can also define the location of the MBean server with parameters defined in the reporter definition.
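As a sketch of what that enables, any standard JMX client can then browse Solr's MBeans; the remote port and the disabled authentication below are assumptions about how the JVM was started:

[source,java]
----
import java.util.Set;

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class SolrJmxSketch {
  public static void main(String[] args) throws Exception {
    // Assumes Solr was started with something like:
    //   -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=18983
    //   -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false
    JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:18983/jmxrmi");
    try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
      MBeanServerConnection conn = connector.getMBeanServerConnection();
      Set<ObjectName> solrBeans = conn.queryNames(new ObjectName("solr*:*"), null);
      solrBeans.forEach(System.out::println);
    }
  }
}
----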
== Configuring MBean Servers

View File

@@ -0,0 +1,256 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.client.solrj.cloud;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import org.apache.solr.common.MapWriter;
import org.apache.solr.common.SolrException;
/**
* Holds the values of shard terms. This class is immutable; create a new instance for every mutation.
*/
public class ShardTerms implements MapWriter {
private static final String RECOVERING_TERM_SUFFIX = "_recovering";
private final Map<String, Long> values;
private final long maxTerm;
// ZK node version
private final int version;
public ShardTerms() {
this(new HashMap<>(), 0);
}
public ShardTerms(ShardTerms newTerms, int version) {
this(newTerms.values, version);
}
@Override
public void writeMap(EntryWriter ew) throws IOException {
values.forEach(ew.getBiConsumer());
}
public ShardTerms(Map<String, Long> values, int version) {
this.values = values;
this.version = version;
if (values.isEmpty()) this.maxTerm = 0;
else this.maxTerm = Collections.max(values.values());
}
/**
* Can {@code coreNodeName} become leader?
* @param coreNodeName of the replica
* @return true if {@code coreNodeName} can become leader, false otherwise
*/
public boolean canBecomeLeader(String coreNodeName) {
return haveHighestTermValue(coreNodeName) && !values.containsKey(recoveringTerm(coreNodeName));
}
/**
* Is {@code coreNodeName}'s term highest?
* @param coreNodeName of the replica
* @return true if term of {@code coreNodeName} is highest
*/
public boolean haveHighestTermValue(String coreNodeName) {
if (values.isEmpty()) return true;
long maxTerm = Collections.max(values.values());
return values.getOrDefault(coreNodeName, 0L) == maxTerm;
}
public Long getTerm(String coreNodeName) {
return values.get(coreNodeName);
}
/**
* Return a new {@link ShardTerms} in which the term of {@code leader} is higher than the terms of {@code replicasNeedingRecovery}
* @param leader coreNodeName of the leader
* @param replicasNeedingRecovery set of replicas whose terms should end up lower than the leader's term
* @return null if the term of {@code leader} is already higher than the terms of {@code replicasNeedingRecovery}
*/
public ShardTerms increaseTerms(String leader, Set<String> replicasNeedingRecovery) {
if (!values.containsKey(leader)) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Can not find leader's term " + leader);
}
boolean changed = false;
boolean foundReplicasInLowerTerms = false;
HashMap<String, Long> newValues = new HashMap<>(values);
long leaderTerm = newValues.get(leader);
for (Map.Entry<String, Long> entry : newValues.entrySet()) {
String key = entry.getKey();
if (replicasNeedingRecovery.contains(key)) foundReplicasInLowerTerms = true;
if (Objects.equals(entry.getValue(), leaderTerm)) {
if (skipIncreaseTermOf(key, replicasNeedingRecovery)) {
changed = true;
} else {
newValues.put(key, leaderTerm+1);
}
}
}
// We should skip the optimization if there are no replicasNeedingRecovery present in local terms,
// this may indicate that the current value is stale
if (!changed && foundReplicasInLowerTerms) return null;
return new ShardTerms(newValues, version);
}
private boolean skipIncreaseTermOf(String key, Set<String> replicasNeedingRecovery) {
if (key.endsWith(RECOVERING_TERM_SUFFIX)) {
key = key.substring(0, key.length() - RECOVERING_TERM_SUFFIX.length());
}
return replicasNeedingRecovery.contains(key);
}
/**
* Return a new {@link ShardTerms} in which the highest term is not zero
* @return null if the highest term is already larger than zero
*/
public ShardTerms ensureHighestTermsAreNotZero() {
if (maxTerm > 0) return null;
else {
HashMap<String, Long> newValues = new HashMap<>(values);
for (String replica : values.keySet()) {
newValues.put(replica, 1L);
}
return new ShardTerms(newValues, version);
}
}
/**
* Return a new {@link ShardTerms} in which the terms for {@code coreNodeName} are removed
* @param coreNodeName of the replica
* @return null if no term for {@code coreNodeName} exists
*/
public ShardTerms removeTerm(String coreNodeName) {
if (!values.containsKey(recoveringTerm(coreNodeName)) && !values.containsKey(coreNodeName)) {
return null;
}
HashMap<String, Long> newValues = new HashMap<>(values);
newValues.remove(coreNodeName);
newValues.remove(recoveringTerm(coreNodeName));
return new ShardTerms(newValues, version);
}
/**
* Return a new {@link ShardTerms} in which the associated term of {@code coreNodeName} is not null
* @param coreNodeName of the replica
* @return null if a term for {@code coreNodeName} already exists
*/
public ShardTerms registerTerm(String coreNodeName) {
if (values.containsKey(coreNodeName)) return null;
HashMap<String, Long> newValues = new HashMap<>(values);
newValues.put(coreNodeName, 0L);
return new ShardTerms(newValues, version);
}
public ShardTerms setTermToZero(String coreNodeName) {
if (values.getOrDefault(coreNodeName, -1L) == 0) {
return null;
}
HashMap<String, Long> newValues = new HashMap<>(values);
newValues.put(coreNodeName, 0L);
return new ShardTerms(newValues, version);
}
/**
* Return a new {@link ShardTerms} in which the term of {@code coreNodeName} is max
* @param coreNodeName of the replica
* @return null if term of {@code coreNodeName} is already maximum
*/
public ShardTerms setTermEqualsToLeader(String coreNodeName) {
long maxTerm = getMaxTerm();
if (values.get(coreNodeName) == maxTerm) return null;
HashMap<String, Long> newValues = new HashMap<>(values);
newValues.put(coreNodeName, maxTerm);
newValues.remove(recoveringTerm(coreNodeName));
return new ShardTerms(newValues, version);
}
public long getMaxTerm() {
return maxTerm;
}
/**
* Mark {@code coreNodeName} as recovering
* @param coreNodeName of the replica
* @return null if {@code coreNodeName} is already marked as recovering
*/
public ShardTerms startRecovering(String coreNodeName) {
long maxTerm = getMaxTerm();
if (values.get(coreNodeName) == maxTerm)
return null;
HashMap<String, Long> newValues = new HashMap<>(values);
if (!newValues.containsKey(recoveringTerm(coreNodeName))) {
long currentTerm = newValues.getOrDefault(coreNodeName, 0L);
// by keeping old term, we will have more information in leader election
newValues.put(recoveringTerm(coreNodeName), currentTerm);
}
newValues.put(coreNodeName, maxTerm);
return new ShardTerms(newValues, version);
}
/**
* Mark {@code coreNodeName} as finished recovering
* @param coreNodeName of the replica
* @return null if {@code coreNodeName} has already finished recovering
*/
public ShardTerms doneRecovering(String coreNodeName) {
if (!values.containsKey(recoveringTerm(coreNodeName))) {
return null;
}
HashMap<String, Long> newValues = new HashMap<>(values);
newValues.remove(recoveringTerm(coreNodeName));
return new ShardTerms(newValues, version);
}
public static String recoveringTerm(String coreNodeName) {
return coreNodeName + RECOVERING_TERM_SUFFIX;
}
@Override
public String toString() {
return "Terms{" +
"values=" + values +
", version=" + version +
'}';
}
public int getVersion() {
return version;
}
public Map<String, Long> getTerms() {
return new HashMap<>(this.values);
}
public boolean isRecovering(String name) {
return values.containsKey(recoveringTerm(name));
}
}
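A short usage sketch of the class above (the core-node names are made up; note that every mutator returns a new immutable instance, or null when the call would change nothing):

ShardTerms terms = new ShardTerms();               // empty, version 0
terms = terms.registerTerm("core_node1");          // leader starts at term 0
terms = terms.registerTerm("core_node2");          // follower also starts at term 0

// The leader notices core_node2 needs recovery: every replica currently at the
// leader's term is bumped, except the ones that need recovery.
terms = terms.increaseTerms("core_node1", java.util.Collections.singleton("core_node2"));
System.out.println(terms.canBecomeLeader("core_node1")); // true  - highest term, not recovering
System.out.println(terms.canBecomeLeader("core_node2")); // false - its term is lower

// core_node2 recovers and catches up to the leader's term.
terms = terms.startRecovering("core_node2");
terms = terms.doneRecovering("core_node2");
System.out.println(terms.canBecomeLeader("core_node2")); // true again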

View File

@@ -31,7 +31,6 @@ import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.util.Utils;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.Watcher;
import org.noggit.JSONWriter;
/**
@@ -104,7 +103,6 @@ public class ClusterState implements JSONWriter.Writable {
* Implementation note: This method resolves the collection reference by calling
* {@link CollectionRef#get()} which can make a call to ZooKeeper. This is necessary
* because the semantics of how collection list is loaded have changed in SOLR-6629.
* Please see javadocs in {@link ZkStateReader#refreshCollectionList(Watcher)}
*/
public boolean hasCollection(String collectionName) {
return getCollectionOrNull(collectionName) != null;
@@ -141,7 +139,6 @@ public class ClusterState implements JSONWriter.Writable {
* Implementation note: This method resolves the collection reference by calling
* {@link CollectionRef#get()} which may make a call to ZooKeeper. This is necessary
* because the semantics of how collection list is loaded have changed in SOLR-6629.
* Please see javadocs in {@link ZkStateReader#refreshCollectionList(Watcher)}
*/
public DocCollection getCollectionOrNull(String collectionName, boolean allowCached) {
CollectionRef ref = collectionStates.get(collectionName);
@@ -154,7 +151,6 @@ public class ClusterState implements JSONWriter.Writable {
* Implementation note: This method resolves the collection reference by calling
* {@link CollectionRef#get()} which can make a call to ZooKeeper. This is necessary
* because the semantics of how collection list is loaded have changed in SOLR-6629.
* Please see javadocs in {@link ZkStateReader#refreshCollectionList(Watcher)}
*
* @return a map of collection name vs DocCollection object
*/
@@ -258,13 +254,13 @@ public class ClusterState implements JSONWriter.Writable {
Map<String,Object> props;
Map<String,Slice> slices;
Map<String,Object> sliceObjs = (Map<String,Object>)objs.get(DocCollection.SHARDS);
Map<String, Object> sliceObjs = (Map<String, Object>) objs.get(DocCollection.SHARDS);
if (sliceObjs == null) {
// legacy format from 4.0... there was no separate "shards" level to contain the collection shards.
slices = Slice.loadAllFromMap(objs);
slices = Slice.loadAllFromMap(name, objs);
props = Collections.emptyMap();
} else {
slices = Slice.loadAllFromMap(sliceObjs);
slices = Slice.loadAllFromMap(name, sliceObjs);
props = new HashMap<>(objs);
objs.remove(DocCollection.SHARDS);
}

View File

@@ -110,9 +110,12 @@ public class Replica extends ZkNodeProps {
private final String nodeName;
private final State state;
private final Type type;
public final String slice, collection;
public Replica(String name, Map<String,Object> propMap) {
public Replica(String name, Map<String,Object> propMap, String collection, String slice) {
super(propMap);
this.collection = collection;
this.slice = slice;
this.name = name;
this.nodeName = (String) propMap.get(ZkStateReader.NODE_NAME_PROP);
if (propMap.get(ZkStateReader.STATE_PROP) != null) {
@@ -124,6 +127,13 @@ public class Replica extends ZkNodeProps {
type = Type.get((String) propMap.get(ZkStateReader.REPLICA_TYPE));
}
public String getCollection(){
return collection;
}
public String getSlice(){
return slice;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;

View File

@@ -37,9 +37,10 @@ import static org.apache.solr.common.util.Utils.toJSONString;
* A Slice contains immutable information about a logical shard (all replicas that share the same shard id).
*/
public class Slice extends ZkNodeProps implements Iterable<Replica> {
public final String collection;
/** Loads multiple slices into a Map from a generic Map that probably came from deserialized JSON. */
public static Map<String,Slice> loadAllFromMap(Map<String, Object> genericSlices) {
public static Map<String,Slice> loadAllFromMap(String collection, Map<String, Object> genericSlices) {
if (genericSlices == null) return Collections.emptyMap();
Map<String, Slice> result = new LinkedHashMap<>(genericSlices.size());
for (Map.Entry<String, Object> entry : genericSlices.entrySet()) {
@@ -48,7 +49,7 @@ public class Slice extends ZkNodeProps implements Iterable<Replica> {
if (val instanceof Slice) {
result.put(name, (Slice) val);
} else if (val instanceof Map) {
result.put(name, new Slice(name, null, (Map<String, Object>) val));
result.put(name, new Slice(name, null, (Map<String, Object>) val, collection));
}
}
return result;
@@ -128,9 +129,10 @@ public class Slice extends ZkNodeProps implements Iterable<Replica> {
* @param replicas The replicas of the slice. This is used directly and a copy is not made. If null, replicas will be constructed from props.
* @param props The properties of the slice - a shallow copy will always be made.
*/
public Slice(String name, Map<String,Replica> replicas, Map<String,Object> props) {
public Slice(String name, Map<String,Replica> replicas, Map<String,Object> props, String collection) {
super( props==null ? new LinkedHashMap<String,Object>(2) : new LinkedHashMap<>(props));
this.name = name;
this.collection = collection;
Object rangeObj = propMap.get(RANGE);
if (propMap.get(ZkStateReader.STATE_PROP) != null) {
@@ -162,7 +164,7 @@ public class Slice extends ZkNodeProps implements Iterable<Replica> {
replicationFactor = null; // future
// add the replicas *after* the other properties (for aesthetics, so it's easy to find slice properties in the JSON output)
this.replicas = replicas != null ? replicas : makeReplicas((Map<String,Object>)propMap.get(REPLICAS));
this.replicas = replicas != null ? replicas : makeReplicas(collection,name, (Map<String,Object>)propMap.get(REPLICAS));
propMap.put(REPLICAS, this.replicas);
Map<String, Object> rules = (Map<String, Object>) propMap.get("routingRules");
@@ -186,7 +188,7 @@ public class Slice extends ZkNodeProps implements Iterable<Replica> {
}
private Map<String,Replica> makeReplicas(Map<String,Object> genericReplicas) {
private Map<String,Replica> makeReplicas(String collection, String slice,Map<String,Object> genericReplicas) {
if (genericReplicas == null) return new HashMap<>(1);
Map<String,Replica> result = new LinkedHashMap<>(genericReplicas.size());
for (Map.Entry<String,Object> entry : genericReplicas.entrySet()) {
@@ -196,7 +198,7 @@ public class Slice extends ZkNodeProps implements Iterable<Replica> {
if (val instanceof Replica) {
r = (Replica)val;
} else {
r = new Replica(name, (Map<String,Object>)val);
r = new Replica(name, (Map<String,Object>)val, collection, slice);
}
result.put(name, r);
}
@@ -213,6 +215,9 @@ public class Slice extends ZkNodeProps implements Iterable<Replica> {
return null;
}
public String getCollection() {
return collection;
}
/**
* Return slice name (shard id).
*/

View File

@@ -944,6 +944,11 @@ public class ZkStateReader implements SolrCloseable {
return null;
}
public boolean isNodeLive(String node) {
return liveNodes.contains(node);
}
/**
* Get shard leader properties, with retry if none exist.
*/

View File

@@ -18,16 +18,6 @@
package org.apache.solr.client.solrj.cloud.autoscaling;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.CLUSTER_PREFERENCES;
import static org.apache.solr.client.solrj.cloud.autoscaling.TestPolicy2.loadFromResource;
import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.CORES;
import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.FREEDISK;
import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.REPLICA;
import static org.apache.solr.common.cloud.ZkStateReader.CLUSTER_STATE;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA;
import java.io.IOException;
import java.io.StringWriter;
import java.lang.invoke.MethodHandles;
@@ -45,6 +35,8 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import org.apache.solr.SolrTestCaseJ4;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrResponse;
@@ -83,8 +75,15 @@ import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.CLUSTER_PREFERENCES;
import static org.apache.solr.client.solrj.cloud.autoscaling.TestPolicy2.loadFromResource;
import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.CORES;
import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.FREEDISK;
import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.REPLICA;
import static org.apache.solr.common.cloud.ZkStateReader.CLUSTER_STATE;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA;
public class TestPolicy extends SolrTestCaseJ4 {
boolean useNodeset ;
@@ -2595,13 +2594,13 @@ public class TestPolicy extends SolrTestCaseJ4 {
if (node.equals("node1")) {
Map m = Utils.makeMap("newColl",
Utils.makeMap("shard1", Collections.singletonList(new ReplicaInfo("r1", "shard1",
new Replica("r1", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node1")),
new Replica("r1", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node1"), "newColl", "shard1"),
Utils.makeMap(FREEDISK.perReplicaValue, 200)))));
return m;
} else if (node.equals("node2")) {
Map m = Utils.makeMap("newColl",
Utils.makeMap("shard2", Collections.singletonList(new ReplicaInfo("r1", "shard2",
new Replica("r1", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node2")),
new Replica("r1", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node2"),"newColl", "shard2"),
Utils.makeMap(FREEDISK.perReplicaValue, 200)))));
return m;
}
@@ -2624,9 +2623,9 @@ public class TestPolicy extends SolrTestCaseJ4 {
@Override
public Replica getLeader(String sliceName) {
if (sliceName.equals("shard1"))
return new Replica("r1", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node1"));
return new Replica("r1", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node1"), name, "shard1");
if (sliceName.equals("shard2"))
return new Replica("r2", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node2"));
return new Replica("r2", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node2"),name, "shard2");
return null;
}
};
@@ -2643,20 +2642,20 @@ public class TestPolicy extends SolrTestCaseJ4 {
public void testMoveReplicaLeaderlast() {
List<Pair<ReplicaInfo, Row>> validReplicas = new ArrayList<>();
Replica replica = new Replica("r1", Utils.makeMap("leader", "true"));
ReplicaInfo replicaInfo = new ReplicaInfo("c1", "s1", replica, new HashMap<>());
Replica replica = new Replica("r1", Utils.makeMap("leader", "true"), "c1", "s1");
ReplicaInfo replicaInfo = new ReplicaInfo(replica.collection, replica.slice ,replica, new HashMap<>());
validReplicas.add(new Pair<>(replicaInfo, null));
replicaInfo = new ReplicaInfo("r4", "c1_s2_r1", "c1", "s2", Replica.Type.NRT, "n1", Collections.singletonMap("leader", "true"));
validReplicas.add(new Pair<>(replicaInfo, null));
replica = new Replica("r2", Utils.makeMap("leader", false));
replicaInfo = new ReplicaInfo("c1", "s1", replica, new HashMap<>());
replica = new Replica("r2", Utils.makeMap("leader", false),"c1","s1");
replicaInfo = new ReplicaInfo(replica.collection, replica.slice, replica, new HashMap<>());
validReplicas.add(new Pair<>(replicaInfo, null));
replica = new Replica("r3", Utils.makeMap("leader", false));
replicaInfo = new ReplicaInfo("c1", "s1", replica, new HashMap<>());
replica = new Replica("r3", Utils.makeMap("leader", false),"c1","s1");
replicaInfo = new ReplicaInfo(replica.collection,replica.slice, replica, new HashMap<>());
validReplicas.add(new Pair<>(replicaInfo, null));

View File

@@ -75,7 +75,7 @@ public class NodePreferenceRulesComparatorTest extends SolrTestCaseJ4 {
ZkStateReader.NODE_NAME_PROP, "node4",
ZkStateReader.CORE_NAME_PROP, "collection1",
ZkStateReader.REPLICA_TYPE, "TLOG"
)
),"collection1","shard1"
)
);
@@ -125,7 +125,7 @@ public class NodePreferenceRulesComparatorTest extends SolrTestCaseJ4 {
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.CORE_NAME_PROP, "collection1",
ZkStateReader.REPLICA_TYPE, "NRT"
)
),"collection1","shard1"
)
);
replicas.add(
@@ -136,7 +136,7 @@ public class NodePreferenceRulesComparatorTest extends SolrTestCaseJ4 {
ZkStateReader.NODE_NAME_PROP, "node2",
ZkStateReader.CORE_NAME_PROP, "collection1",
ZkStateReader.REPLICA_TYPE, "TLOG"
)
),"collection1","shard1"
)
);
replicas.add(
@@ -147,7 +147,7 @@ public class NodePreferenceRulesComparatorTest extends SolrTestCaseJ4 {
ZkStateReader.NODE_NAME_PROP, "node3",
ZkStateReader.CORE_NAME_PROP, "collection1",
ZkStateReader.REPLICA_TYPE, "PULL"
)
),"collection1","shard1"
)
);
return replicas;

View File

@@ -23,7 +23,6 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.solr.SolrTestCase;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.params.ModifiableSolrParams;
@@ -128,7 +127,7 @@ public class ReplicaListTransformerTest extends SolrTestCase {
final Map<String,Object> propMap = new HashMap<String,Object>();
propMap.put("base_url", url);
// a skeleton replica, good enough for this test's purposes
final Replica replica = new Replica(name, propMap);
final Replica replica = new Replica(name, propMap,"c1","s1");
inputs.add(replica);
if (url.matches(regex)) {

View File

@@ -84,7 +84,7 @@ public class RequestReplicaListTransformerGeneratorTest extends SolrTestCaseJ4 {
ZkStateReader.NODE_NAME_PROP, "node4",
ZkStateReader.CORE_NAME_PROP, "collection1",
ZkStateReader.REPLICA_TYPE, "TLOG"
)
), "c1","s1"
)
);
@@ -122,7 +122,7 @@ public class RequestReplicaListTransformerGeneratorTest extends SolrTestCaseJ4 {
ZkStateReader.NODE_NAME_PROP, "node1",
ZkStateReader.CORE_NAME_PROP, "collection1",
ZkStateReader.REPLICA_TYPE, "NRT"
)
),"c1","s1"
)
);
replicas.add(
@@ -133,7 +133,7 @@ public class RequestReplicaListTransformerGeneratorTest extends SolrTestCaseJ4 {
ZkStateReader.NODE_NAME_PROP, "node2",
ZkStateReader.CORE_NAME_PROP, "collection1",
ZkStateReader.REPLICA_TYPE, "TLOG"
)
),"c1","s1"
)
);
replicas.add(
@@ -144,7 +144,7 @@ public class RequestReplicaListTransformerGeneratorTest extends SolrTestCaseJ4 {
ZkStateReader.NODE_NAME_PROP, "node3",
ZkStateReader.CORE_NAME_PROP, "collection1",
ZkStateReader.REPLICA_TYPE, "PULL"
)
),"c1","s1"
)
);
return replicas;

View File

@@ -35,7 +35,7 @@ public class ShufflingReplicaListTransformerTest extends SolrTestCase {
public void testTransformReplicas() throws Exception {
final List<Replica> replicas = new ArrayList<>();
for (final String url : createRandomUrls()) {
replicas.add(new Replica(url, new HashMap<String,Object>()));
replicas.add(new Replica(url, new HashMap<String,Object>(),"c1","s1"));
}
implTestTransform(replicas);
}