From eb3a4757ffc2cffaee062187622c2ce5a394e996 Mon Sep 17 00:00:00 2001 From: Mikhail Khludnev Date: Mon, 2 Dec 2019 19:07:55 +0300 Subject: [PATCH 01/11] LUCENE-9073: IntervalQuery expose field on toString and explain --- lucene/CHANGES.txt | 2 ++ .../queries/intervals/IntervalQuery.java | 7 +++++-- .../intervals/IntervalScoreFunction.java | 4 ++-- .../queries/intervals/TestIntervalQuery.java | 20 +++++++++++++++++++ 4 files changed, 29 insertions(+), 4 deletions(-) diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index cf1f2ff14d1..65f40987f8e 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -93,6 +93,8 @@ Improvements that match a class of terms to pass a ByteRunAutomaton matching those that class back to the visitor. (Alan Woodward, David Smiley) +* LUCENE-9073: IntervalQuery now reports its field in toString() and explain() (Mikhail Khludnev) + Optimizations * LUCENE-8928: When building a kd-tree for dimensions n > 2, compute exact bounds for an inner node every N splits diff --git a/lucene/queries/src/java/org/apache/lucene/queries/intervals/IntervalQuery.java b/lucene/queries/src/java/org/apache/lucene/queries/intervals/IntervalQuery.java index 4bbfd6838da..10bff723b17 100644 --- a/lucene/queries/src/java/org/apache/lucene/queries/intervals/IntervalQuery.java +++ b/lucene/queries/src/java/org/apache/lucene/queries/intervals/IntervalQuery.java @@ -97,6 +97,9 @@ public final class IntervalQuery extends Query { } private IntervalQuery(String field, IntervalsSource intervalsSource, IntervalScoreFunction scoreFunction) { + Objects.requireNonNull(field, "field must not be null"); + Objects.requireNonNull(intervalsSource, "intervalsSource must not be null"); + Objects.requireNonNull(scoreFunction, "scoreFunction must not be null"); this.field = field; this.intervalsSource = intervalsSource; this.scoreFunction = scoreFunction; @@ -111,7 +114,7 @@ public final class IntervalQuery extends Query { @Override public String toString(String field) { - return intervalsSource.toString(); + return (!getField().equals(field) ? 
getField() + ":" : "") + intervalsSource.toString(); } @Override @@ -158,7 +161,7 @@ public final class IntervalQuery extends Query { int newDoc = scorer.iterator().advance(doc); if (newDoc == doc) { float freq = scorer.freq(); - return scoreFunction.explain(intervalsSource.toString(), boost, freq); + return scoreFunction.explain(IntervalQuery.this.toString(), boost, freq); } } return Explanation.noMatch("no matching intervals"); diff --git a/lucene/queries/src/java/org/apache/lucene/queries/intervals/IntervalScoreFunction.java b/lucene/queries/src/java/org/apache/lucene/queries/intervals/IntervalScoreFunction.java index 9adc442be46..4dc0c747580 100644 --- a/lucene/queries/src/java/org/apache/lucene/queries/intervals/IntervalScoreFunction.java +++ b/lucene/queries/src/java/org/apache/lucene/queries/intervals/IntervalScoreFunction.java @@ -82,7 +82,7 @@ abstract class IntervalScoreFunction { "Saturation function on interval frequency, computed as w * S / (S + k) from:", Explanation.match(weight, "w, weight of this function"), Explanation.match(pivot, "k, pivot feature value that would give a score contribution equal to w/2"), - Explanation.match(sloppyFreq, "S, the sloppy frequency of the interval " + interval)); + Explanation.match(sloppyFreq, "S, the sloppy frequency of the interval query " + interval)); } @Override @@ -136,7 +136,7 @@ abstract class IntervalScoreFunction { Explanation.match(weight, "w, weight of this function"), Explanation.match(pivot, "k, pivot feature value that would give a score contribution equal to w/2"), Explanation.match(a, "a, exponent, higher values make the function grow slower before k and faster after k"), - Explanation.match(sloppyFreq, "S, the sloppy frequency of the interval " + interval)); + Explanation.match(sloppyFreq, "S, the sloppy frequency of the interval query " + interval)); } @Override diff --git a/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervalQuery.java b/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervalQuery.java index 0f18446ce38..097b4abf4a7 100644 --- a/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervalQuery.java +++ b/lucene/queries/src/test/org/apache/lucene/queries/intervals/TestIntervalQuery.java @@ -26,6 +26,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.RandomIndexWriter; import org.apache.lucene.search.BoostQuery; import org.apache.lucene.search.CheckHits; +import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.TopDocs; @@ -126,6 +127,25 @@ public class TestIntervalQuery extends LuceneTestCase { checkHits(q, new int[]{1, 3, 5}); } + public void testFieldInToString() throws IOException { + final IntervalQuery fieldW1 = new IntervalQuery(field, Intervals.term("w1")); + assertTrue(fieldW1.toString().contains(field)); + final String field2 = field+"2"; + final IntervalQuery f2w1 = new IntervalQuery(field2, Intervals.term("w1")); + assertTrue(f2w1.toString().contains(field2+":")); + assertFalse("suppress default field",f2w1.toString(field2).contains(field2)); + + final Explanation explain = searcher.explain(new IntervalQuery(field, + Intervals.ordered(Intervals.term("w1"), Intervals.term("w2"))), searcher.search(fieldW1, 1).scoreDocs[0].doc); + assertTrue(explain.toString().contains(field)); + } + + public void testNullConstructorArgs() throws IOException { + expectThrows(NullPointerException.class, ()-> new IntervalQuery(null, 
Intervals.term("z"))); + expectThrows(NullPointerException.class, ()-> new IntervalQuery("field", null)); + } + + public void testNotWithinQuery() throws IOException { Query q = new IntervalQuery(field, Intervals.notWithin(Intervals.term("w1"), 1, Intervals.term("w2"))); checkHits(q, new int[]{ 1, 2, 3 }); From de1c9fb9e80600de4e0606e448fe36d15475f5e3 Mon Sep 17 00:00:00 2001 From: Cassandra Targett Date: Mon, 2 Dec 2019 13:09:54 -0600 Subject: [PATCH 02/11] SOLR-13885: various Ref Guide typos. This closes #990 --- solr/CHANGES.txt | 2 ++ solr/solr-ref-guide/src/aliases.adoc | 2 +- solr/solr-ref-guide/src/aws-solrcloud-tutorial.adoc | 2 +- solr/solr-ref-guide/src/common-query-parameters.adoc | 2 +- solr/solr-ref-guide/src/highlighting.adoc | 2 +- solr/solr-ref-guide/src/json-facet-api.adoc | 2 +- solr/solr-ref-guide/src/json-faceting-domain-changes.adoc | 2 +- solr/solr-ref-guide/src/major-changes-in-solr-7.adoc | 2 +- solr/solr-ref-guide/src/meta-docs/asciidoc-syntax.adoc | 2 +- solr/solr-ref-guide/src/other-parsers.adoc | 2 +- .../src/shards-and-indexing-data-in-solrcloud.adoc | 2 +- solr/solr-ref-guide/src/spatial-search.adoc | 2 +- solr/solr-ref-guide/src/stream-decorator-reference.adoc | 4 ++-- solr/solr-ref-guide/src/using-jmx-with-solr.adoc | 2 +- 14 files changed, 16 insertions(+), 14 deletions(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 9ebcc575f81..f678546bf67 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -215,6 +215,8 @@ Other Changes * SOLR-12193: Move some log messages to TRACE level (gezapeti, janhoy) +* SOLR-13885: Typos in the documentation. (KoenDG via Cassandra Targett) + ================== 8.3.1 ================== Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release. diff --git a/solr/solr-ref-guide/src/aliases.adoc b/solr/solr-ref-guide/src/aliases.adoc index c595f2dfbb2..58615d87bdf 100644 --- a/solr/solr-ref-guide/src/aliases.adoc +++ b/solr/solr-ref-guide/src/aliases.adoc @@ -124,7 +124,7 @@ RAUP first reads TRA configuration from the alias properties when it is initiali within the target collection. * If it belongs in the current collection (which is usually the case if processing events as they occur), the document - passes through to DUP. DUP does it's normal collection-level processing that may involve routing the document + passes through to DUP. DUP does its normal collection-level processing that may involve routing the document to another shard & replica. * If the timestamp on the document is more recent than the most recent TRA segment, then a new collection needs to be diff --git a/solr/solr-ref-guide/src/aws-solrcloud-tutorial.adoc b/solr/solr-ref-guide/src/aws-solrcloud-tutorial.adoc index 82976206475..524243a718c 100644 --- a/solr/solr-ref-guide/src/aws-solrcloud-tutorial.adoc +++ b/solr/solr-ref-guide/src/aws-solrcloud-tutorial.adoc @@ -178,7 +178,7 @@ You can refer <> for an extensive w If you want to configure an external ZooKeeper ensemble to avoid using the embedded single-instance ZooKeeper that runs in the same JVM as the Solr node, you need to make few tweaks in the above listed steps as follows. -* When creating the security group, instead of opening port `9983` for ZooKeeper, you'll open `2181` (or whatever port you are using for ZooKeeper: it's default is 2181). +* When creating the security group, instead of opening port `9983` for ZooKeeper, you'll open `2181` (or whatever port you are using for ZooKeeper: its default is 2181). 
* When configuring the number of instances to launch, choose to open 3 instances instead of 2. * When modifying the `/etc/hosts` on each machine, add a third line for the 3rd instance and give it a recognizable name: + diff --git a/solr/solr-ref-guide/src/common-query-parameters.adoc b/solr/solr-ref-guide/src/common-query-parameters.adoc index 0486ff1a41a..671000699cf 100644 --- a/solr/solr-ref-guide/src/common-query-parameters.adoc +++ b/solr/solr-ref-guide/src/common-query-parameters.adoc @@ -276,7 +276,7 @@ fq=quantity_in_stock:[5 TO *] fq={!frange l=10 u=100}mul(popularity,price) fq={!frange cost=200 l=0}pow(mul(sum(1, query('tag:smartphone')), div(1,avg_rating)), 2.3) -These are the same filters run w/o caching. The simple range query on the `quantity_in_stock` field will be run in parallel with the main query like a traditional lucene filter, while the 2 `frange` filters will only be checked against each document has already matched the main query and the `quantity_in_stock` range query -- first the simpler `mul(popularity,price)` will be checked (because of it's implicit `cost=100`) and only if it matches will the final very complex filter (with it's higher `cost=200`) be checked. +These are the same filters run w/o caching. The simple range query on the `quantity_in_stock` field will be run in parallel with the main query like a traditional lucene filter, while the 2 `frange` filters will only be checked against each document has already matched the main query and the `quantity_in_stock` range query -- first the simpler `mul(popularity,price)` will be checked (because of its implicit `cost=100`) and only if it matches will the final very complex filter (with its higher `cost=200`) be checked. [source,text] q=some keywords diff --git a/solr/solr-ref-guide/src/highlighting.adoc b/solr/solr-ref-guide/src/highlighting.adoc index 7f9eef98d1e..8055b79d7de 100644 --- a/solr/solr-ref-guide/src/highlighting.adoc +++ b/solr/solr-ref-guide/src/highlighting.adoc @@ -170,7 +170,7 @@ The "alternate" fallback options are more primitive. <>:: (`hl.method=original`, the default) + The Original Highlighter, sometimes called the "Standard Highlighter" or "Default Highlighter", is Lucene's original highlighter – a venerable option with a high degree of customization options. -It's query accuracy is good enough for most needs, although it's not quite as good/perfect as the Unified Highlighter. +Its query accuracy is good enough for most needs, although it's not quite as good/perfect as the Unified Highlighter. + The Original Highlighter will normally analyze stored text on the fly in order to highlight. It will use full term vectors if available. + diff --git a/solr/solr-ref-guide/src/json-facet-api.adoc b/solr/solr-ref-guide/src/json-facet-api.adoc index bbe3ed20dd5..f395bf7e98e 100644 --- a/solr/solr-ref-guide/src/json-facet-api.adoc +++ b/solr/solr-ref-guide/src/json-facet-api.adoc @@ -909,7 +909,7 @@ ____ The `relatedness(...)` function is used to "score" these relationships, relative to "Foreground" and "Background" sets of documents, specified in the function params as queries. -Unlike most aggregation functions, the `relatedness(...)` function is aware of whether and how it's used in <>. It evaluates the query defining the current bucket _independently_ from it's parent/ancestor buckets, and intersects those documents with a "Foreground Set" defined by the foreground query _combined with the ancestor buckets_. 
The result is then compared to a similar intersection done against the "Background Set" (defined exclusively by background query) to see if there is a positive, or negative, correlation between the current bucket and the Foreground Set, relative to the Background Set. +Unlike most aggregation functions, the `relatedness(...)` function is aware of whether and how it's used in <>. It evaluates the query defining the current bucket _independently_ from its parent/ancestor buckets, and intersects those documents with a "Foreground Set" defined by the foreground query _combined with the ancestor buckets_. The result is then compared to a similar intersection done against the "Background Set" (defined exclusively by background query) to see if there is a positive, or negative, correlation between the current bucket and the Foreground Set, relative to the Background Set. NOTE: While it's very common to define the Background Set as `\*:*`, or some other super-set of the Foreground Query, it is not strictly required. The `relatedness(...)` function can be used to compare the statistical relatedness of sets of documents to orthogonal foreground/background queries. diff --git a/solr/solr-ref-guide/src/json-faceting-domain-changes.adoc b/solr/solr-ref-guide/src/json-faceting-domain-changes.adoc index 88f6d1cfbf8..437c9a22f37 100644 --- a/solr/solr-ref-guide/src/json-faceting-domain-changes.adoc +++ b/solr/solr-ref-guide/src/json-faceting-domain-changes.adoc @@ -235,7 +235,7 @@ Example: A `graph` domain change option works similarly to the `join` domain option, but can do traversal multiple hops `from` the existing domain `to` other documents. -This works very similar to the <>, supporting all of it's optional parameters, and has the same limitations when dealing with multi-shard collections. +This works very similar to the <>, supporting all of its optional parameters, and has the same limitations when dealing with multi-shard collections. Example: [source,json] diff --git a/solr/solr-ref-guide/src/major-changes-in-solr-7.adoc b/solr/solr-ref-guide/src/major-changes-in-solr-7.adoc index 02523367c34..17d571cb801 100644 --- a/solr/solr-ref-guide/src/major-changes-in-solr-7.adoc +++ b/solr/solr-ref-guide/src/major-changes-in-solr-7.adoc @@ -73,7 +73,7 @@ Several changes have been made to configsets that ship with Solr; not only their * The `data_driven_configset` and `basic_configset` have been removed, and replaced by the `_default` configset. The `sample_techproducts_configset` also remains, and is designed for use with the example documents shipped with Solr in the `example/exampledocs` directory. * When creating a new collection, if you do not specify a configset, the `_default` will be used. ** If you use SolrCloud, the `_default` configset will be automatically uploaded to ZooKeeper. -** If you use standalone mode, the instanceDir will be created automatically, using the `_default` configset as it's basis. +** If you use standalone mode, the instanceDir will be created automatically, using the `_default` configset as its basis. 
=== Schemaless Improvements diff --git a/solr/solr-ref-guide/src/meta-docs/asciidoc-syntax.adoc b/solr/solr-ref-guide/src/meta-docs/asciidoc-syntax.adoc index 776d8b2566c..3bacab644b1 100644 --- a/solr/solr-ref-guide/src/meta-docs/asciidoc-syntax.adoc +++ b/solr/solr-ref-guide/src/meta-docs/asciidoc-syntax.adoc @@ -92,7 +92,7 @@ When building the Guide, the `solr-root-path` attribute will be automatically se In order for editors (such as ATOM) to be able to offer "live preview" of the `*.adoc` files using these includes, the `solr-root-path` attribute must also be set as a document level attribute in each file, with the correct relative path. -For example, `using-solrj.adoc` sets `solr-root-path` in it's header, along with an `example-source-dir` attribute (that depends on `solr-root-path`) in order to reduce redundancy in the many `include::` directives it specifies... +For example, `using-solrj.adoc` sets `solr-root-path` in its header, along with an `example-source-dir` attribute (that depends on `solr-root-path`) in order to reduce redundancy in the many `include::` directives it specifies... [source] -- diff --git a/solr/solr-ref-guide/src/other-parsers.adoc b/solr/solr-ref-guide/src/other-parsers.adoc index ed5e0f2c9c2..f7a17eb1290 100644 --- a/solr/solr-ref-guide/src/other-parsers.adoc +++ b/solr/solr-ref-guide/src/other-parsers.adoc @@ -390,7 +390,7 @@ The graph is built according to linkages between documents based on the terms fo Supported field types are point fields with docValues enabled, or string fields with `indexed=true` or `docValues=true`. TIP: For string fields which are `indexed=false` and `docValues=true`, please refer to the javadocs for {lucene-javadocs}sandbox/org/apache/lucene/search/DocValuesTermsQuery.html[`DocValuesTermsQuery`] -for it's performance characteristics so `indexed=true` will perform better for most use-cases. +for its performance characteristics so `indexed=true` will perform better for most use-cases. === Graph Query Parameters diff --git a/solr/solr-ref-guide/src/shards-and-indexing-data-in-solrcloud.adoc b/solr/solr-ref-guide/src/shards-and-indexing-data-in-solrcloud.adoc index f8938725eb8..253f23002ba 100644 --- a/solr/solr-ref-guide/src/shards-and-indexing-data-in-solrcloud.adoc +++ b/solr/solr-ref-guide/src/shards-and-indexing-data-in-solrcloud.adoc @@ -46,7 +46,7 @@ These issues are not a problem for most users. However, some use cases would per Solr accomplishes this by allowing you to set the replica type when creating a new collection or when adding a replica. The available types are: -* *NRT*: This is the default. A NRT replica (NRT = NearRealTime) maintains a transaction log and writes new documents to it's indexes locally. Any replica of this type is eligible to become a leader. Traditionally, this was the only type supported by Solr. +* *NRT*: This is the default. A NRT replica (NRT = NearRealTime) maintains a transaction log and writes new documents to its indexes locally. Any replica of this type is eligible to become a leader. Traditionally, this was the only type supported by Solr. * *TLOG*: This type of replica maintains a transaction log but does not index document changes locally. This type helps speed up indexing since no commits need to occur in the replicas. When this type of replica needs to update its index, it does so by replicating the index from the leader. This type of replica is also eligible to become a shard leader; it would do so by first processing its transaction log. 
If it does become a leader, it will behave the same as if it was a NRT type of replica. * *PULL*: This type of replica does not maintain a transaction log nor index document changes locally. It only replicates the index from the shard leader. It is not eligible to become a shard leader and doesn't participate in shard leader election at all. diff --git a/solr/solr-ref-guide/src/spatial-search.adoc b/solr/solr-ref-guide/src/spatial-search.adoc index a514dc5e251..b01136379fa 100644 --- a/solr/solr-ref-guide/src/spatial-search.adoc +++ b/solr/solr-ref-guide/src/spatial-search.adoc @@ -415,7 +415,7 @@ The format, either `ints2D` (default) or `png`. [TIP] ==== -You'll experiment with different `distErrPct` values (probably 0.10 - 0.20) with various input geometries till the default size is what you're looking for. The specific details of how it's computed isn't important. For high-detail grids used in point-plotting (loosely one cell per pixel), set `distErr` to be the number of decimal-degrees of several pixels or so of the map being displayed. Also, you probably don't want to use a geohash-based grid because the cell orientation between grid levels flip-flops between being square and rectangle. Quad is consistent and has more levels, albeit at the expense of a larger index. +You'll experiment with different `distErrPct` values (probably 0.10 - 0.20) with various input geometries till the default size is what you're looking for. The specific details of how it's computed aren't important. For high-detail grids used in point-plotting (loosely one cell per pixel), set `distErr` to be the number of decimal-degrees of several pixels or so of the map being displayed. Also, you probably don't want to use a geohash-based grid because the cell orientation between grid levels flip-flops between being square and rectangle. Quad is consistent and has more levels, albeit at the expense of a larger index. ==== Here's some sample output in JSON (with "..." inserted for brevity): diff --git a/solr/solr-ref-guide/src/stream-decorator-reference.adoc b/solr/solr-ref-guide/src/stream-decorator-reference.adoc index b1d4b66032c..ab748846eb8 100644 --- a/solr/solr-ref-guide/src/stream-decorator-reference.adoc +++ b/solr/solr-ref-guide/src/stream-decorator-reference.adoc @@ -925,7 +925,7 @@ merge( == null -The null expression is a useful utility function for understanding bottlenecks when performing parallel relational algebra (joins, intersections, rollups etc.). The null function reads all the tuples from an underlying stream and returns a single tuple with the count and processing time. Because the null stream adds minimal overhead of it's own, it can be used to isolate the performance of Solr's /export handler. If the /export handlers performance is not the bottleneck, then the bottleneck is likely occurring in the workers where the stream decorators are running. +The null expression is a useful utility function for understanding bottlenecks when performing parallel relational algebra (joins, intersections, rollups etc.). The null function reads all the tuples from an underlying stream and returns a single tuple with the count and processing time. Because the null stream adds minimal overhead of its own, it can be used to isolate the performance of Solr's /export handler. If the /export handlers performance is not the bottleneck, then the bottleneck is likely occurring in the workers where the stream decorators are running. The null expression can be wrapped by the parallel function and sent to worker nodes. 
In this scenario each worker will return one tuple with the count of tuples processed on the worker and the timing information for that worker. This gives valuable information such as: @@ -1080,7 +1080,7 @@ plist(tuple(a=search(collection1, q="*:*", fl="id, prod_ss", sort="id asc")), == priority -The `priority` function is a simple priority scheduler for the <> function. The `executor` function doesn't directly have a concept of task prioritization; instead it simply executes tasks in the order that they are read from it's underlying stream. The `priority` function provides the ability to schedule a higher priority task ahead of lower priority tasks that were submitted earlier. +The `priority` function is a simple priority scheduler for the <> function. The `executor` function doesn't directly have a concept of task prioritization; instead it simply executes tasks in the order that they are read from its underlying stream. The `priority` function provides the ability to schedule a higher priority task ahead of lower priority tasks that were submitted earlier. The `priority` function wraps two <> that are both emitting tuples that contain streaming expressions to execute. The first topic is considered the higher priority task queue. diff --git a/solr/solr-ref-guide/src/using-jmx-with-solr.adoc b/solr/solr-ref-guide/src/using-jmx-with-solr.adoc index 47d0241aa0f..d65cbbf7b14 100644 --- a/solr/solr-ref-guide/src/using-jmx-with-solr.adoc +++ b/solr/solr-ref-guide/src/using-jmx-with-solr.adoc @@ -26,7 +26,7 @@ If you are unfamiliar with JMX, you may find the following overview useful: htt JMX support is configured by defining a metrics reporter, as described in the section the section <>. -If you have an existing MBean server running in Solr's JVM, or if you start Solr with the system property `-Dcom.sun.management.jmxremote`, Solr will automatically identify it's location on startup even if you have not defined a reporter explicitly in `solr.xml`. You can also define the location of the MBean server with parameters defined in the reporter definition. +If you have an existing MBean server running in Solr's JVM, or if you start Solr with the system property `-Dcom.sun.management.jmxremote`, Solr will automatically identify its location on startup even if you have not defined a reporter explicitly in `solr.xml`. You can also define the location of the MBean server with parameters defined in the reporter definition. == Configuring MBean Servers From aebf7f7a463329879123b6436dd711e62d3f6d37 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Mon, 2 Dec 2019 20:34:01 -0500 Subject: [PATCH 03/11] SOLR-13991: clean up permissions in solr-tests.policy AKA break all the tests to hell, please ping the issue for repeated test failures --- lucene/tools/junit4/solr-tests.policy | 97 ++++++++++++++++++++++----- 1 file changed, 79 insertions(+), 18 deletions(-) diff --git a/lucene/tools/junit4/solr-tests.policy b/lucene/tools/junit4/solr-tests.policy index 82ed0bfab16..8140ddb5c8c 100644 --- a/lucene/tools/junit4/solr-tests.policy +++ b/lucene/tools/junit4/solr-tests.policy @@ -15,13 +15,7 @@ * limitations under the License. */ -// Policy file to prevent tests from writing outside the test sandbox directory -// (must be given as a sysprop: tests.sandbox.dir) -// This policy also disallows stuff like listening on network ports of interfaces -// different than 127.0.0.1. - -// PLEASE NOTE: You may need to enable other permissions when new tests are added, -// everything not allowed here is forbidden! 
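+// (Illustrative note, not part of the original patch: this policy only takes effect
+// because the test runner forks JVMs with a security manager enabled, along the lines
+// of -Djava.security.manager -Djava.security.policy==solr-tests.policy, so any
+// privileged action without a matching grant below fails with
+// java.security.AccessControlException.)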
+// Policy file for solr tests. Please keep minimal and avoid wildcards. grant { // permissions for file access, write access only to sandbox: @@ -45,27 +39,94 @@ grant { // Basic permissions needed for Lucene to work: permission java.util.PropertyPermission "*", "read,write"; - permission java.lang.reflect.ReflectPermission "*"; - permission java.lang.RuntimePermission "*"; + + // needed by gson serialization of junit4 runner: TODO clean that up + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + permission java.lang.RuntimePermission "accessDeclaredMembers"; + // needed by junit4 runner to capture sysout/syserr: + permission java.lang.RuntimePermission "setIO"; + // needed by randomized runner to catch failures from other threads: + permission java.lang.RuntimePermission "setDefaultUncaughtExceptionHandler"; + // needed by randomized runner getTopThreadGroup: + permission java.lang.RuntimePermission "modifyThreadGroup"; + // needed by tests e.g. shutting down executors: + permission java.lang.RuntimePermission "modifyThread"; + // needed for tons of test hacks etc + permission java.lang.RuntimePermission "getStackTrace"; + // needed for mock filesystems in tests + permission java.lang.RuntimePermission "fileSystemProvider"; + // needed for test of IOUtils.spins (maybe it can be avoided) + permission java.lang.RuntimePermission "getFileStoreAttributes"; + // analyzers/uima: needed by lucene expressions' JavascriptCompiler + permission java.lang.RuntimePermission "createClassLoader"; + // needed to test unmap hack on platforms that support it + permission java.lang.RuntimePermission "accessClassInPackage.sun.misc"; + // needed by jacoco to dump coverage + permission java.lang.RuntimePermission "shutdownHooks"; + // needed by org.apache.logging.log4j + permission java.lang.RuntimePermission "getenv.*"; + permission java.lang.RuntimePermission "getClassLoader"; + permission java.lang.RuntimePermission "setContextClassLoader"; + permission java.lang.RuntimePermission "getStackWalkerWithClassReference"; + // needed by bytebuddy + permission java.lang.RuntimePermission "defineClass"; + // needed by mockito + permission java.lang.RuntimePermission "accessClassInPackage.sun.reflect"; + permission java.lang.RuntimePermission "reflectionFactoryAccess"; + // needed by SolrResourceLoader + permission java.lang.RuntimePermission "closeClassLoader"; + // needed by HttpSolrClient + permission java.lang.RuntimePermission "getFileSystemAttributes"; + // needed by hadoop auth (TODO: there is a cleaner way to handle this) + permission java.lang.RuntimePermission "loadLibrary.jaas"; + // needed by hadoop hdfs + permission java.lang.RuntimePermission "readFileDescriptor"; + permission java.lang.RuntimePermission "writeFileDescriptor"; + // needed by hadoop http + permission java.lang.RuntimePermission "getProtectionDomain"; // These two *have* to be spelled out a separate permission java.lang.management.ManagementPermission "control"; permission java.lang.management.ManagementPermission "monitor"; - // Solr needs those: - permission java.net.NetPermission "*"; - permission java.sql.SQLPermission "*"; + // needed by hadoop htrace + permission java.net.NetPermission "getNetworkInformation"; + + // needed by DIH + permission java.sql.SQLPermission "deregisterDriver"; + permission java.util.logging.LoggingPermission "control"; - permission javax.management.MBeanPermission "*", "*"; - permission javax.management.MBeanServerPermission "*"; - permission javax.management.MBeanTrustPermission 
"*"; - permission javax.security.auth.AuthPermission "*"; + + // needed by solr mbeans feature/tests + // TODO: can we remove wildcard for class names/members? + permission javax.management.MBeanPermission "*", "getAttribute"; + permission javax.management.MBeanPermission "*", "getMBeanInfo"; + permission javax.management.MBeanPermission "*", "queryMBeans"; + permission javax.management.MBeanPermission "*", "queryNames"; + permission javax.management.MBeanPermission "*", "registerMBean"; + permission javax.management.MBeanPermission "*", "unregisterMBean"; + permission javax.management.MBeanServerPermission "createMBeanServer"; + permission javax.management.MBeanServerPermission "findMBeanServer"; + permission javax.management.MBeanServerPermission "releaseMBeanServer"; + permission javax.management.MBeanTrustPermission "register"; + + // needed by hadoop auth + permission javax.security.auth.AuthPermission "getSubject"; + permission javax.security.auth.AuthPermission "modifyPrincipals"; + permission javax.security.auth.AuthPermission "doAs"; + permission javax.security.auth.AuthPermission "getLoginConfiguration"; + permission javax.security.auth.AuthPermission "setLoginConfiguration"; + permission javax.security.auth.AuthPermission "modifyPrivateCredentials"; permission javax.security.auth.PrivateCredentialPermission "org.apache.hadoop.security.Credentials * \"*\"", "read"; - permission java.security.SecurityPermission "*"; + + // needed by hadoop security + permission java.security.SecurityPermission "putProviderProperty.SaslPlainServer"; + permission java.security.SecurityPermission "insertProvider"; + permission javax.xml.bind.JAXBPermission "setDatatypeConverter"; // SSL related properties for Solr tests - permission javax.net.ssl.SSLPermission "*"; + permission javax.net.ssl.SSLPermission "setDefaultSSLContext"; // SASL/Kerberos related properties for Solr tests permission javax.security.auth.PrivateCredentialPermission "javax.security.auth.kerberos.KerberosTicket * \"*\"", "read"; From d8f9f47ca0ecf762e91602d984a1882f1f5e3da9 Mon Sep 17 00:00:00 2001 From: Noble Paul Date: Tue, 3 Dec 2019 13:10:19 +1100 Subject: [PATCH 04/11] SOLR-13992: Refactor code to have collection, shard name in Replica,Slice (#1051) * SOLR-13992: Refactor code to have collection name shard name in Replica,Slice --- solr/CHANGES.txt | 2 + .../solr/cloud/ExclusiveSliceProperty.java | 2 +- .../api/collections/CreateCollectionCmd.java | 2 +- .../cloud/api/collections/RestoreCmd.java | 30 ++++++------- .../sim/SimClusterStateProvider.java | 4 +- .../cloud/overseer/ClusterStateMutator.java | 4 +- .../cloud/overseer/CollectionMutator.java | 2 +- .../solr/cloud/overseer/NodeMutator.java | 4 +- .../solr/cloud/overseer/ReplicaMutator.java | 25 +++++------ .../solr/cloud/overseer/SliceMutator.java | 14 +++--- .../solr/cloud/ClusterStateMockUtil.java | 12 ++++-- .../apache/solr/cloud/ClusterStateTest.java | 8 ++-- ...rseerCollectionConfigSetProcessorTest.java | 4 +- .../org/apache/solr/cloud/SliceStateTest.java | 14 +++--- .../solr/cloud/TestHashPartitioner.java | 2 +- .../cloud/api/collections/AssignTest.java | 18 ++++---- .../org/apache/solr/core/CoreSorterTest.java | 2 +- .../solr/common/cloud/ClusterState.java | 10 ++--- .../org/apache/solr/common/cloud/Replica.java | 12 +++++- .../org/apache/solr/common/cloud/Slice.java | 17 +++++--- .../solr/common/cloud/ZkStateReader.java | 5 +++ .../solrj/cloud/autoscaling/TestPolicy.java | 43 +++++++++---------- .../NodePreferenceRulesComparatorTest.java | 8 ++-- 
.../routing/ReplicaListTransformerTest.java | 3 +- ...stReplicaListTransformerGeneratorTest.java | 8 ++-- .../ShufflingReplicaListTransformerTest.java | 2 +- 26 files changed, 139 insertions(+), 118 deletions(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index f678546bf67..96dfc4d9a84 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -217,6 +217,8 @@ Other Changes * SOLR-13885: Typos in the documentation. (KoenDG via Cassandra Targett) +* SOLR-13992: Code refactored to have collection name, slice name in Replica, Slice (noble) + ================== 8.3.1 ================== Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release. diff --git a/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java b/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java index 4690c11740e..23f684e2813 100644 --- a/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java +++ b/solr/core/src/java/org/apache/solr/cloud/ExclusiveSliceProperty.java @@ -296,7 +296,7 @@ class ExclusiveSliceProperty { if (newSlice != null) { replica = newSlice.getReplica(replicaName); } else { - newSlice = new Slice(origSlice.getName(), origSlice.getReplicasCopy(), origSlice.shallowCopy()); + newSlice = new Slice(origSlice.getName(), origSlice.getReplicasCopy(), origSlice.shallowCopy(), origSlice.collection); changedSlices.put(origSlice.getName(), newSlice); replica = newSlice.getReplica(replicaName); } diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java index a38dcbe3f44..bc2a0b5d3bc 100644 --- a/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java +++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/CreateCollectionCmd.java @@ -80,9 +80,9 @@ import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS; import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS; import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR; import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS; +import static org.apache.solr.common.params.CollectionAdminParams.ALIAS; import static org.apache.solr.common.params.CollectionAdminParams.COLL_CONF; import static org.apache.solr.common.params.CollectionAdminParams.COLOCATED_WITH; -import static org.apache.solr.common.params.CollectionAdminParams.ALIAS; import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA; import static org.apache.solr.common.params.CollectionParams.CollectionAction.MODIFYCOLLECTION; import static org.apache.solr.common.params.CommonAdminParams.ASYNC; diff --git a/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java b/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java index 20dfc6d1399..aa5cbe90f79 100644 --- a/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java +++ b/solr/core/src/java/org/apache/solr/cloud/api/collections/RestoreCmd.java @@ -18,20 +18,6 @@ package org.apache.solr.cloud.api.collections; -import static org.apache.solr.common.cloud.DocCollection.STATE_FORMAT; -import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP; -import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE; -import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS; -import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS; -import static 
org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR; -import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_TYPE; -import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP; -import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS; -import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE; -import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD; -import static org.apache.solr.common.params.CommonAdminParams.ASYNC; -import static org.apache.solr.common.params.CommonParams.NAME; - import java.lang.invoke.MethodHandles; import java.net.URI; import java.util.ArrayList; @@ -78,6 +64,20 @@ import org.apache.solr.handler.component.ShardHandler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.solr.common.cloud.DocCollection.STATE_FORMAT; +import static org.apache.solr.common.cloud.ZkStateReader.COLLECTION_PROP; +import static org.apache.solr.common.cloud.ZkStateReader.MAX_SHARDS_PER_NODE; +import static org.apache.solr.common.cloud.ZkStateReader.NRT_REPLICAS; +import static org.apache.solr.common.cloud.ZkStateReader.PULL_REPLICAS; +import static org.apache.solr.common.cloud.ZkStateReader.REPLICATION_FACTOR; +import static org.apache.solr.common.cloud.ZkStateReader.REPLICA_TYPE; +import static org.apache.solr.common.cloud.ZkStateReader.SHARD_ID_PROP; +import static org.apache.solr.common.cloud.ZkStateReader.TLOG_REPLICAS; +import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATE; +import static org.apache.solr.common.params.CollectionParams.CollectionAction.CREATESHARD; +import static org.apache.solr.common.params.CommonAdminParams.ASYNC; +import static org.apache.solr.common.params.CommonParams.NAME; + public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd { private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass()); @@ -199,7 +199,7 @@ public class RestoreCmd implements OverseerCollectionMessageHandler.Cmd { Map newSlices = new LinkedHashMap<>(backupSlices.size()); for (Slice backupSlice : backupSlices) { newSlices.put(backupSlice.getName(), - new Slice(backupSlice.getName(), Collections.emptyMap(), backupSlice.getProperties())); + new Slice(backupSlice.getName(), Collections.emptyMap(), backupSlice.getProperties(),restoreCollectionName)); } propMap.put(OverseerCollectionMessageHandler.SHARDS_PROP, newSlices); } diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java index 861ef07fc8d..bb8c654faf9 100644 --- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java +++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/sim/SimClusterStateProvider.java @@ -2386,7 +2386,7 @@ public class SimClusterStateProvider implements ClusterStateProvider { props.put(ZkStateReader.CORE_NAME_PROP, ri.getCore()); props.put(ZkStateReader.REPLICA_TYPE, ri.getType().toString()); props.put(ZkStateReader.STATE_PROP, ri.getState().toString()); - Replica r = new Replica(ri.getName(), props); + Replica r = new Replica(ri.getName(), props, ri.getCollection(), ri.getShard()); collMap.computeIfAbsent(ri.getCollection(), c -> new HashMap<>()) .computeIfAbsent(ri.getShard(), s -> new HashMap<>()) .put(ri.getName(), r); @@ -2410,7 +2410,7 @@ public class SimClusterStateProvider implements ClusterStateProvider { Map slices = new HashMap<>(); 
shards.forEach((s, replicas) -> { Map sliceProps = sliceProperties.computeIfAbsent(coll, c -> new ConcurrentHashMap<>()).computeIfAbsent(s, sl -> new ConcurrentHashMap<>()); - Slice slice = new Slice(s, replicas, sliceProps); + Slice slice = new Slice(s, replicas, sliceProps, coll); slices.put(s, slice); }); Map collProps = collProperties.computeIfAbsent(coll, c -> new ConcurrentHashMap<>()); diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java index 80f24454ce7..6bd1d21e45e 100644 --- a/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java +++ b/solr/core/src/java/org/apache/solr/cloud/overseer/ClusterStateMutator.java @@ -68,7 +68,7 @@ public class ClusterStateMutator { Map slices; if (messageShardsObj instanceof Map) { // we are being explicitly told the slice data (e.g. coll restore) - slices = Slice.loadAllFromMap((Map)messageShardsObj); + slices = Slice.loadAllFromMap(cName, (Map)messageShardsObj); } else { List shardNames = new ArrayList<>(); @@ -89,7 +89,7 @@ public class ClusterStateMutator { Map sliceProps = new LinkedHashMap<>(1); sliceProps.put(Slice.RANGE, ranges == null ? null : ranges.get(i)); - slices.put(sliceName, new Slice(sliceName, null, sliceProps)); + slices.put(sliceName, new Slice(sliceName, null, sliceProps,cName)); } } diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/CollectionMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/CollectionMutator.java index bb549f43363..c1f9a2be5c2 100644 --- a/solr/core/src/java/org/apache/solr/cloud/overseer/CollectionMutator.java +++ b/solr/core/src/java/org/apache/solr/cloud/overseer/CollectionMutator.java @@ -77,7 +77,7 @@ public class CollectionMutator { if (shardParentNode != null) { sliceProps.put("shard_parent_node", shardParentNode); } - collection = updateSlice(collectionName, collection, new Slice(shardId, replicas, sliceProps)); + collection = updateSlice(collectionName, collection, new Slice(shardId, replicas, sliceProps, collectionName)); return new ZkWriteCommand(collectionName, collection); } else { log.error("Unable to create Shard: " + shardId + " because it already exists in collection: " + collectionName); diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/NodeMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/NodeMutator.java index e7aa7b9c530..e9b11a146aa 100644 --- a/solr/core/src/java/org/apache/solr/cloud/overseer/NodeMutator.java +++ b/solr/core/src/java/org/apache/solr/cloud/overseer/NodeMutator.java @@ -65,13 +65,13 @@ public class NodeMutator { log.debug("Update replica state for " + replica + " to " + Replica.State.DOWN.toString()); Map props = replica.shallowCopy(); props.put(ZkStateReader.STATE_PROP, Replica.State.DOWN.toString()); - Replica newReplica = new Replica(replica.getName(), props); + Replica newReplica = new Replica(replica.getName(), props, collection, slice.getName()); newReplicas.put(replica.getName(), newReplica); needToUpdateCollection = true; } } - Slice newSlice = new Slice(slice.getName(), newReplicas, slice.shallowCopy()); + Slice newSlice = new Slice(slice.getName(), newReplicas, slice.shallowCopy(),collection); slicesCopy.put(slice.getName(), newSlice); } diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java index 36caac5becd..a70687a6a6c 100644 --- 
a/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java +++ b/solr/core/src/java/org/apache/solr/cloud/overseer/ReplicaMutator.java @@ -72,7 +72,7 @@ public class ReplicaMutator { Map replicaProps = new LinkedHashMap<>(replica.getProperties()); replicaProps.put(key, value); - return new Replica(replica.getName(), replicaProps); + return new Replica(replica.getName(), replicaProps, replica.getCollection(), replica.getSlice()); } protected Replica unsetProperty(Replica replica, String key) { @@ -81,7 +81,7 @@ public class ReplicaMutator { if (!replica.containsKey(key)) return replica; Map replicaProps = new LinkedHashMap<>(replica.getProperties()); replicaProps.remove(key); - return new Replica(replica.getName(), replicaProps); + return new Replica(replica.getName(), replicaProps, replica.getCollection(), replica.getSlice()); } protected Replica setLeader(Replica replica) { @@ -144,10 +144,11 @@ public class ReplicaMutator { } log.info("Setting property {} with value {} for collection {}", property, propVal, collectionName); log.debug("Full message: {}", message); - if (StringUtils.equalsIgnoreCase(replica.getStr(property), propVal)) return ZkStateWriter.NO_OP; // already the value we're going to set + if (StringUtils.equalsIgnoreCase(replica.getStr(property), propVal)) + return ZkStateWriter.NO_OP; // already the value we're going to set // OK, there's no way we won't change the cluster state now - Map replicas = collection.getSlice(sliceName).getReplicasCopy(); + Map replicas = collection.getSlice(sliceName).getReplicasCopy(); if (isUnique == false) { replicas.get(replicaName).getProperties().put(property, propVal); } else { // Set prop for this replica, but remove it for all others. @@ -159,7 +160,7 @@ public class ReplicaMutator { } } } - Slice newSlice = new Slice(sliceName, replicas, collection.getSlice(sliceName).shallowCopy()); + Slice newSlice = new Slice(sliceName, replicas, collection.getSlice(sliceName).shallowCopy(),collectionName); DocCollection newCollection = CollectionMutator.updateSlice(collectionName, collection, newSlice); return new ZkWriteCommand(collectionName, newCollection); @@ -326,8 +327,8 @@ public class ReplicaMutator { String shardParent = (String) replicaProps.remove(ZkStateReader.SHARD_PARENT_PROP); - Replica replica = new Replica(coreNodeName, replicaProps); - + Replica replica = new Replica(coreNodeName, replicaProps, collectionName, sliceName); + log.debug("Will update state for replica: {}", replica); Map sliceProps = null; @@ -347,7 +348,7 @@ public class ReplicaMutator { sliceProps.put(Slice.PARENT, shardParent); } replicas.put(replica.getName(), replica); - slice = new Slice(sliceName, replicas, sliceProps); + slice = new Slice(sliceName, replicas, sliceProps, collectionName); DocCollection newCollection = CollectionMutator.updateSlice(collectionName, collection, slice); log.debug("Collection is now: {}", newCollection); @@ -424,10 +425,10 @@ public class ReplicaMutator { String parentSliceName = (String) sliceProps.remove(Slice.PARENT); // now lets see if the parent leader is still the same or else there's a chance of data loss // see SOLR-9438 for details - String shardParentZkSession = (String) sliceProps.remove("shard_parent_zk_session"); + String shardParentZkSession = (String) sliceProps.remove("shard_parent_zk_session"); String shardParentNode = (String) sliceProps.remove("shard_parent_node"); boolean isLeaderSame = true; - if (shardParentNode != null && shardParentZkSession != null) { + if (shardParentNode != null && 
shardParentZkSession != null) { log.info("Checking whether sub-shard leader node is still the same one at {} with ZK session id {}", shardParentNode, shardParentZkSession); try { VersionedData leaderZnode = null; @@ -437,7 +438,7 @@ public class ReplicaMutator { } catch (NoSuchElementException e) { // ignore } - if (leaderZnode == null) { + if (leaderZnode == null) { log.error("The shard leader node: {} is not live anymore!", shardParentNode); isLeaderSame = false; } else if (!shardParentZkSession.equals(leaderZnode.getOwner())) { @@ -471,7 +472,7 @@ public class ReplicaMutator { log.info("TIMINGS Sub-shard " + subShardSlice.getName() + " not available: " + subShardSlice); } } - } else { + } else { // we must mark the shard split as failed by switching sub-shards to recovery_failed state propMap.put(sliceName, Slice.State.RECOVERY_FAILED.toString()); for (Slice subShardSlice : subShardSlices) { diff --git a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java index c0a8a7b883b..1bbad8f8e4b 100644 --- a/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java +++ b/solr/core/src/java/org/apache/solr/cloud/overseer/SliceMutator.java @@ -80,7 +80,7 @@ public class SliceMutator { ZkStateReader.BASE_URL_PROP, message.getStr(ZkStateReader.BASE_URL_PROP), ZkStateReader.STATE_PROP, message.getStr(ZkStateReader.STATE_PROP), ZkStateReader.NODE_NAME_PROP, message.getStr(ZkStateReader.NODE_NAME_PROP), - ZkStateReader.REPLICA_TYPE, message.get(ZkStateReader.REPLICA_TYPE))); + ZkStateReader.REPLICA_TYPE, message.get(ZkStateReader.REPLICA_TYPE)), coll, slice); return new ZkWriteCommand(coll, updateReplica(collection, sl, replica.getName(), replica)); } @@ -103,7 +103,7 @@ public class SliceMutator { if (replica != null && (baseUrl == null || baseUrl.equals(replica.getBaseUrl()))) { Map newReplicas = slice.getReplicasCopy(); newReplicas.remove(cnn); - slice = new Slice(slice.getName(), newReplicas, slice.getProperties()); + slice = new Slice(slice.getName(), newReplicas, slice.getProperties(),collection); } newSlices.put(slice.getName(), slice); } @@ -150,7 +150,7 @@ public class SliceMutator { Map newSliceProps = slice.shallowCopy(); newSliceProps.put(Slice.REPLICAS, newReplicas); - slice = new Slice(slice.getName(), newReplicas, slice.getProperties()); + slice = new Slice(slice.getName(), newReplicas, slice.getProperties(), collectionName); return new ZkWriteCommand(collectionName, CollectionMutator.updateSlice(collectionName, coll, slice)); } @@ -180,7 +180,7 @@ public class SliceMutator { props.put(ZkStateReader.STATE_PROP, message.getStr(key)); // we need to use epoch time so that it's comparable across Overseer restarts props.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs())); - Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props); + Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props,collectionName); slicesCopy.put(slice.getName(), newSlice); } @@ -224,7 +224,7 @@ public class SliceMutator { Map props = slice.shallowCopy(); props.put("routingRules", routingRules); - Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props); + Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props,collectionName); return new ZkWriteCommand(collectionName, CollectionMutator.updateSlice(collectionName, collection, newSlice)); } @@ -249,7 +249,7 @@ public class SliceMutator { 
routingRules.remove(routeKeyStr); // no rules left Map props = slice.shallowCopy(); props.put("routingRules", routingRules); - Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props); + Slice newSlice = new Slice(slice.getName(), slice.getReplicasCopy(), props,collectionName); return new ZkWriteCommand(collectionName, CollectionMutator.updateSlice(collectionName, collection, newSlice)); } @@ -264,7 +264,7 @@ public class SliceMutator { } else { replicasCopy.put(replica.getName(), replica); } - Slice newSlice = new Slice(slice.getName(), replicasCopy, slice.getProperties()); + Slice newSlice = new Slice(slice.getName(), replicasCopy, slice.getProperties(), collection.getName()); log.debug("Old Slice: {}", slice); log.debug("New Slice: {}", newSlice); return CollectionMutator.updateSlice(collection.getName(), collection, newSlice); diff --git a/solr/core/src/test/org/apache/solr/cloud/ClusterStateMockUtil.java b/solr/core/src/test/org/apache/solr/cloud/ClusterStateMockUtil.java index a37f16cb8a2..10cfdfb81be 100644 --- a/solr/core/src/test/org/apache/solr/cloud/ClusterStateMockUtil.java +++ b/solr/core/src/test/org/apache/solr/cloud/ClusterStateMockUtil.java @@ -112,6 +112,8 @@ public class ClusterStateMockUtil { collectionProps.put(ZkStateReader.REPLICATION_FACTOR, Integer.toString(replicationFactor)); Map collectionStates = new HashMap<>(); DocCollection docCollection = null; + String collName = null; + String sliceName = null; Slice slice = null; int replicaCount = 1; @@ -121,12 +123,13 @@ public class ClusterStateMockUtil { switch (m.group(1)) { case "c": slices = new HashMap<>(); - docCollection = new DocCollection("collection" + (collectionStates.size() + 1), slices, collectionProps, null); + docCollection = new DocCollection(collName = "collection" + (collectionStates.size() + 1), slices, collectionProps, null); collectionStates.put(docCollection.getName(), docCollection); break; case "s": replicas = new HashMap<>(); - slice = new Slice("slice" + (slices.size() + 1), replicas, null); + if(collName == null) collName = "collection" + (collectionStates.size() + 1); + slice = new Slice(sliceName = "slice" + (slices.size() + 1), replicas, null, collName); slices.put(slice.getName(), slice); break; case "r": @@ -168,8 +171,9 @@ public class ClusterStateMockUtil { replicaPropMap.put(ZkStateReader.NODE_NAME_PROP, nodeName); replicaPropMap.put(ZkStateReader.BASE_URL_PROP, "http://baseUrl" + node); replicaPropMap.put(ZkStateReader.STATE_PROP, state.toString()); - - replica = new Replica(replicaName, replicaPropMap); + if(collName == null) collName = "collection" + (collectionStates.size() + 1); + if(sliceName == null) sliceName = "slice" + (slices.size() + 1); + replica = new Replica(replicaName, replicaPropMap, collName, sliceName); replicas.put(replica.getName(), replica); break; diff --git a/solr/core/src/test/org/apache/solr/cloud/ClusterStateTest.java b/solr/core/src/test/org/apache/solr/cloud/ClusterStateTest.java index 58e0a60aa25..9af6bbf721f 100644 --- a/solr/core/src/test/org/apache/solr/cloud/ClusterStateTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/ClusterStateTest.java @@ -25,8 +25,8 @@ import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.cloud.ClusterState; import org.apache.solr.common.cloud.DocCollection; import org.apache.solr.common.cloud.DocRouter; -import org.apache.solr.common.cloud.Slice; import org.apache.solr.common.cloud.Replica; +import org.apache.solr.common.cloud.Slice; import org.apache.solr.common.util.Utils; import
org.junit.Test; @@ -45,11 +45,11 @@ public class ClusterStateTest extends SolrTestCaseJ4 { props.put("prop1", "value"); props.put("prop2", "value2"); - Replica replica = new Replica("node1", props); + Replica replica = new Replica("node1", props, "collection1", "shard1"); sliceToProps.put("node1", replica); - Slice slice = new Slice("shard1", sliceToProps, null); + Slice slice = new Slice("shard1", sliceToProps, null, "collection1"); slices.put("shard1", slice); - Slice slice2 = new Slice("shard2", sliceToProps, null); + Slice slice2 = new Slice("shard2", sliceToProps, null, "collection1"); slices.put("shard2", slice2); collectionStates.put("collection1", new DocCollection("collection1", slices, null, DocRouter.DEFAULT)); collectionStates.put("collection2", new DocCollection("collection2", slices, null, DocRouter.DEFAULT)); diff --git a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java index 9b2d2d00411..a165030e137 100644 --- a/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/OverseerCollectionConfigSetProcessorTest.java @@ -309,12 +309,12 @@ public class OverseerCollectionConfigSetProcessorTest extends SolrTestCaseJ4 { String slice = replica.getStr(ZkStateReader.SHARD_ID_PROP); if (!slices.containsKey(slice)) slices.put(slice, new HashMap<>()); String replicaName = replica.getStr(ZkStateReader.CORE_NAME_PROP); - slices.get(slice).put(replicaName, new Replica(replicaName, replica.getProperties())); + slices.get(slice).put(replicaName, new Replica(replicaName, replica.getProperties(), docCollection.getName(), slice)); } Map slicesMap = new HashMap<>(); for (Map.Entry> entry : slices.entrySet()) { - slicesMap.put(entry.getKey(), new Slice(entry.getKey(), entry.getValue(), null)); + slicesMap.put(entry.getKey(), new Slice(entry.getKey(), entry.getValue(), null,docCollection.getName())); } return docCollection.copyWithSlices(slicesMap); diff --git a/solr/core/src/test/org/apache/solr/cloud/SliceStateTest.java b/solr/core/src/test/org/apache/solr/cloud/SliceStateTest.java index f2356431bfe..cd6bb89ff02 100644 --- a/solr/core/src/test/org/apache/solr/cloud/SliceStateTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/SliceStateTest.java @@ -16,6 +16,11 @@ */ package org.apache.solr.cloud; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.cloud.ClusterState; import org.apache.solr.common.cloud.DocCollection; @@ -25,11 +30,6 @@ import org.apache.solr.common.cloud.Slice; import org.apache.solr.common.util.Utils; import org.junit.Test; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - public class SliceStateTest extends SolrTestCaseJ4 { @Test @@ -42,9 +42,9 @@ public class SliceStateTest extends SolrTestCaseJ4 { Map sliceToProps = new HashMap<>(); Map props = new HashMap<>(); - Replica replica = new Replica("node1", props); + Replica replica = new Replica("node1", props, "collection1", "shard1"); sliceToProps.put("node1", replica); - Slice slice = new Slice("shard1", sliceToProps, null); + Slice slice = new Slice("shard1", sliceToProps, null, "collection1"); assertSame("Default state not set to active", Slice.State.ACTIVE, slice.getState()); slices.put("shard1", slice); collectionStates.put("collection1", new 
DocCollection("collection1", slices, null, DocRouter.DEFAULT)); diff --git a/solr/core/src/test/org/apache/solr/cloud/TestHashPartitioner.java b/solr/core/src/test/org/apache/solr/cloud/TestHashPartitioner.java index c84c2048857..55e69fae0f7 100644 --- a/solr/core/src/test/org/apache/solr/cloud/TestHashPartitioner.java +++ b/solr/core/src/test/org/apache/solr/cloud/TestHashPartitioner.java @@ -273,7 +273,7 @@ public class TestHashPartitioner extends SolrTestCaseJ4 { Map slices = new HashMap<>(); for (int i=0; i slices = new HashMap<>(); - slices.put("shard1", new Slice("shard1", new HashMap<>(), null)); - slices.put("shard2", new Slice("shard2", new HashMap<>(), null)); + slices.put("shard1", new Slice("shard1", new HashMap<>(), null,"collection1")); + slices.put("shard2", new Slice("shard2", new HashMap<>(), null,"collection1")); DocCollection docCollection = new DocCollection("collection1", slices, null, DocRouter.DEFAULT); assertEquals("Core name pattern changed", "collection1_shard1_replica_n1", Assign.buildSolrCoreName(stateManager, docCollection, "shard1", Replica.Type.NRT)); diff --git a/solr/core/src/test/org/apache/solr/core/CoreSorterTest.java b/solr/core/src/test/org/apache/solr/core/CoreSorterTest.java index bb8bb678bfd..8ababe45d62 100644 --- a/solr/core/src/test/org/apache/solr/core/CoreSorterTest.java +++ b/solr/core/src/test/org/apache/solr/core/CoreSorterTest.java @@ -151,7 +151,7 @@ public class CoreSorterTest extends SolrTestCaseJ4 { } public Replica getReplica(String node) { - return new Replica(replicaName, Utils.makeMap("core", replicaName, "node_name", node)); + return new Replica(replicaName, Utils.makeMap("core", replicaName, "node_name", node), cd.getCollectionName(), cd.getShardId()); } public boolean equals(String coll, String slice) { diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java index 559203f6137..80f26f784db 100644 --- a/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java +++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ClusterState.java @@ -31,7 +31,6 @@ import org.apache.solr.common.SolrException; import org.apache.solr.common.SolrException.ErrorCode; import org.apache.solr.common.util.Utils; import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.Watcher; import org.noggit.JSONWriter; /** @@ -104,7 +103,6 @@ public class ClusterState implements JSONWriter.Writable { * Implementation note: This method resolves the collection reference by calling * {@link CollectionRef#get()} which can make a call to ZooKeeper. This is necessary * because the semantics of how collection list is loaded have changed in SOLR-6629. - * Please see javadocs in {@link ZkStateReader#refreshCollectionList(Watcher)} */ public boolean hasCollection(String collectionName) { return getCollectionOrNull(collectionName) != null; @@ -141,7 +139,6 @@ public class ClusterState implements JSONWriter.Writable { * Implementation note: This method resolves the collection reference by calling * {@link CollectionRef#get()} which may make a call to ZooKeeper. This is necessary * because the semantics of how collection list is loaded have changed in SOLR-6629. 
- * Please see javadocs in {@link ZkStateReader#refreshCollectionList(Watcher)} */ public DocCollection getCollectionOrNull(String collectionName, boolean allowCached) { CollectionRef ref = collectionStates.get(collectionName); @@ -154,7 +151,6 @@ public class ClusterState implements JSONWriter.Writable { * Implementation note: This method resolves the collection reference by calling * {@link CollectionRef#get()} which can make a call to ZooKeeper. This is necessary * because the semantics of how collection list is loaded have changed in SOLR-6629. - * Please see javadocs in {@link ZkStateReader#refreshCollectionList(Watcher)} * * @return a map of collection name vs DocCollection object */ @@ -258,13 +254,13 @@ public class ClusterState implements JSONWriter.Writable { Map<String, Object> props; Map<String, Slice> slices; - Map<String, Object> sliceObjs = (Map<String, Object>)objs.get(DocCollection.SHARDS); + Map<String, Object> sliceObjs = (Map<String, Object>) objs.get(DocCollection.SHARDS); if (sliceObjs == null) { // legacy format from 4.0... there was no separate "shards" level to contain the collection shards. - slices = Slice.loadAllFromMap(objs); + slices = Slice.loadAllFromMap(name, objs); props = Collections.emptyMap(); } else { - slices = Slice.loadAllFromMap(sliceObjs); + slices = Slice.loadAllFromMap(name, sliceObjs); props = new HashMap<>(objs); objs.remove(DocCollection.SHARDS); } diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/Replica.java b/solr/solrj/src/java/org/apache/solr/common/cloud/Replica.java index e3b6d5dd184..bc57176377a 100644 --- a/solr/solrj/src/java/org/apache/solr/common/cloud/Replica.java +++ b/solr/solrj/src/java/org/apache/solr/common/cloud/Replica.java @@ -110,9 +110,12 @@ public class Replica extends ZkNodeProps { private final String nodeName; private final State state; private final Type type; + public final String slice, collection; - public Replica(String name, Map<String, Object> propMap) { + public Replica(String name, Map<String, Object> propMap, String collection, String slice) { super(propMap); + this.collection = collection; + this.slice = slice; this.name = name; this.nodeName = (String) propMap.get(ZkStateReader.NODE_NAME_PROP); if (propMap.get(ZkStateReader.STATE_PROP) != null) { @@ -124,6 +127,13 @@ public class Replica extends ZkNodeProps { type = Type.get((String) propMap.get(ZkStateReader.REPLICA_TYPE)); } + public String getCollection() { + return collection; + } + public String getSlice() { + return slice; + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java b/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java index 2acbd00b15e..c9adbab2357 100644 --- a/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java +++ b/solr/solrj/src/java/org/apache/solr/common/cloud/Slice.java @@ -37,9 +37,10 @@ import static org.apache.solr.common.util.Utils.toJSONString; * A Slice contains immutable information about a logical shard (all replicas that share the same shard id). */ public class Slice extends ZkNodeProps implements Iterable<Replica> { + public final String collection; /** Loads multiple slices into a Map from a generic Map that probably came from deserialized JSON.
*/ - public static Map<String, Slice> loadAllFromMap(Map<String, Object> genericSlices) { + public static Map<String, Slice> loadAllFromMap(String collection, Map<String, Object> genericSlices) { if (genericSlices == null) return Collections.emptyMap(); Map<String, Slice> result = new LinkedHashMap<>(genericSlices.size()); for (Map.Entry<String, Object> entry : genericSlices.entrySet()) { @@ -48,7 +49,7 @@ public class Slice extends ZkNodeProps implements Iterable<Replica> { if (val instanceof Slice) { result.put(name, (Slice) val); } else if (val instanceof Map) { - result.put(name, new Slice(name, null, (Map<String, Object>) val)); + result.put(name, new Slice(name, null, (Map<String, Object>) val, collection)); } } return result; } @@ -128,9 +129,10 @@ public class Slice extends ZkNodeProps implements Iterable<Replica> { * @param replicas The replicas of the slice. This is used directly and a copy is not made. If null, replicas will be constructed from props. * @param props The properties of the slice - a shallow copy will always be made. */ - public Slice(String name, Map<String, Replica> replicas, Map<String, Object> props) { + public Slice(String name, Map<String, Replica> replicas, Map<String, Object> props, String collection) { super( props==null ? new LinkedHashMap<String, Object>(2) : new LinkedHashMap<>(props)); this.name = name; + this.collection = collection; Object rangeObj = propMap.get(RANGE); if (propMap.get(ZkStateReader.STATE_PROP) != null) { @@ -162,7 +164,7 @@ public class Slice extends ZkNodeProps implements Iterable<Replica> { replicationFactor = null; // future // add the replicas *after* the other properties (for aesthetics, so it's easy to find slice properties in the JSON output) - this.replicas = replicas != null ? replicas : makeReplicas((Map<String, Object>)propMap.get(REPLICAS)); + this.replicas = replicas != null ? replicas : makeReplicas(collection, name, (Map<String, Object>) propMap.get(REPLICAS)); propMap.put(REPLICAS, this.replicas); Map<String, Object> rules = (Map<String, Object>) propMap.get("routingRules"); @@ -186,7 +188,7 @@ public class Slice extends ZkNodeProps implements Iterable<Replica> { } - private Map<String, Replica> makeReplicas(Map<String, Object> genericReplicas) { + private Map<String, Replica> makeReplicas(String collection, String slice, Map<String, Object> genericReplicas) { if (genericReplicas == null) return new HashMap<>(1); Map<String, Replica> result = new LinkedHashMap<>(genericReplicas.size()); for (Map.Entry<String, Object> entry : genericReplicas.entrySet()) { @@ -196,7 +198,7 @@ public class Slice extends ZkNodeProps implements Iterable<Replica> { if (val instanceof Replica) { r = (Replica)val; } else { - r = new Replica(name, (Map<String, Object>)val); + r = new Replica(name, (Map<String, Object>)val, collection, slice); } result.put(name, r); } @@ -213,6 +215,9 @@ public class Slice extends ZkNodeProps implements Iterable<Replica> { return null; } + public String getCollection() { + return collection; + } /** * Return slice name (shard id). */ diff --git a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java index 7185f4e7c7d..a7d7e37fb00 100644 --- a/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java +++ b/solr/solrj/src/java/org/apache/solr/common/cloud/ZkStateReader.java @@ -944,6 +944,11 @@ public class ZkStateReader implements SolrCloseable { return null; } + public boolean isNodeLive(String node) { + return liveNodes.contains(node); + + } + /** * Get shard leader properties, with retry if none exist.
*/ diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java index d428c97a2d3..e6917ec246b 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/cloud/autoscaling/TestPolicy.java @@ -18,16 +18,6 @@ package org.apache.solr.client.solrj.cloud.autoscaling; -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.CLUSTER_PREFERENCES; -import static org.apache.solr.client.solrj.cloud.autoscaling.TestPolicy2.loadFromResource; -import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.CORES; -import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.FREEDISK; -import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.REPLICA; -import static org.apache.solr.common.cloud.ZkStateReader.CLUSTER_STATE; -import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA; -import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA; - import java.io.IOException; import java.io.StringWriter; import java.lang.invoke.MethodHandles; @@ -45,6 +35,8 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.client.solrj.SolrRequest; import org.apache.solr.client.solrj.SolrResponse; @@ -83,8 +75,15 @@ import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.solr.client.solrj.cloud.autoscaling.Policy.CLUSTER_PREFERENCES; +import static org.apache.solr.client.solrj.cloud.autoscaling.TestPolicy2.loadFromResource; +import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.CORES; +import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.FREEDISK; +import static org.apache.solr.client.solrj.cloud.autoscaling.Variable.Type.REPLICA; +import static org.apache.solr.common.cloud.ZkStateReader.CLUSTER_STATE; +import static org.apache.solr.common.params.CollectionParams.CollectionAction.ADDREPLICA; +import static org.apache.solr.common.params.CollectionParams.CollectionAction.MOVEREPLICA; public class TestPolicy extends SolrTestCaseJ4 { boolean useNodeset ; @@ -2595,13 +2594,13 @@ public class TestPolicy extends SolrTestCaseJ4 { if (node.equals("node1")) { Map m = Utils.makeMap("newColl", Utils.makeMap("shard1", Collections.singletonList(new ReplicaInfo("r1", "shard1", - new Replica("r1", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node1")), + new Replica("r1", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node1"), "newColl", "shard1"), Utils.makeMap(FREEDISK.perReplicaValue, 200))))); return m; } else if (node.equals("node2")) { Map m = Utils.makeMap("newColl", Utils.makeMap("shard2", Collections.singletonList(new ReplicaInfo("r1", "shard2", - new Replica("r1", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node2")), + new Replica("r1", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node2"),"newColl", "shard2"), Utils.makeMap(FREEDISK.perReplicaValue, 200))))); return m; } @@ 
-2624,9 +2623,9 @@ public class TestPolicy extends SolrTestCaseJ4 { @Override public Replica getLeader(String sliceName) { if (sliceName.equals("shard1")) - return new Replica("r1", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node1")); + return new Replica("r1", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node1"), name, "shard1"); if (sliceName.equals("shard2")) - return new Replica("r2", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node2")); + return new Replica("r2", Utils.makeMap(ZkStateReader.NODE_NAME_PROP, "node2"), name, "shard2"); return null; } }; @@ -2643,20 +2642,20 @@ public void testMoveReplicaLeaderlast() { List<Pair<ReplicaInfo, Row>> validReplicas = new ArrayList<>(); - Replica replica = new Replica("r1", Utils.makeMap("leader", "true")); - ReplicaInfo replicaInfo = new ReplicaInfo("c1", "s1", replica, new HashMap<>()); + Replica replica = new Replica("r1", Utils.makeMap("leader", "true"), "c1", "s1"); + ReplicaInfo replicaInfo = new ReplicaInfo(replica.collection, replica.slice, replica, new HashMap<>()); validReplicas.add(new Pair<>(replicaInfo, null)); replicaInfo = new ReplicaInfo("r4", "c1_s2_r1", "c1", "s2", Replica.Type.NRT, "n1", Collections.singletonMap("leader", "true")); validReplicas.add(new Pair<>(replicaInfo, null)); - replica = new Replica("r2", Utils.makeMap("leader", false)); - replicaInfo = new ReplicaInfo("c1", "s1", replica, new HashMap<>()); + replica = new Replica("r2", Utils.makeMap("leader", false), "c1", "s1"); + replicaInfo = new ReplicaInfo(replica.collection, replica.slice, replica, new HashMap<>()); validReplicas.add(new Pair<>(replicaInfo, null)); - replica = new Replica("r3", Utils.makeMap("leader", false)); - replicaInfo = new ReplicaInfo("c1", "s1", replica, new HashMap<>()); + replica = new Replica("r3", Utils.makeMap("leader", false), "c1", "s1"); + replicaInfo = new ReplicaInfo(replica.collection, replica.slice, replica, new HashMap<>()); validReplicas.add(new Pair<>(replicaInfo, null)); diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/routing/NodePreferenceRulesComparatorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/routing/NodePreferenceRulesComparatorTest.java index 245947a1788..3998dd08507 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/routing/NodePreferenceRulesComparatorTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/routing/NodePreferenceRulesComparatorTest.java @@ -75,7 +75,7 @@ public class NodePreferenceRulesComparatorTest extends SolrTestCaseJ4 { ZkStateReader.NODE_NAME_PROP, "node4", ZkStateReader.CORE_NAME_PROP, "collection1", ZkStateReader.REPLICA_TYPE, "TLOG" - ) + ), "collection1", "shard1" ) ); @@ -125,7 +125,7 @@ public class NodePreferenceRulesComparatorTest extends SolrTestCaseJ4 { ZkStateReader.NODE_NAME_PROP, "node1", ZkStateReader.CORE_NAME_PROP, "collection1", ZkStateReader.REPLICA_TYPE, "NRT" - ) + ), "collection1", "shard1" ) ); replicas.add( @@ -136,7 +136,7 @@ public class NodePreferenceRulesComparatorTest extends SolrTestCaseJ4 { ZkStateReader.NODE_NAME_PROP, "node2", ZkStateReader.CORE_NAME_PROP, "collection1", ZkStateReader.REPLICA_TYPE, "TLOG" - ) + ), "collection1", "shard1" ) ); replicas.add( @@ -147,7 +147,7 @@ public class NodePreferenceRulesComparatorTest extends SolrTestCaseJ4 { ZkStateReader.NODE_NAME_PROP, "node3", ZkStateReader.CORE_NAME_PROP, "collection1", ZkStateReader.REPLICA_TYPE, "PULL" - ) + ), "collection1", "shard1" ) ); return replicas; diff --git
a/solr/solrj/src/test/org/apache/solr/client/solrj/routing/ReplicaListTransformerTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/routing/ReplicaListTransformerTest.java index 7fcd04abd85..905f82e6989 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/routing/ReplicaListTransformerTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/routing/ReplicaListTransformerTest.java @@ -23,7 +23,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; - import org.apache.solr.SolrTestCase; import org.apache.solr.common.cloud.Replica; import org.apache.solr.common.params.ModifiableSolrParams; @@ -128,7 +127,7 @@ public class ReplicaListTransformerTest extends SolrTestCase { final Map<String, Object> propMap = new HashMap<String, Object>(); propMap.put("base_url", url); // a skeleton replica, good enough for this test's purposes - final Replica replica = new Replica(name, propMap); + final Replica replica = new Replica(name, propMap, "c1", "s1"); inputs.add(replica); if (url.matches(regex)) { diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/routing/RequestReplicaListTransformerGeneratorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/routing/RequestReplicaListTransformerGeneratorTest.java index f8f2e681640..5b64fde54ba 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/routing/RequestReplicaListTransformerGeneratorTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/routing/RequestReplicaListTransformerGeneratorTest.java @@ -84,7 +84,7 @@ public class RequestReplicaListTransformerGeneratorTest extends SolrTestCaseJ4 { ZkStateReader.NODE_NAME_PROP, "node4", ZkStateReader.CORE_NAME_PROP, "collection1", ZkStateReader.REPLICA_TYPE, "TLOG" - ) + ), "c1", "s1" ) ); @@ -122,7 +122,7 @@ public class RequestReplicaListTransformerGeneratorTest extends SolrTestCaseJ4 { ZkStateReader.NODE_NAME_PROP, "node1", ZkStateReader.CORE_NAME_PROP, "collection1", ZkStateReader.REPLICA_TYPE, "NRT" - ) + ), "c1", "s1" ) ); replicas.add( @@ -133,7 +133,7 @@ public class RequestReplicaListTransformerGeneratorTest extends SolrTestCaseJ4 { ZkStateReader.NODE_NAME_PROP, "node2", ZkStateReader.CORE_NAME_PROP, "collection1", ZkStateReader.REPLICA_TYPE, "TLOG" - ) + ), "c1", "s1" ) ); replicas.add( @@ -144,7 +144,7 @@ public class RequestReplicaListTransformerGeneratorTest extends SolrTestCaseJ4 { ZkStateReader.NODE_NAME_PROP, "node3", ZkStateReader.CORE_NAME_PROP, "collection1", ZkStateReader.REPLICA_TYPE, "PULL" - ) + ), "c1", "s1" ) ); return replicas; diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/routing/ShufflingReplicaListTransformerTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/routing/ShufflingReplicaListTransformerTest.java index 7e4be97dfa9..33d1baaf624 100644 --- a/solr/solrj/src/test/org/apache/solr/client/solrj/routing/ShufflingReplicaListTransformerTest.java +++ b/solr/solrj/src/test/org/apache/solr/client/solrj/routing/ShufflingReplicaListTransformerTest.java @@ -35,7 +35,7 @@ public class ShufflingReplicaListTransformerTest extends SolrTestCase { public void testTransformReplicas() throws Exception { final List<Replica> replicas = new ArrayList<>(); for (final String url : createRandomUrls()) { - replicas.add(new Replica(url, new HashMap<String, Object>())); + replicas.add(new Replica(url, new HashMap<String, Object>(), "c1", "s1")); } implTestTransform(replicas); } From a51c7b89f2597a0b22041517ad6ea55ff6011589 Mon Sep 17 00:00:00 2001 From: noble Date: Tue, 3 Dec 2019 15:16:34 +1100 Subject: [PATCH 05/11] SOLR-13995: Move ZkShardTerms.Terms to SolrJ ---
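Note: the extracted ShardTerms keeps the copy-on-write contract of the old ZkShardTerms.Terms. Every mutator returns a new instance when something changed, or null when the requested change is already in effect, and ZkShardTerms retries the ZooKeeper save (a compare-and-set on the znode version) until it succeeds. Below is a minimal sketch of that retry pattern against the new public class; the save predicate is a stand-in for ZkShardTerms#saveTerms and is not part of this patch.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Predicate;

import org.apache.solr.client.solrj.cloud.ShardTerms;

// Sketch: the optimistic update loop that ZkShardTerms wraps around the
// immutable ShardTerms. A mutator returning null means the change is
// already reflected, so there is nothing to save.
public class ShardTermsRetrySketch {
  public static void main(String[] args) {
    Map<String, Long> values = new HashMap<>();
    values.put("core_node1", 1L);
    values.put("core_node2", 1L);
    ShardTerms terms = new ShardTerms(values, 0);

    // Stand-in for persisting to ZooKeeper with a BadVersionException
    // retry; in this sketch the save always succeeds on the first try.
    Predicate<ShardTerms> save = t -> true;

    ShardTerms newTerms;
    while ((newTerms = terms.registerTerm("core_node3")) != null) {
      if (save.test(newTerms)) {
        terms = newTerms;
        break;
      }
    }
    System.out.println(terms.getTerm("core_node3")); // prints 0
  }
}

The same loop shape appears in ensureTermsIsHigher, registerTerm, setTermEqualsToLeader, startRecovering and doneRecovering in the ZkShardTerms diff below.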
.../solr/cloud/RecoveringCoreTermWatcher.java | 3 +- .../org/apache/solr/cloud/ZkShardTerms.java | 259 ++---------------- .../apache/solr/cloud/ZkShardTermsTest.java | 3 +- .../solr/client/solrj/cloud/ShardTerms.java | 256 +++++++++++++++++ 4 files changed, 286 insertions(+), 235 deletions(-) create mode 100644 solr/solrj/src/java/org/apache/solr/client/solrj/cloud/ShardTerms.java diff --git a/solr/core/src/java/org/apache/solr/cloud/RecoveringCoreTermWatcher.java b/solr/core/src/java/org/apache/solr/cloud/RecoveringCoreTermWatcher.java index 007d22147c8..5d4ec17f8db 100644 --- a/solr/core/src/java/org/apache/solr/cloud/RecoveringCoreTermWatcher.java +++ b/solr/core/src/java/org/apache/solr/cloud/RecoveringCoreTermWatcher.java @@ -20,6 +20,7 @@ package org.apache.solr.cloud; import java.lang.invoke.MethodHandles; import java.util.concurrent.atomic.AtomicLong; +import org.apache.solr.client.solrj.cloud.ShardTerms; import org.apache.solr.core.CoreContainer; import org.apache.solr.core.CoreDescriptor; import org.apache.solr.core.SolrCore; @@ -44,7 +45,7 @@ public class RecoveringCoreTermWatcher implements ZkShardTerms.CoreTermWatcher { } @Override - public boolean onTermChanged(ZkShardTerms.Terms terms) { + public boolean onTermChanged(ShardTerms terms) { if (coreContainer.isShutDown()) return false; try (SolrCore solrCore = coreContainer.getCore(coreDescriptor.getName())) { diff --git a/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java b/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java index 2c97164bad5..2c2a24a20be 100644 --- a/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java +++ b/solr/core/src/java/org/apache/solr/cloud/ZkShardTerms.java @@ -18,15 +18,14 @@ package org.apache.solr.cloud; import java.lang.invoke.MethodHandles; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; -import java.util.Objects; import java.util.Set; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.solr.client.solrj.cloud.ShardTerms; import org.apache.solr.common.SolrException; import org.apache.solr.common.cloud.SolrZkClient; import org.apache.solr.common.cloud.ZkStateReader; @@ -74,14 +73,12 @@ public class ZkShardTerms implements AutoCloseable{ private final Set<CoreTermWatcher> listeners = new HashSet<>(); private final AtomicBoolean isClosed = new AtomicBoolean(false); - private static final String RECOVERING_TERM_SUFFIX = "_recovering"; - - private Terms terms; + private ShardTerms terms; // Listener of a core for shard's term change events interface CoreTermWatcher { // return true if the listener wants to be triggered again next time - boolean onTermChanged(Terms terms); + boolean onTermChanged(ShardTerms terms); } public ZkShardTerms(String collection, String shard, SolrZkClient zkClient) { @@ -103,12 +100,15 @@ public void ensureTermsIsHigher(String leader, Set<String> replicasNeedingRecovery) { if (replicasNeedingRecovery.isEmpty()) return; - Terms newTerms; + ShardTerms newTerms; while( (newTerms = terms.increaseTerms(leader, replicasNeedingRecovery)) != null) { if (forceSaveTerms(newTerms)) return; } } + public ShardTerms getShardTerms() { + return terms; + } /** * Can this replica become leader?
* @param coreNodeName of the replica @@ -148,7 +148,7 @@ // package private for testing, only used by tests Map<String, Long> getTerms() { synchronized (writingLock) { - return new HashMap<>(terms.values); + return terms.getTerms(); } } @@ -178,7 +178,7 @@ // package private for testing, only used by tests // return true if this object should not be reused boolean removeTerm(String coreNodeName) { - Terms newTerms; + ShardTerms newTerms; while ( (newTerms = terms.removeTerm(coreNodeName)) != null) { try { if (saveTerms(newTerms)) return false; @@ -195,7 +195,7 @@ * @param coreNodeName of the replica */ void registerTerm(String coreNodeName) { - Terms newTerms; + ShardTerms newTerms; while ( (newTerms = terms.registerTerm(coreNodeName)) != null) { if (forceSaveTerms(newTerms)) break; } } @@ -207,14 +207,14 @@ * @param coreNodeName of the replica */ public void setTermEqualsToLeader(String coreNodeName) { - Terms newTerms; + ShardTerms newTerms; while ( (newTerms = terms.setTermEqualsToLeader(coreNodeName)) != null) { if (forceSaveTerms(newTerms)) break; } } public void setTermToZero(String coreNodeName) { - Terms newTerms; + ShardTerms newTerms; while ( (newTerms = terms.setTermToZero(coreNodeName)) != null) { if (forceSaveTerms(newTerms)) break; } @@ -224,7 +224,7 @@ * Mark {@code coreNodeName} as recovering */ public void startRecovering(String coreNodeName) { - Terms newTerms; + ShardTerms newTerms; while ( (newTerms = terms.startRecovering(coreNodeName)) != null) { if (forceSaveTerms(newTerms)) break; } @@ -234,27 +234,22 @@ * Mark {@code coreNodeName} as finished recovering */ public void doneRecovering(String coreNodeName) { - Terms newTerms; + ShardTerms newTerms; while ( (newTerms = terms.doneRecovering(coreNodeName)) != null) { if (forceSaveTerms(newTerms)) break; } } public boolean isRecovering(String name) { - return terms.values.containsKey(recoveringTerm(name)); + return terms.isRecovering(name); } - public static String recoveringTerm(String coreNodeName) { - return coreNodeName + RECOVERING_TERM_SUFFIX; - } - - /** * When first updates come in, all replicas have some data now, * so we must switch from term 0 (registered) to 1 (have some data) */ public void ensureHighestTermsAreNotZero() { - Terms newTerms; + ShardTerms newTerms; while ( (newTerms = terms.ensureHighestTermsAreNotZero()) != null) { if (forceSaveTerms(newTerms)) break; } @@ -282,7 +277,7 @@ * @param newTerms to be set * @return true if terms is saved successfully to ZK, false otherwise */ - private boolean forceSaveTerms(Terms newTerms) { + private boolean forceSaveTerms(ShardTerms newTerms) { try { return saveTerms(newTerms); } catch (KeeperException.NoNodeException e) { @@ -297,11 +292,11 @@ * @return true if terms is saved successfully to ZK, false otherwise * @throws KeeperException.NoNodeException if the corresponding ZK term node has not been created */ - private boolean saveTerms(Terms newTerms) throws KeeperException.NoNodeException { - byte[] znodeData = Utils.toJSON(newTerms.values); + private boolean saveTerms(ShardTerms newTerms) throws KeeperException.NoNodeException { + byte[] znodeData = Utils.toJSON(newTerms);
try { - Stat stat = zkClient.setData(znodePath, znodeData, newTerms.version, true); - setNewTerms(new Terms(newTerms.values, stat.getVersion())); + Stat stat = zkClient.setData(znodePath, znodeData, newTerms.getVersion(), true); + setNewTerms(new ShardTerms(newTerms, stat.getVersion())); log.info("Successful update of terms at {} to {}", znodePath, newTerms); return true; } catch (KeeperException.BadVersionException e) { @@ -344,11 +339,11 @@ * Fetch latest terms from ZK */ public void refreshTerms() { - Terms newTerms; + ShardTerms newTerms; try { Stat stat = new Stat(); byte[] data = zkClient.getData(znodePath, null, stat, true); - newTerms = new Terms((Map<String, Long>) Utils.fromJSON(data), stat.getVersion()); + newTerms = new ShardTerms((Map<String, Long>) Utils.fromJSON(data), stat.getVersion()); } catch (KeeperException e) { Thread.interrupted(); throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error updating shard term for collection: " + collection, e); @@ -411,10 +406,10 @@ * Atomically update {@link ZkShardTerms#terms} and call listeners * @param newTerms to be set */ - private void setNewTerms(Terms newTerms) { + private void setNewTerms(ShardTerms newTerms) { boolean isChanged = false; synchronized (writingLock) { - if (terms == null || newTerms.version > terms.version) { + if (terms == null || newTerms.getVersion() > terms.getVersion()) { terms = newTerms; isChanged = true; } @@ -422,211 +417,9 @@ if (isChanged) onTermUpdates(newTerms); } - private void onTermUpdates(Terms newTerms) { + private void onTermUpdates(ShardTerms newTerms) { synchronized (listeners) { listeners.removeIf(coreTermWatcher -> !coreTermWatcher.onTermChanged(newTerms)); } } - - /** - * Hold values of terms, this class is immutable - */ - static class Terms { - private final Map<String, Long> values; - private final long maxTerm; - // ZK node version - private final int version; - - public Terms () { - this(new HashMap<>(), 0); - } - - public Terms(Map<String, Long> values, int version) { - this.values = values; - this.version = version; - if (values.isEmpty()) this.maxTerm = 0; - else this.maxTerm = Collections.max(values.values()); - } - - /** - * Can {@code coreNodeName} become leader? - * @param coreNodeName of the replica - * @return true if {@code coreNodeName} can become leader, false if otherwise - */ - boolean canBecomeLeader(String coreNodeName) { - return haveHighestTermValue(coreNodeName) && !values.containsKey(recoveringTerm(coreNodeName)); - } - - /** - * Is {@code coreNodeName}'s term highest?
- * @param coreNodeName of the replica - * @return true if term of {@code coreNodeName} is highest - */ - boolean haveHighestTermValue(String coreNodeName) { - if (values.isEmpty()) return true; - long maxTerm = Collections.max(values.values()); - return values.getOrDefault(coreNodeName, 0L) == maxTerm; - } - - Long getTerm(String coreNodeName) { - return values.get(coreNodeName); - } - - /** - * Return a new {@link Terms} in which term of {@code leader} is higher than {@code replicasNeedingRecovery} - * @param leader coreNodeName of leader - * @param replicasNeedingRecovery set of replicas in which their terms should be lower than leader's term - * @return null if term of {@code leader} is already higher than {@code replicasNeedingRecovery} - */ - Terms increaseTerms(String leader, Set<String> replicasNeedingRecovery) { - if (!values.containsKey(leader)) { - throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Can not find leader's term " + leader); - } - - boolean changed = false; - boolean foundReplicasInLowerTerms = false; - - HashMap<String, Long> newValues = new HashMap<>(values); - long leaderTerm = newValues.get(leader); - for (Map.Entry<String, Long> entry : newValues.entrySet()) { - String key = entry.getKey(); - if (replicasNeedingRecovery.contains(key)) foundReplicasInLowerTerms = true; - if (Objects.equals(entry.getValue(), leaderTerm)) { - if(skipIncreaseTermOf(key, replicasNeedingRecovery)) { - changed = true; - } else { - newValues.put(key, leaderTerm+1); - } - } - } - - // We should skip the optimization if there are no replicasNeedingRecovery present in local terms, - // this may indicate that the current value is stale - if (!changed && foundReplicasInLowerTerms) return null; - return new Terms(newValues, version); - } - - private boolean skipIncreaseTermOf(String key, Set<String> replicasNeedingRecovery) { - if (key.endsWith(RECOVERING_TERM_SUFFIX)) { - key = key.substring(0, key.length() - RECOVERING_TERM_SUFFIX.length()); - } - return replicasNeedingRecovery.contains(key); - } - - /** - * Return a new {@link Terms} in which highest terms are not zero - * @return null if highest terms are already larger than zero - */ - Terms ensureHighestTermsAreNotZero() { - if (maxTerm > 0) return null; - else { - HashMap<String, Long> newValues = new HashMap<>(values); - for (String replica : values.keySet()) { - newValues.put(replica, 1L); - } - return new Terms(newValues, version); - } - } - - /** - * Return a new {@link Terms} in which terms for the {@code coreNodeName} are removed - * @param coreNodeName of the replica - * @return null if term of {@code coreNodeName} is already not exist - */ - Terms removeTerm(String coreNodeName) { - if (!values.containsKey(recoveringTerm(coreNodeName)) && !values.containsKey(coreNodeName)) { - return null; - } - - HashMap<String, Long> newValues = new HashMap<>(values); - newValues.remove(coreNodeName); - newValues.remove(recoveringTerm(coreNodeName)); - - return new Terms(newValues, version); - } - - /** - * Return a new {@link Terms} in which the associate term of {@code coreNodeName} is not null - * @param coreNodeName of the replica - * @return null if term of {@code coreNodeName} is already exist - */ - Terms registerTerm(String coreNodeName) { - if (values.containsKey(coreNodeName)) return null; - - HashMap<String, Long> newValues = new HashMap<>(values); - newValues.put(coreNodeName, 0L); - return new Terms(newValues, version); - } - - Terms setTermToZero(String coreNodeName) { - if (values.getOrDefault(coreNodeName, -1L) == 0) { - return null; - } - HashMap<String, Long> newValues = new HashMap<>(values); -
newValues.put(coreNodeName, 0L); - return new Terms(newValues, version); - } - - /** - * Return a new {@link Terms} in which the term of {@code coreNodeName} is max - * @param coreNodeName of the replica - * @return null if term of {@code coreNodeName} is already maximum - */ - Terms setTermEqualsToLeader(String coreNodeName) { - long maxTerm = getMaxTerm(); - if (values.get(coreNodeName) == maxTerm) return null; - - HashMap<String, Long> newValues = new HashMap<>(values); - newValues.put(coreNodeName, maxTerm); - newValues.remove(recoveringTerm(coreNodeName)); - return new Terms(newValues, version); - } - - long getMaxTerm() { - return maxTerm; - } - - /** - * Mark {@code coreNodeName} as recovering - * @param coreNodeName of the replica - * @return null if {@code coreNodeName} is already marked as doing recovering - */ - Terms startRecovering(String coreNodeName) { - long maxTerm = getMaxTerm(); - if (values.get(coreNodeName) == maxTerm) - return null; - - HashMap<String, Long> newValues = new HashMap<>(values); - if (!newValues.containsKey(recoveringTerm(coreNodeName))) { - long currentTerm = newValues.getOrDefault(coreNodeName, 0L); - // by keeping old term, we will have more information in leader election - newValues.put(recoveringTerm(coreNodeName), currentTerm); - } - newValues.put(coreNodeName, maxTerm); - return new Terms(newValues, version); - } - - /** - * Mark {@code coreNodeName} as finished recovering - * @param coreNodeName of the replica - * @return null if term of {@code coreNodeName} is already finished doing recovering - */ - Terms doneRecovering(String coreNodeName) { - if (!values.containsKey(recoveringTerm(coreNodeName))) { - return null; - } - - HashMap<String, Long> newValues = new HashMap<>(values); - newValues.remove(recoveringTerm(coreNodeName)); - return new Terms(newValues, version); - } - - @Override - public String toString() { - return "Terms{" + - "values=" + values + - ", version=" + version + - '}'; - } - } } diff --git a/solr/core/src/test/org/apache/solr/cloud/ZkShardTermsTest.java b/solr/core/src/test/org/apache/solr/cloud/ZkShardTermsTest.java index a56202a4431..56ed8ae7cb4 100644 --- a/solr/core/src/test/org/apache/solr/cloud/ZkShardTermsTest.java +++ b/solr/core/src/test/org/apache/solr/cloud/ZkShardTermsTest.java @@ -32,6 +32,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; import org.apache.solr.client.solrj.SolrServerException; +import org.apache.solr.client.solrj.cloud.ShardTerms; import org.apache.solr.client.solrj.request.CollectionAdminRequest; import org.apache.solr.common.util.TimeSource; import org.apache.solr.util.TimeOut; @@ -267,7 +268,7 @@ public class ZkShardTermsTest extends SolrCloudTestCase { public void testEnsureTermsIsHigher() { Map<String, Long> map = new HashMap<>(); map.put("leader", 0L); - ZkShardTerms.Terms terms = new ZkShardTerms.Terms(map, 0); + ShardTerms terms = new ShardTerms(map, 0); terms = terms.increaseTerms("leader", Collections.singleton("replica")); assertEquals(1L, terms.getTerm("leader").longValue()); } diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/ShardTerms.java b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/ShardTerms.java new file mode 100644 index 00000000000..3b2f754bc4d --- /dev/null +++ b/solr/solrj/src/java/org/apache/solr/client/solrj/cloud/ShardTerms.java @@ -0,0 +1,256 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.solr.client.solrj.cloud; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +import org.apache.solr.common.MapWriter; +import org.apache.solr.common.SolrException; + +/** + * Holds the term values for a shard. This class is immutable; create a new instance for every mutation + */ +public class ShardTerms implements MapWriter { + private static final String RECOVERING_TERM_SUFFIX = "_recovering"; + private final Map<String, Long> values; + private final long maxTerm; + // ZK node version + private final int version; + + public ShardTerms() { + this(new HashMap<>(), 0); + } + + public ShardTerms(ShardTerms newTerms, int version) { + this(newTerms.values, version); + } + + @Override + public void writeMap(EntryWriter ew) throws IOException { + values.forEach(ew.getBiConsumer()); + } + + public ShardTerms(Map<String, Long> values, int version) { + this.values = values; + this.version = version; + if (values.isEmpty()) this.maxTerm = 0; + else this.maxTerm = Collections.max(values.values()); + } + + /** + * Can {@code coreNodeName} become leader? + * @param coreNodeName of the replica + * @return true if {@code coreNodeName} can become leader, false otherwise + */ + public boolean canBecomeLeader(String coreNodeName) { + return haveHighestTermValue(coreNodeName) && !values.containsKey(recoveringTerm(coreNodeName)); + } + + /** + * Is {@code coreNodeName}'s term highest?
+ * @param coreNodeName of the replica + * @return true if term of {@code coreNodeName} is highest + */ + public boolean haveHighestTermValue(String coreNodeName) { + if (values.isEmpty()) return true; + long maxTerm = Collections.max(values.values()); + return values.getOrDefault(coreNodeName, 0L) == maxTerm; + } + + public Long getTerm(String coreNodeName) { + return values.get(coreNodeName); + } + + /** + * Return a new {@link ShardTerms} in which term of {@code leader} is higher than {@code replicasNeedingRecovery} + * @param leader coreNodeName of leader + * @param replicasNeedingRecovery set of replicas in which their terms should be lower than leader's term + * @return null if term of {@code leader} is already higher than {@code replicasNeedingRecovery} + */ + public ShardTerms increaseTerms(String leader, Set<String> replicasNeedingRecovery) { + if (!values.containsKey(leader)) { + throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Can not find leader's term " + leader); + } + + boolean changed = false; + boolean foundReplicasInLowerTerms = false; + + HashMap<String, Long> newValues = new HashMap<>(values); + long leaderTerm = newValues.get(leader); + for (Map.Entry<String, Long> entry : newValues.entrySet()) { + String key = entry.getKey(); + if (replicasNeedingRecovery.contains(key)) foundReplicasInLowerTerms = true; + if (Objects.equals(entry.getValue(), leaderTerm)) { + if (skipIncreaseTermOf(key, replicasNeedingRecovery)) { + changed = true; + } else { + newValues.put(key, leaderTerm + 1); + } + } + } + + // We should skip the optimization if there are no replicasNeedingRecovery present in local terms, + // this may indicate that the current value is stale + if (!changed && foundReplicasInLowerTerms) return null; + return new ShardTerms(newValues, version); + } + + private boolean skipIncreaseTermOf(String key, Set<String> replicasNeedingRecovery) { + if (key.endsWith(RECOVERING_TERM_SUFFIX)) { + key = key.substring(0, key.length() - RECOVERING_TERM_SUFFIX.length()); + } + return replicasNeedingRecovery.contains(key); + } + + /** + * Return a new {@link ShardTerms} in which highest terms are not zero + * @return null if highest terms are already larger than zero + */ + public ShardTerms ensureHighestTermsAreNotZero() { + if (maxTerm > 0) return null; + else { + HashMap<String, Long> newValues = new HashMap<>(values); + for (String replica : values.keySet()) { + newValues.put(replica, 1L); + } + return new ShardTerms(newValues, version); + } + } + + /** + * Return a new {@link ShardTerms} in which terms for the {@code coreNodeName} are removed + * @param coreNodeName of the replica + * @return null if term of {@code coreNodeName} does not exist + */ + public ShardTerms removeTerm(String coreNodeName) { + if (!values.containsKey(recoveringTerm(coreNodeName)) && !values.containsKey(coreNodeName)) { + return null; + } + + HashMap<String, Long> newValues = new HashMap<>(values); + newValues.remove(coreNodeName); + newValues.remove(recoveringTerm(coreNodeName)); + + return new ShardTerms(newValues, version); + } + + /** + * Return a new {@link ShardTerms} in which the associated term of {@code coreNodeName} is not null + * @param coreNodeName of the replica + * @return null if term of {@code coreNodeName} already exists + */ + public ShardTerms registerTerm(String coreNodeName) { + if (values.containsKey(coreNodeName)) return null; + + HashMap<String, Long> newValues = new HashMap<>(values); + newValues.put(coreNodeName, 0L); + return new ShardTerms(newValues, version); + } + + public ShardTerms setTermToZero(String coreNodeName) { + if
(values.getOrDefault(coreNodeName, -1L) == 0) { + return null; + } + HashMap<String, Long> newValues = new HashMap<>(values); + newValues.put(coreNodeName, 0L); + return new ShardTerms(newValues, version); + } + + /** + * Return a new {@link ShardTerms} in which the term of {@code coreNodeName} is max + * @param coreNodeName of the replica + * @return null if term of {@code coreNodeName} is already maximum + */ + public ShardTerms setTermEqualsToLeader(String coreNodeName) { + long maxTerm = getMaxTerm(); + if (values.get(coreNodeName) == maxTerm) return null; + + HashMap<String, Long> newValues = new HashMap<>(values); + newValues.put(coreNodeName, maxTerm); + newValues.remove(recoveringTerm(coreNodeName)); + return new ShardTerms(newValues, version); + } + + public long getMaxTerm() { + return maxTerm; + } + + /** + * Mark {@code coreNodeName} as recovering + * @param coreNodeName of the replica + * @return null if {@code coreNodeName} is already marked as recovering + */ + public ShardTerms startRecovering(String coreNodeName) { + long maxTerm = getMaxTerm(); + if (values.get(coreNodeName) == maxTerm) + return null; + + HashMap<String, Long> newValues = new HashMap<>(values); + if (!newValues.containsKey(recoveringTerm(coreNodeName))) { + long currentTerm = newValues.getOrDefault(coreNodeName, 0L); + // by keeping old term, we will have more information in leader election + newValues.put(recoveringTerm(coreNodeName), currentTerm); + } + newValues.put(coreNodeName, maxTerm); + return new ShardTerms(newValues, version); + } + + /** + * Mark {@code coreNodeName} as finished recovering + * @param coreNodeName of the replica + * @return null if {@code coreNodeName} has already finished recovering + */ + public ShardTerms doneRecovering(String coreNodeName) { + if (!values.containsKey(recoveringTerm(coreNodeName))) { + return null; + } + + HashMap<String, Long> newValues = new HashMap<>(values); + newValues.remove(recoveringTerm(coreNodeName)); + return new ShardTerms(newValues, version); + } + + public static String recoveringTerm(String coreNodeName) { + return coreNodeName + RECOVERING_TERM_SUFFIX; + } + + @Override + public String toString() { + return "Terms{" + + "values=" + values + + ", version=" + version + + '}'; + } + + public int getVersion() { + return version; + } + + public Map<String, Long> getTerms() { + return new HashMap<>(this.values); + } + + public boolean isRecovering(String name) { + return values.containsKey(recoveringTerm(name)); + } +} From 441abb83199088a9fd08adbd40f3ab8a73ac5a36 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 3 Dec 2019 11:28:36 +0100 Subject: [PATCH 06/11] Fix CHANGES formatting. --- lucene/CHANGES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt index 65f40987f8e..8d04bc1e7ce 100644 --- a/lucene/CHANGES.txt +++ b/lucene/CHANGES.txt @@ -251,7 +251,7 @@ Optimizations non-competitive hits. (Adrien Grand) * LUCENE-8935: BooleanQuery with no scoring clause can now early terminate the query when -the total hits is not requested. + the total hits is not requested. (Jim Ferenczi) * LUCENE-8941: Matches on wildcard queries will defer building their full disjunction until a MatchesIterator is pulled (Alan Woodward) From c8c9c1002353db3b8a4d89d21849bf67bc4f0931 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Tue, 3 Dec 2019 06:12:33 -0500 Subject: [PATCH 07/11] SOLR-13982: set security-related http response headers by default Unfortunately, as a first start this is very weak protection against e.g. XSS.
This is because some 'unsafe-xxx' rules must be present due to the insecurity of angular JS: Until SOLR-13987 is fixed, XSS & co are still easy. --- solr/CHANGES.txt | 4 ++++ solr/server/etc/jetty.xml | 41 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt index 96dfc4d9a84..24d0c5d473f 100644 --- a/solr/CHANGES.txt +++ b/solr/CHANGES.txt @@ -110,6 +110,10 @@ Upgrade Notes * SOLR-13817: Deprecate legacy SolrCache implementations. Users are encouraged to transition their configurations to use org.apache.solr.search.CaffeineCache instead. (ab) +* SOLR-13982: Some security-related http headers such as Content-Security-Policy are now set. If you have custom html served + up by Solr's http server that contains inline javascript, it will no longer execute in modern browsers. You can fix your JS + code to not run inline anymore, or edit etc/jetty.xml and weaken the CSP, or remove/alter the headers with a reverse proxy. (rmuir) + New Features --------------------- * SOLR-13821: A Package store to store and load package artifacts (noble, Ishan Chattopadhyaya) diff --git a/solr/server/etc/jetty.xml b/solr/server/etc/jetty.xml index 1f6de775a49..0a0172a9a06 100644 --- a/solr/server/etc/jetty.xml +++ b/solr/server/etc/jetty.xml @@ -82,13 +82,52 @@ - + true false requestedPath + + + + + * + Content-Security-Policy + default-src 'none'; base-uri 'none'; form-action 'self'; frame-ancestors 'none'; style-src 'self' 'unsafe-inline'; script-src 'self' 'unsafe-eval'; img-src 'self'; media-src 'self'; font-src 'self'; connect-src 'self'; + + + + + + + * + X-Content-Type-Options + nosniff + + + + + + + * + X-Frame-Options + SAMEORIGIN + + + + + + + * + X-XSS-Protection + 1; mode=block + + + + + From 9e5d11be8afb088ee09025735237623c9648a4bf Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Tue, 3 Dec 2019 06:28:19 -0500 Subject: [PATCH 08/11] fix static leaks, null stuff out in afterclass --- .../org/apache/solr/analytics/ExpressionFactoryTest.java | 6 ++++++ .../function/field/AbstractAnalyticsFieldTest.java | 3 +++ 2 files changed, 9 insertions(+) diff --git a/solr/contrib/analytics/src/test/org/apache/solr/analytics/ExpressionFactoryTest.java b/solr/contrib/analytics/src/test/org/apache/solr/analytics/ExpressionFactoryTest.java index cb5b09f7e81..3e0d0224993 100644 --- a/solr/contrib/analytics/src/test/org/apache/solr/analytics/ExpressionFactoryTest.java +++ b/solr/contrib/analytics/src/test/org/apache/solr/analytics/ExpressionFactoryTest.java @@ -24,6 +24,7 @@ import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.analytics.function.ReductionCollectionManager; import org.apache.solr.analytics.value.constant.ConstantValue; import org.apache.solr.schema.IndexSchema; +import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -55,6 +56,11 @@ public class ExpressionFactoryTest extends SolrTestCaseJ4 { indexSchema = h.getCore().getLatestSchema(); } + @AfterClass + public static void cleanUp() throws Exception { + indexSchema = null; + } + private ExpressionFactory getExpressionFactory() { ExpressionFactory fact = new ExpressionFactory(indexSchema); fact.startRequest(); diff --git a/solr/contrib/analytics/src/test/org/apache/solr/analytics/function/field/AbstractAnalyticsFieldTest.java b/solr/contrib/analytics/src/test/org/apache/solr/analytics/function/field/AbstractAnalyticsFieldTest.java index 7b20e75cd5a..3c9ee6e7c57 100644 --- 
a/solr/contrib/analytics/src/test/org/apache/solr/analytics/function/field/AbstractAnalyticsFieldTest.java +++ b/solr/contrib/analytics/src/test/org/apache/solr/analytics/function/field/AbstractAnalyticsFieldTest.java @@ -228,6 +228,9 @@ public class AbstractAnalyticsFieldTest extends SolrTestCaseJ4 { ref.decref(); ref = null; } + indexSchema = null; + searcher = null; + ref = null; } protected void checkSingleFieldValues(Map expected, Map found, Set missing) { From 6f0842eaa5e654c7f4976c95e5d1aba1e940983c Mon Sep 17 00:00:00 2001 From: Dawid Weiss Date: Tue, 3 Dec 2019 13:01:43 +0100 Subject: [PATCH 09/11] Use toLowerCase with an explicit locale in CheckLinksAndAnchors. --- solr/solr-ref-guide/tools/CheckLinksAndAnchors.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solr/solr-ref-guide/tools/CheckLinksAndAnchors.java b/solr/solr-ref-guide/tools/CheckLinksAndAnchors.java index a1641f1c589..4d356bec896 100644 --- a/solr/solr-ref-guide/tools/CheckLinksAndAnchors.java +++ b/solr/solr-ref-guide/tools/CheckLinksAndAnchors.java @@ -121,7 +121,7 @@ public class CheckLinksAndAnchors { // TODO: rename this class now that it does public static final class HtmlFileFilter implements FileFilter { public boolean accept(File pathname) { - return pathname.getName().toLowerCase().endsWith("html"); + return pathname.getName().toLowerCase(Locale.ROOT).endsWith("html"); } } From 0f61aa9516f94d72ca0eba19209a588fa9ac999c Mon Sep 17 00:00:00 2001 From: Dawid Weiss Date: Tue, 3 Dec 2019 13:07:23 +0100 Subject: [PATCH 10/11] Forbidden APIs: add missing root locale. --- solr/core/src/test/org/apache/hadoop/http/HttpServer2.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/solr/core/src/test/org/apache/hadoop/http/HttpServer2.java b/solr/core/src/test/org/apache/hadoop/http/HttpServer2.java index 5962bebe58d..757b211d159 100644 --- a/solr/core/src/test/org/apache/hadoop/http/HttpServer2.java +++ b/solr/core/src/test/org/apache/hadoop/http/HttpServer2.java @@ -33,6 +33,7 @@ import java.util.Enumeration; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Locale; import java.util.Properties; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -395,13 +396,13 @@ public final class HttpServer2 implements FilterContainer { SSLFactory.SSL_SERVER_NEED_CLIENT_AUTH_DEFAULT); keyStore = sslConf.getTrimmed(SSLFactory.SSL_SERVER_KEYSTORE_LOCATION); if (keyStore == null || keyStore.isEmpty()) { - throw new IOException(String.format("Property %s not specified", + throw new IOException(String.format(Locale.ROOT, "Property %s not specified", SSLFactory.SSL_SERVER_KEYSTORE_LOCATION)); } keyStorePassword = getPasswordString(sslConf, SSLFactory.SSL_SERVER_KEYSTORE_PASSWORD); if (keyStorePassword == null) { - throw new IOException(String.format("Property %s not specified", + throw new IOException(String.format(Locale.ROOT, "Property %s not specified", SSLFactory.SSL_SERVER_KEYSTORE_PASSWORD)); } keyStoreType = sslConf.get(SSLFactory.SSL_SERVER_KEYSTORE_TYPE, From 323b214dc3aa145c3ea47353ca877fa1709c01f7 Mon Sep 17 00:00:00 2001 From: David Smiley Date: Tue, 3 Dec 2019 12:23:30 -0500 Subject: [PATCH 11/11] GitHub PR template: inform committers this can be removed --- .github/PULL_REQUEST_TEMPLATE.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index a720b0c8cb1..18cf2e5dfe5 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ 
-1,4 +1,6 @@