SOLR-10757: Merge remote-tracking branch 'refs/remotes/origin/master'

Conflicts:
	solr/CHANGES.txt
Chris Hostetter 2017-05-31 09:52:25 -07:00
commit 7a5261a222
11 changed files with 321 additions and 20 deletions


@ -49,6 +49,34 @@ public class GeoPolygonFactory {
return makeGeoPolygon(planetModel, pointList, null);
}
/** Create a GeoConcavePolygon using the specified points. The polygon must have
* a maximum extent larger than PI. The siding of the polygon is chosen so that any
* adjacent point to a segment provides an exterior measurement and therefore,
* the polygon is a truly concave polygon. Note that this method should only be used when there is certainty
* that we are dealing with a concave polygon, e.g. the polygon has been serialized.
* If there is not such certainty, please refer to {@link GeoPolygonFactory#makeGeoPolygon(PlanetModel, List)}.
* @param pointList is a list of the GeoPoints to build an arbitrary polygon out of.
* @return a GeoConcavePolygon corresponding to what was specified.
*/
public static GeoConcavePolygon makeGeoConcavePolygon(final PlanetModel planetModel,
final List<GeoPoint> pointList) {
return new GeoConcavePolygon(planetModel, pointList);
}
/** Create a GeoConvexPolygon using the specified points. The polygon must have
* a maximum extent no larger than PI. The siding of the polygon is chosen so that any adjacent
* point to a segment provides an interior measurement and therefore
* the polygon is a truly convex polygon. Note that this method should only be used when
* there is certainty that we are dealing with a convex polygon, e.g. the polygon has been serialized.
* If there is not such certainty, please refer to {@link GeoPolygonFactory#makeGeoPolygon(PlanetModel, List)}.
* @param pointList is a list of the GeoPoints to build an arbitrary polygon out of.
* @return a GeoConvexPolygon corresponding to what was specified.
*/
public static GeoConvexPolygon makeGeoConvexPolygon(final PlanetModel planetModel,
final List<GeoPoint> pointList) {
return new GeoConvexPolygon(planetModel, pointList);
}
/** Create a GeoPolygon using the specified points and holes, using order to determine
* siding of the polygon. Much like ESRI, this method uses clockwise to indicate the space
* on the same side of the shape as being inside, and counter-clockwise to indicate the
@ -67,6 +95,41 @@ public class GeoPolygonFactory {
return makeGeoPolygon(planetModel, pointList, holes, 0.0);
}
/** Create a GeoConcavePolygon using the specified points and holes. The polygon must have
* a maximum extent larger than PI. The siding of the polygon is chosen so that any adjacent
* point to a segment provides an exterior measurement and therefore
* the polygon is a truly concave polygon. Note that this method should only be used when
* there is certainty that we are dealing with a concave polygon, e.g. the polygon has been serialized.
* If there is not such certainty, please refer to {@link GeoPolygonFactory#makeGeoPolygon(PlanetModel, List, List)}.
* @param pointList is a list of the GeoPoints to build an arbitrary polygon out of.
* @param holes is a list of polygons representing "holes" in the outside polygon. Holes describe the area outside
* each hole as being "in set". Null == none.
* @return a GeoConcavePolygon corresponding to what was specified.
*/
public static GeoConcavePolygon makeGeoConcavePolygon(final PlanetModel planetModel,
final List<GeoPoint> pointList,
final List<GeoPolygon> holes) {
return new GeoConcavePolygon(planetModel,pointList, holes);
}
/** Create a GeoConvexPolygon using the specified points and holes. The polygon must have
* a maximum extent no larger than PI. The siding of the polygon is chosen so that any adjacent
* point to a segment provides an interior measurement and therefore
* the polygon is a truly convex polygon. Note that this method should only be used when
* there is certainty that we are dealing with a convex polygon, e.g. the polygon has been serialized.
* If there is not such certainty, please refer to {@link GeoPolygonFactory#makeGeoPolygon(PlanetModel, List, List)}.
* @param pointList is a list of the GeoPoints to build an arbitrary polygon out of.
* @param holes is a list of polygons representing "holes" in the outside polygon. Holes describe the area outside
* each hole as being "in set". Null == none.
* @return a GeoConvexPolygon corresponding to what was specified.
*/
public static GeoConvexPolygon makeGeoConvexPolygon(final PlanetModel planetModel,
final List<GeoPoint> pointList,
final List<GeoPolygon> holes) {
return new GeoConvexPolygon(planetModel,pointList, holes);
}
/** Create a GeoPolygon using the specified points and holes, using order to determine
* siding of the polygon. Much like ESRI, this method uses clockwise to indicate the space
* on the same side of the shape as being inside, and counter-clockwise to indicate the
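For orientation, here is a minimal usage sketch of the new factory methods. The coordinates are borrowed from the convex-polygon test added below; the PolygonFactorySketch class itself is illustrative and not part of this commit.

import java.util.Arrays;
import java.util.List;
import org.apache.lucene.spatial3d.geom.*;

public class PolygonFactorySketch {
  public static void main(String[] args) {
    // GeoPoint takes a planet model plus latitude/longitude in radians.
    List<GeoPoint> points = Arrays.asList(
        new GeoPoint(PlanetModel.SPHERE, 0.0, 0.0),
        new GeoPoint(PlanetModel.SPHERE, 0.0, 0.5),
        new GeoPoint(PlanetModel.SPHERE, 0.5, 0.5),
        new GeoPoint(PlanetModel.SPHERE, 0.5, 0.0));

    // General entry point: works out the siding (and may build a composite polygon) for you.
    GeoPolygon general = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points);

    // New shortcut: only safe when the polygon is already known to be convex,
    // e.g. because it was serialized as such.
    GeoConvexPolygon convex = GeoPolygonFactory.makeGeoConvexPolygon(PlanetModel.SPHERE, points);

    GeoPoint inside = new GeoPoint(PlanetModel.SPHERE, 0.25, 0.25);
    System.out.println(general.isWithin(inside) + " " + convex.isWithin(inside));
  }
}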


@ -19,6 +19,7 @@ package org.apache.lucene.spatial3d.geom;
import java.util.ArrayList;
import java.util.List;
import java.util.BitSet;
import java.util.Collections;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
@ -967,4 +968,66 @@ shape:
assertTrue(solid.isWithin(point));
}
@Test
public void testConcavePolygon() {
ArrayList<GeoPoint> points = new ArrayList<>();
points.add(new GeoPoint(PlanetModel.SPHERE, -0.1, -0.5));
points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.6));
points.add(new GeoPoint(PlanetModel.SPHERE, 0.1, -0.5));
points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.4));
GeoPolygon polygon = (GeoPolygon)((GeoCompositePolygon)GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points)).shapes.get(0);
GeoPolygon polygonConcave = GeoPolygonFactory.makeGeoConcavePolygon(PlanetModel.SPHERE,points);
assertEquals(polygon,polygonConcave);
}
@Test
public void testConcavePolygonWithHole() {
ArrayList<GeoPoint> points = new ArrayList<>();
points.add(new GeoPoint(PlanetModel.SPHERE, -1.1, -1.5));
points.add(new GeoPoint(PlanetModel.SPHERE, 1.0, -1.6));
points.add(new GeoPoint(PlanetModel.SPHERE, 1.1, -1.5));
points.add(new GeoPoint(PlanetModel.SPHERE, 1.0, -1.4));
ArrayList<GeoPoint> hole_points = new ArrayList<>();
hole_points.add(new GeoPoint(PlanetModel.SPHERE, -0.1, -0.5));
hole_points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.6));
hole_points.add(new GeoPoint(PlanetModel.SPHERE, 0.1, -0.5));
hole_points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.4));
GeoPolygon hole = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE,hole_points);
GeoPolygon polygon = (GeoPolygon)((GeoCompositePolygon)GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points,Collections.singletonList(hole))).shapes.get(0);
GeoPolygon polygon2 = GeoPolygonFactory.makeGeoConcavePolygon(PlanetModel.SPHERE,points,Collections.singletonList(hole));
assertEquals(polygon,polygon2);
}
@Test
public void testConvexPolygon() {
ArrayList<GeoPoint> points = new ArrayList<>();
points.add(new GeoPoint(PlanetModel.SPHERE, 0, 0));
points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, 0.5));
points.add(new GeoPoint(PlanetModel.SPHERE, 0.5, 0.5));
points.add(new GeoPoint(PlanetModel.SPHERE, 0.5, 0));
GeoPolygon polygon = (GeoPolygon)((GeoCompositePolygon)GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points)).shapes.get(0);
GeoPolygon polygon2 = GeoPolygonFactory.makeGeoConvexPolygon(PlanetModel.SPHERE,points);
assertEquals(polygon,polygon2);
}
@Test
public void testConvexPolygonWithHole() {
ArrayList<GeoPoint> points = new ArrayList<>();
points.add(new GeoPoint(PlanetModel.SPHERE, -1, -1));
points.add(new GeoPoint(PlanetModel.SPHERE, -1, 1));
points.add(new GeoPoint(PlanetModel.SPHERE, 1, 1));
points.add(new GeoPoint(PlanetModel.SPHERE, 1, -1));
ArrayList<GeoPoint> hole_points = new ArrayList<>();
hole_points.add(new GeoPoint(PlanetModel.SPHERE, -0.1, -0.5));
hole_points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.6));
hole_points.add(new GeoPoint(PlanetModel.SPHERE, 0.1, -0.5));
hole_points.add(new GeoPoint(PlanetModel.SPHERE, 0.0, -0.4));
GeoPolygon hole = GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE,hole_points);
GeoPolygon polygon = (GeoPolygon)((GeoCompositePolygon)GeoPolygonFactory.makeGeoPolygon(PlanetModel.SPHERE, points,Collections.singletonList(hole))).shapes.get(0);
GeoPolygon polygon2 = GeoPolygonFactory.makeGeoConvexPolygon(PlanetModel.SPHERE,points,Collections.singletonList(hole));
assertEquals(polygon,polygon2);
}
}


@ -198,6 +198,9 @@ Other Changes
* SOLR-10755: delete/refactor many solrj deprecations (hossman)
* SOLR-10752: replicationFactor (nrtReplicas) default is 0 if tlogReplicas is specified when creating a collection
(Tomás Fernández Löbbe)
* SOLR-10757: delete/refactor/cleanup CollectionAdminRequest deprecations (hossman)
==================  6.7.0 ==================


@ -95,9 +95,9 @@ public class CreateCollectionCmd implements Cmd {
// look at the replication factor and see if it matches reality
// if it does not, find best nodes to create more cores
int numTlogReplicas = message.getInt(TLOG_REPLICAS, 0);
int numNrtReplicas = message.getInt(NRT_REPLICAS, message.getInt(REPLICATION_FACTOR, numTlogReplicas>0?0:1));
int numPullReplicas = message.getInt(PULL_REPLICAS, 0);
ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
final String async = message.getStr(ASYNC);
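The net effect of the new defaulting is that a collection created with only tlog replicas no longer gets an implicit NRT replica. A small illustrative sketch (the ReplicaDefaults class and resolveNrtReplicas helper are hypothetical, written only to mirror the message.getInt fallback chain above):

public class ReplicaDefaults {
  // Mirrors the fallback chain above: an explicit nrtReplicas wins, then replicationFactor,
  // and only then the default, which is 0 when tlog replicas were requested and 1 otherwise.
  static int resolveNrtReplicas(Integer nrtReplicas, Integer replicationFactor, int tlogReplicas) {
    if (nrtReplicas != null) return nrtReplicas;
    if (replicationFactor != null) return replicationFactor;
    return tlogReplicas > 0 ? 0 : 1;
  }

  public static void main(String[] args) {
    System.out.println(resolveNrtReplicas(null, null, 0)); // 1: a plain collection still gets one NRT replica
    System.out.println(resolveNrtReplicas(null, null, 4)); // 0: a tlog-only collection gets no implicit NRT replica
    System.out.println(resolveNrtReplicas(null, 2, 4));    // 2: an explicit replicationFactor still wins
  }
}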


@ -29,6 +29,7 @@ import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrServerException;
@ -116,9 +117,22 @@ public class TestPullReplica extends SolrCloudTestCase {
@Repeat(iterations=2) // 2 times to make sure cleanup is complete and we can create the same collection
public void testCreateDelete() throws Exception {
try {
if (random().nextBoolean()) {
CollectionAdminRequest.createCollection(collectionName, "conf", 2, 1, 0, 3)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
} else {
// Sometimes don't use SolrJ.
String url = String.format(Locale.ROOT, "%s/admin/collections?action=CREATE&name=%s&numShards=%s&pullReplicas=%s&maxShardsPerNode=%s",
cluster.getRandomJetty(random()).getBaseUrl(),
collectionName,
2, // numShards
3, // pullReplicas
100); // maxShardsPerNode
url = url + pickRandom("", "&nrtReplicas=1", "&replicationFactor=1"); // These options should all mean the same
HttpGet createCollectionRequest = new HttpGet(url);
cluster.getSolrClient().getHttpClient().execute(createCollectionRequest);
}
boolean reloaded = false;
while (true) {
DocCollection docCollection = getCollectionState(collectionName);


@ -30,6 +30,7 @@ import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.util.LuceneTestCase.Slow;
import org.apache.solr.client.solrj.SolrClient;
@ -144,9 +145,22 @@ public class TestTlogReplica extends SolrCloudTestCase {
@Repeat(iterations=2) // 2 times to make sure cleanup is complete and we can create the same collection
public void testCreateDelete() throws Exception {
try {
if (random().nextBoolean()) {
CollectionAdminRequest.createCollection(collectionName, "conf", 2, 0, 4, 0)
.setMaxShardsPerNode(100)
.process(cluster.getSolrClient());
} else {
// Sometimes don't use SolrJ
String url = String.format(Locale.ROOT, "%s/admin/collections?action=CREATE&name=%s&numShards=%s&tlogReplicas=%s&maxShardsPerNode=%s",
cluster.getRandomJetty(random()).getBaseUrl(),
collectionName,
2, // numShards
4, // tlogReplicas
100); // maxShardsPerNode
HttpGet createCollectionRequest = new HttpGet(url);
cluster.getSolrClient().getHttpClient().execute(createCollectionRequest);
}
boolean reloaded = false;
while (true) {
DocCollection docCollection = getCollectionState(collectionName);


@ -20,7 +20,16 @@
A `codecFactory` can be specified in `solrconfig.xml` to determine which Lucene {lucene-javadocs}/core/org/apache/lucene/codecs/Codec.html[`Codec`] is used when writing the index to disk.
If not specified, Lucene's default codec is implicitly used.
There are two alternatives to Lucene's default codec:
. {solr-javadocs}/solr-core/org/apache/solr/core/SchemaCodecFactory.html[`solr.SchemaCodecFactory`]
. {solr-javadocs}/solr-core/org/apache/solr/core/SimpleTextCodecFactory.html[`solr.SimpleTextCodecFactory`]
=== solr.SchemaCodecFactory
`solr.SchemaCodecFactory` supports 2 key features:
* Schema based per-fieldtype configuration for `docValuesFormat` and `postingsFormat` - see the <<field-type-definitions-and-properties.adoc#field-type-properties,Field Type Properties>> section for more details.
* A `compressionMode` option:
@ -35,3 +44,16 @@ Example:
<str name="compressionMode">BEST_COMPRESSION</str>
</codecFactory>
----
=== solr.SimpleTextCodecFactory
This factory for Lucene's `SimpleTextCodec` produces a plain text human-readable index format.
CAUTION: *FOR RECREATIONAL USE ONLY*. This codec should never be used in production. `SimpleTextCodec` is relatively slow and takes up a large amount of disk space. Its use should be limited to educational and debugging purposes.
Example:
[source,xml]
----
<codecFactory class="solr.SimpleTextCodecFactory"/>
----


@ -29,9 +29,9 @@ However, pay special attention to cache and autowarm settings as they can have a
[[NearRealTimeSearching-CommitsandOptimizing]]
== Commits and Optimizing
A commit operation makes index changes visible to new search requests. A *hard commit* uses the transaction log to get the id of the latest document changes, and also calls `fsync` on the index files to ensure they have been flushed to stable storage and no data loss will result from a power failure. The current transaction log is closed and a new one is opened. See the "transaction log" discussion below for data loss issues.
A *soft commit* is much faster since it only makes index changes visible and does not `fsync` index files, write a new index descriptor, or start a new transaction log. Search collections that have NRT requirements (that want index changes to be quickly visible to searches) will want to soft commit often but hard commit less frequently. A softCommit may be "less expensive", but it is not free, since it can slow throughput. See the "transaction log" discussion below for data loss issues.
An *optimize* is like a *hard commit* except that it forces all of the index segments to be merged into a single segment first. Depending on the use, this operation should be performed infrequently (e.g., nightly), if at all, since it involves reading and re-writing the entire index. Segments are normally merged over time anyway (as determined by the merge policy), and optimize just forces these merges to occur immediately.
@ -48,6 +48,15 @@ Soft commit takes uses two parameters: `maxDocs` and `maxTime`.
Use `maxDocs` and `maxTime` judiciously to fine-tune your commit strategies.
[[NearRealTimeSearching-TransactionLogs]]
=== Transaction Logs (tlogs)
Transaction logs are a "rolling window" of at least the last `N` (default 100) documents indexed. Tlogs are configured in solrconfig.xml, including the value of `N`. The current transaction log is closed and a new one opened each time any variety of hard commit occurs. Soft commits have no effect on the transaction log.
When tlogs are enabled, documents being added to the index are written to the tlog before the indexing call returns to the client. In the event of an un-graceful shutdown (power loss, JVM crash, `kill -9` etc) any documents written to the tlog that was open when Solr stopped are replayed on startup.
When Solr is shut down gracefully (i.e. using the `bin/solr stop` command and the like) Solr will close the tlog file and index segments so no replay will be necessary on startup.
[[NearRealTimeSearching-AutoCommits]]
=== AutoCommits
@ -75,6 +84,7 @@ It's better to use `maxTime` rather than `maxDocs` to modify an `autoSoftCommit`
|===
|Parameter |Valid Attributes |Description
|`waitSearcher` |true, false |Block until a new searcher is opened and registered as the main query searcher, making the changes visible. Default is true.
|`openSearcher` |true, false |Open a new searcher, making all documents indexed so far visible for searching. Default is true.
|`softCommit` |true, false |Perform a soft commit. This will refresh the view of the index faster, but without guarantees that the document is stably stored. Default is false.
|`expungeDeletes` |true, false |Valid for `commit` only. This parameter purges deleted data from segments. The default is false.
|`maxSegments` |integer |Valid for `optimize` only. Optimize down to at most this number of segments. The default is 1.


@ -425,7 +425,7 @@ This option is useful if you are running multiple standalone Solr instances on t
The `bin/solr` script allows enabling or disabling Basic Authentication, allowing you to configure authentication from the command line.
Currently, this script only enables Basic Authentication, and is only available when using SolrCloud mode.
=== Enabling Basic Authentication
@ -435,7 +435,7 @@ TIP: For more information about Solr's authentication plugins, see the section <
The `bin/solr auth enable` command makes several changes to enable Basic Authentication:
* Creates a `security.json` file and uploads it to ZooKeeper. The `security.json` file will look similar to:
+
[source,json]
----
@ -484,15 +484,14 @@ When *true*, blocks all unauthenticated users from accessing Solr. This defaults
`-updateIncludeFileOnly`::
When *true*, only the settings in `bin/solr.in.sh` or `bin\solr.in.cmd` will be updated, and `security.json` will not be created.
`-z`::
Defines the ZooKeeper connect string. This is useful if you want to enable authentication before all your Solr nodes have come up.
`-d`::
Defines the Solr server directory, by default `$SOLR_HOME/server`. It is not common to need to override the default, and is only needed if you have customized the `$SOLR_HOME` directory path.
`-s`::
Defines the location of `solr.solr.home`, which by default is `server/solr`. If you have multiple instances of Solr on the same host, or if you have customized the `$SOLR_HOME` directory path, you likely need to define this.
=== Disabling Basic Authentication


@ -17,6 +17,9 @@
package org.apache.solr.client.solrj.io.stream;
import java.io.IOException;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@ -53,6 +56,8 @@ public class TimeSeriesStream extends TupleStream implements Expressible {
private String end;
private String gap;
private String field;
private String format;
private DateTimeFormatter formatter;
private Metric[] metrics;
private List<Tuple> tuples = new ArrayList();
@ -70,8 +75,9 @@ public class TimeSeriesStream extends TupleStream implements Expressible {
String field,
String start,
String end,
String gap,
String format) throws IOException {
init(collection, params, field, metrics, start, end, gap, format, zkHost);
}
public TimeSeriesStream(StreamExpression expression, StreamFactory factory) throws IOException{
@ -82,9 +88,17 @@ public class TimeSeriesStream extends TupleStream implements Expressible {
StreamExpressionNamedParameter endExpression = factory.getNamedOperand(expression, "end");
StreamExpressionNamedParameter fieldExpression = factory.getNamedOperand(expression, "field");
StreamExpressionNamedParameter gapExpression = factory.getNamedOperand(expression, "gap");
StreamExpressionNamedParameter formatExpression = factory.getNamedOperand(expression, "format");
StreamExpressionNamedParameter qExpression = factory.getNamedOperand(expression, "q");
StreamExpressionNamedParameter zkHostExpression = factory.getNamedOperand(expression, "zkHost");
List<StreamExpression> metricExpressions = factory.getExpressionOperandsRepresentingTypes(expression, Expressible.class, Metric.class);
if(qExpression == null) {
throw new IOException("The timeseries expression requires the q parameter");
}
String start = null;
if(startExpression != null) {
start = ((StreamExpressionValue)startExpression.getParameter()).getValue();
@ -105,6 +119,11 @@ public class TimeSeriesStream extends TupleStream implements Expressible {
field = ((StreamExpressionValue)fieldExpression.getParameter()).getValue();
}
String format = null;
if(formatExpression != null) {
format = ((StreamExpressionValue)formatExpression.getParameter()).getValue();
}
// Collection Name
if(null == collectionName){
throw new IOException(String.format(Locale.ROOT,"invalid expression %s - collectionName expected as first operand",expression));
@ -149,7 +168,7 @@ public class TimeSeriesStream extends TupleStream implements Expressible {
}
// We've got all the required items
init(collectionName, params, field, metrics, start, end, gap, format, zkHost);
}
public String getCollection() {
@ -163,6 +182,7 @@ public class TimeSeriesStream extends TupleStream implements Expressible {
String start,
String end,
String gap,
String format,
String zkHost) throws IOException {
this.zkHost = zkHost;
this.collection = collection;
@ -175,6 +195,10 @@ public class TimeSeriesStream extends TupleStream implements Expressible {
this.field = field;
this.params = params;
this.end = end;
if(format != null) {
this.format = format;
formatter = DateTimeFormatter.ofPattern(format, Locale.ROOT);
}
}
@Override
@ -201,6 +225,8 @@ public class TimeSeriesStream extends TupleStream implements Expressible {
expression.addParameter(new StreamExpressionNamedParameter("end", end));
expression.addParameter(new StreamExpressionNamedParameter("gap", gap));
expression.addParameter(new StreamExpressionNamedParameter("field", gap));
expression.addParameter(new StreamExpressionNamedParameter("format", format));
// zkHost
expression.addParameter(new StreamExpressionNamedParameter("zkHost", zkHost));
@ -348,6 +374,12 @@ public class TimeSeriesStream extends TupleStream implements Expressible {
for(int b=0; b<allBuckets.size(); b++) {
NamedList bucket = (NamedList)allBuckets.get(b);
Object val = bucket.get("val");
if(formatter != null) {
LocalDateTime localDateTime = LocalDateTime.ofInstant(((java.util.Date) val).toInstant(), ZoneOffset.UTC);
val = localDateTime.format(formatter);
}
Tuple t = currentTuple.clone();
t.put(field, val);
int m = 0;
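For reference, a minimal standalone sketch of what the new format option does to each facet bucket value; the FormatSketch class, the sample date, and the yyyy-MM pattern are illustrative only.

import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.Date;
import java.util.Locale;

public class FormatSketch {
  public static void main(String[] args) {
    // The facet bucket value arrives as a java.util.Date; the stream converts it to UTC
    // and renders it with the user-supplied pattern instead of returning the raw Date.
    Date bucketValue = new Date(0L); // 1970-01-01T00:00:00Z
    DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy-MM", Locale.ROOT);
    String formatted = LocalDateTime.ofInstant(bucketValue.toInstant(), ZoneOffset.UTC).format(formatter);
    System.out.println(formatted); // prints 1970-01
  }
}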


@ -5368,6 +5368,87 @@ public class StreamExpressionTest extends SolrCloudTestCase {
assertTrue(tuples.get(3).getDouble("max(price_f)").equals(400D));
assertTrue(tuples.get(3).getDouble("min(price_f)").equals(400D));
expr = "timeseries("+COLLECTIONORALIAS+", q=\"*:*\", start=\"2013-01-01T01:00:00.000Z\", " +
"end=\"2016-12-01T01:00:00.000Z\", " +
"gap=\"+1YEAR\", " +
"field=\"test_dt\", " +
"format=\"yyyy\", " +
"count(*), sum(price_f), max(price_f), min(price_f))";
paramsLoc = new ModifiableSolrParams();
paramsLoc.set("expr", expr);
paramsLoc.set("qt", "/stream");
solrStream = new SolrStream(url, paramsLoc);
solrStream.setStreamContext(context);
tuples = getTuples(solrStream);
assertTrue(tuples.size() == 4);
assertTrue(tuples.get(0).get("test_dt").equals("2013"));
assertTrue(tuples.get(0).getLong("count(*)").equals(100L));
assertTrue(tuples.get(0).getDouble("sum(price_f)").equals(10000D));
assertTrue(tuples.get(0).getDouble("max(price_f)").equals(100D));
assertTrue(tuples.get(0).getDouble("min(price_f)").equals(100D));
assertTrue(tuples.get(1).get("test_dt").equals("2014"));
assertTrue(tuples.get(1).getLong("count(*)").equals(50L));
assertTrue(tuples.get(1).getDouble("sum(price_f)").equals(25000D));
assertTrue(tuples.get(1).getDouble("max(price_f)").equals(500D));
assertTrue(tuples.get(1).getDouble("min(price_f)").equals(500D));
assertTrue(tuples.get(2).get("test_dt").equals("2015"));
assertTrue(tuples.get(2).getLong("count(*)").equals(50L));
assertTrue(tuples.get(2).getDouble("sum(price_f)").equals(15000D));
assertTrue(tuples.get(2).getDouble("max(price_f)").equals(300D));
assertTrue(tuples.get(2).getDouble("min(price_f)").equals(300D));
assertTrue(tuples.get(3).get("test_dt").equals("2016"));
assertTrue(tuples.get(3).getLong("count(*)").equals(50L));
assertTrue(tuples.get(3).getDouble("sum(price_f)").equals(20000D));
assertTrue(tuples.get(3).getDouble("max(price_f)").equals(400D));
assertTrue(tuples.get(3).getDouble("min(price_f)").equals(400D));
expr = "timeseries("+COLLECTIONORALIAS+", q=\"*:*\", start=\"2013-01-01T01:00:00.000Z\", " +
"end=\"2016-12-01T01:00:00.000Z\", " +
"gap=\"+1YEAR\", " +
"field=\"test_dt\", " +
"format=\"yyyy-MM\", " +
"count(*), sum(price_f), max(price_f), min(price_f))";
paramsLoc = new ModifiableSolrParams();
paramsLoc.set("expr", expr);
paramsLoc.set("qt", "/stream");
solrStream = new SolrStream(url, paramsLoc);
solrStream.setStreamContext(context);
tuples = getTuples(solrStream);
assertTrue(tuples.size() == 4);
assertTrue(tuples.get(0).get("test_dt").equals("2013-01"));
assertTrue(tuples.get(0).getLong("count(*)").equals(100L));
assertTrue(tuples.get(0).getDouble("sum(price_f)").equals(10000D));
assertTrue(tuples.get(0).getDouble("max(price_f)").equals(100D));
assertTrue(tuples.get(0).getDouble("min(price_f)").equals(100D));
assertTrue(tuples.get(1).get("test_dt").equals("2014-01"));
assertTrue(tuples.get(1).getLong("count(*)").equals(50L));
assertTrue(tuples.get(1).getDouble("sum(price_f)").equals(25000D));
assertTrue(tuples.get(1).getDouble("max(price_f)").equals(500D));
assertTrue(tuples.get(1).getDouble("min(price_f)").equals(500D));
assertTrue(tuples.get(2).get("test_dt").equals("2015-01"));
assertTrue(tuples.get(2).getLong("count(*)").equals(50L));
assertTrue(tuples.get(2).getDouble("sum(price_f)").equals(15000D));
assertTrue(tuples.get(2).getDouble("max(price_f)").equals(300D));
assertTrue(tuples.get(2).getDouble("min(price_f)").equals(300D));
assertTrue(tuples.get(3).get("test_dt").equals("2016-01"));
assertTrue(tuples.get(3).getLong("count(*)").equals(50L));
assertTrue(tuples.get(3).getDouble("sum(price_f)").equals(20000D));
assertTrue(tuples.get(3).getDouble("max(price_f)").equals(400D));
assertTrue(tuples.get(3).getDouble("min(price_f)").equals(400D));
}
@Test