Karl Wright 2016-04-21 08:31:14 -04:00
commit 2ff8bb0c8b
23 changed files with 895 additions and 116 deletions

.gitignore vendored (2 changes)
View File

@@ -48,5 +48,7 @@ solr/contrib/morphlines-core/test-lib/
solr/core/test-lib/
solr/server/logs/
solr/server/solr/zoo_data/
solr/server/solr-webapp
solr/server/start.jar

View File

@@ -43,6 +43,7 @@
<module group="Solr" filepath="$PROJECT_DIR$/solr/core/src/java/solr-core.iml" />
<module group="Solr" filepath="$PROJECT_DIR$/solr/core/src/solr-core-tests.iml" />
<module group="Solr" filepath="$PROJECT_DIR$/solr/server/server.iml" />
<module group="Solr" filepath="$PROJECT_DIR$/solr/solrj/src/java/solrj.iml" />
<module group="Solr" filepath="$PROJECT_DIR$/solr/solrj/src/solrj-tests.iml" />
<module group="Solr" filepath="$PROJECT_DIR$/solr/test-framework/solr-test-framework.iml" />

View File

@@ -340,8 +340,16 @@
<option name="TEST_SEARCH_SCOPE"><value defaultName="singleModule" /></option>
<patterns><pattern testClass=".*\.Test[^.]*|.*\.[^.]*Test" /></patterns>
</configuration>
<configuration default="false" name="solrcloud" type="Application" factoryName="Application" singleton="true">
<option name="MAIN_CLASS_NAME" value="org.eclipse.jetty.start.Main" />
<option name="VM_PARAMETERS" value="-DzkRun -Dhost=127.0.0.1 -Duser.timezone=UTC -Djetty.home=$PROJECT_DIR$/solr/server -Dsolr.solr.home=$PROJECT_DIR$/solr/server/solr -Dsolr.install.dir=$PROJECT_DIR$/solr -Dsolr.log=$PROJECT_DIR$/solr/server/logs/solr.log" />
<option name="PROGRAM_PARAMETERS" value="--module=http" />
<option name="WORKING_DIRECTORY" value="file://$PROJECT_DIR$/solr/server" />
<option name="PARENT_ENVS" value="true" />
<module name="server" />
</configuration>
<list size="41">
<list size="42">
<item index="0" class="java.lang.String" itemvalue="JUnit.Lucene core" />
<item index="1" class="java.lang.String" itemvalue="JUnit.Module analyzers-common" />
<item index="2" class="java.lang.String" itemvalue="JUnit.Module analyzers-icu" />
@@ -383,6 +391,7 @@
<item index="38" class="java.lang.String" itemvalue="JUnit.Solr uima contrib" />
<item index="39" class="java.lang.String" itemvalue="JUnit.Solr velocity contrib" />
<item index="40" class="java.lang.String" itemvalue="JUnit.Solrj" />
<item index="41" class="java.lang.String" itemvalue="Application.solrcloud" />
</list>
</component>
</project>

View File

@@ -0,0 +1,19 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="JAVA_MODULE" version="4">
<component name="NewModuleRootManager" inherit-compiler-output="false">
<output url="file://$MODULE_DIR$/../../idea-build/solr/server/classes/java" />
<exclude-output />
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="sourceFolder" forTests="false" />
<orderEntry type="module-library">
<library>
<CLASSES>
<root url="jar://$MODULE_DIR$/start.jar!/" />
</CLASSES>
<JAVADOC />
<SOURCES />
</library>
</orderEntry>
</component>
</module>

View File

@@ -19,6 +19,9 @@ New Features
* LUCENE-7069: Add LatLonPoint.nearest, to find nearest N points to a
provided query point (Mike McCandless)
* LUCENE-7234: Added InetAddressPoint.nextDown/nextUp to easily generate range
queries with excluded bounds. (Adrien Grand)
API Changes
* LUCENE-7184: Refactor LatLonPoint encoding methods to new GeoEncodingUtils
@@ -88,6 +91,11 @@ Bug Fixes
match the underlying queries' (lower|upper)Term optionality logic.
(Kaneshanathan Srivisagan, Christine Poerschke)
* LUCENE-7209: Fixed explanations of FunctionScoreQuery. (Adrien Grand)
* LUCENE-7232: Fixed InetAddressPoint.newPrefixQuery, which was generating an
incorrect query when the prefix length was not a multiple of 8. (Adrien Grand)
Documentation
* LUCENE-7223: Improve XXXPoint javadocs to make it clear that you

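The LUCENE-7234 entry above is easiest to see in code. A minimal sketch of turning the inclusive-only range query into one with exclusive bounds via the new nextUp/nextDown helpers; the field name "ip" and the addresses are illustrative, not from this commit:

import java.net.InetAddress;
import org.apache.lucene.document.InetAddressPoint;
import org.apache.lucene.search.Query;

public class ExclusiveIpRangeSketch {
  public static void main(String[] args) throws Exception {
    InetAddress lower = InetAddress.getByName("10.0.0.0");
    InetAddress upper = InetAddress.getByName("10.0.0.255");
    // newRangeQuery is inclusive on both ends; shifting each bound by one
    // address yields the exclusive range (10.0.0.0, 10.0.0.255).
    Query q = InetAddressPoint.newRangeQuery("ip",
        InetAddressPoint.nextUp(lower),     // smallest address > 10.0.0.0
        InetAddressPoint.nextDown(upper));  // largest address < 10.0.0.255
    System.out.println(q);
  }
}
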
View File

@@ -55,7 +55,7 @@ public class FunctionQuery extends Query {
protected class FunctionWeight extends Weight {
protected final IndexSearcher searcher;
protected float queryNorm = 1f;
protected float queryNorm, boost, queryWeight;
protected final Map context;
public FunctionWeight(IndexSearcher searcher) throws IOException {
@@ -63,6 +63,7 @@ public class FunctionQuery extends Query {
this.searcher = searcher;
this.context = ValueSource.newContext(searcher);
func.createWeight(context, searcher);
normalize(1f, 1f);
}
@Override
@@ -70,22 +71,24 @@
@Override
public float getValueForNormalization() throws IOException {
return queryNorm * queryNorm;
return queryWeight * queryWeight;
}
@Override
public void normalize(float norm, float boost) {
this.queryNorm = norm * boost;
this.queryNorm = norm;
this.boost = boost;
this.queryWeight = norm * boost;
}
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
return new AllScorer(context, this, queryNorm);
return new AllScorer(context, this, queryWeight);
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
return ((AllScorer)scorer(context)).explain(doc, queryNorm);
return ((AllScorer)scorer(context)).explain(doc);
}
}
@@ -132,13 +135,13 @@ public class FunctionQuery extends Query {
return 1;
}
public Explanation explain(int doc, float queryNorm) throws IOException {
public Explanation explain(int doc) throws IOException {
float sc = qWeight * vals.floatVal(doc);
return Explanation.match(sc, "FunctionQuery(" + func + "), product of:",
vals.explain(doc),
Explanation.match(queryNorm, "boost"),
Explanation.match(weight.queryNorm = 1f, "queryNorm"));
Explanation.match(weight.boost, "boost"),
Explanation.match(weight.queryNorm, "queryNorm"));
}
}

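The FunctionQuery change above splits the old single queryNorm field into queryNorm, boost, and queryWeight, so explain() can report boost and queryNorm as separate, truthful factors instead of folding both into one. A standalone sketch of the weight arithmetic, using the same numbers the new test below asserts (the values themselves are hypothetical):

public class FunctionWeightMathSketch {
  public static void main(String[] args) {
    float norm = 0.5f;          // queryNorm, e.g. from ClassicSimilarity
    float boost = 2f;           // e.g. from a wrapping BoostQuery
    float queryWeight = norm * boost;          // 1.0f, what the scorer multiplies by
    float functionValue = 5f;                  // ConstValueSource(5)
    float score = queryWeight * functionValue; // 5.0f
    // explain() now reports: 5.0 = 5.0 (function) * 2.0 (boost) * 0.5 (queryNorm)
    System.out.println(score);
  }
}
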
View File

@@ -16,6 +16,8 @@
*/
package org.apache.lucene.queries;
import java.io.IOException;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.function.FunctionQuery;
import org.apache.lucene.queries.function.valuesource.ConstValueSource;
@@ -23,9 +25,14 @@ import org.apache.lucene.search.BaseExplanationTestCase;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.search.similarities.ClassicSimilarity;
import org.apache.lucene.search.similarities.Similarity;
public class TestCustomScoreExplanations extends BaseExplanationTestCase {
public void testOneTerm() throws Exception {
@@ -49,4 +56,43 @@ public class TestCustomScoreExplanations extends BaseExplanationTestCase {
BooleanQuery bq = bqB.build();
qtest(new BoostQuery(bq, 6), new int[] { 0,1,2,3 });
}
public void testSubExplanations() throws IOException {
Query query = new FunctionQuery(new ConstValueSource(5));
IndexSearcher searcher = newSearcher(BaseExplanationTestCase.searcher.getIndexReader());
searcher.setSimilarity(new BM25Similarity());
Explanation expl = searcher.explain(query, 0);
// function
assertEquals(5f, expl.getDetails()[0].getValue(), 0f);
// boost
assertEquals("boost", expl.getDetails()[1].getDescription());
assertEquals(1f, expl.getDetails()[1].getValue(), 0f);
// norm
assertEquals("queryNorm", expl.getDetails()[2].getDescription());
assertEquals(1f, expl.getDetails()[2].getValue(), 0f);
query = new BoostQuery(query, 2);
expl = searcher.explain(query, 0);
// function
assertEquals(5f, expl.getDetails()[0].getValue(), 0f);
// boost
assertEquals("boost", expl.getDetails()[1].getDescription());
assertEquals(2f, expl.getDetails()[1].getValue(), 0f);
// norm
assertEquals("queryNorm", expl.getDetails()[2].getDescription());
assertEquals(1f, expl.getDetails()[2].getValue(), 0f);
searcher.setSimilarity(new ClassicSimilarity()); // in order to have a queryNorm != 1
expl = searcher.explain(query, 0);
// function
assertEquals(5f, expl.getDetails()[0].getValue(), 0f);
// boost
assertEquals("boost", expl.getDetails()[1].getDescription());
assertEquals(2f, expl.getDetails()[1].getValue(), 0f);
// norm
assertEquals("queryNorm", expl.getDetails()[2].getDescription());
assertEquals(0.5f, expl.getDetails()[2].getValue(), 0f);
}
}

View File

@@ -26,6 +26,7 @@ import org.apache.lucene.search.PointInSetQuery;
import org.apache.lucene.search.PointRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.NumericUtils;
import org.apache.lucene.util.StringHelper;
/**
@@ -65,6 +66,53 @@ public class InetAddressPoint extends Field {
TYPE.freeze();
}
/** The minimum value that an ip address can hold. */
public static final InetAddress MIN_VALUE;
/** The maximum value that an ip address can hold. */
public static final InetAddress MAX_VALUE;
static {
MIN_VALUE = decode(new byte[BYTES]);
byte[] maxValueBytes = new byte[BYTES];
Arrays.fill(maxValueBytes, (byte) 0xFF);
MAX_VALUE = decode(maxValueBytes);
}
/**
* Return the {@link InetAddress} that compares immediately greater than
* {@code address}.
* @throws ArithmeticException if the provided address is the
* {@link #MAX_VALUE maximum ip address}
*/
public static InetAddress nextUp(InetAddress address) {
if (address.equals(MAX_VALUE)) {
throw new ArithmeticException("Overflow: there is no greater InetAddress than "
+ address.getHostAddress());
}
byte[] delta = new byte[BYTES];
delta[BYTES-1] = 1;
byte[] nextUpBytes = new byte[InetAddressPoint.BYTES];
NumericUtils.add(InetAddressPoint.BYTES, 0, encode(address), delta, nextUpBytes);
return decode(nextUpBytes);
}
/**
* Return the {@link InetAddress} that compares immediately less than
* {@code address}.
* @throws ArithmeticException if the provided address is the
* {@link #MIN_VALUE minimum ip address}
*/
public static InetAddress nextDown(InetAddress address) {
if (address.equals(MIN_VALUE)) {
throw new ArithmeticException("Underflow: there is no smaller InetAddress than "
+ address.getHostAddress());
}
byte[] delta = new byte[BYTES];
delta[BYTES-1] = 1;
byte[] nextDownBytes = new byte[InetAddressPoint.BYTES];
NumericUtils.subtract(InetAddressPoint.BYTES, 0, encode(address), delta, nextDownBytes);
return decode(nextDownBytes);
}
/** Change the values of this field */
public void setInetAddressValue(InetAddress value) {
if (value == null) {
@@ -174,8 +222,9 @@ public class InetAddressPoint extends Field {
byte lower[] = value.getAddress();
byte upper[] = value.getAddress();
for (int i = prefixLength; i < 8 * lower.length; i++) {
lower[i >> 3] &= ~(1 << (i & 7));
upper[i >> 3] |= 1 << (i & 7);
int m = 1 << (7 - (i & 7));
lower[i >> 3] &= ~m;
upper[i >> 3] |= m;
}
try {
return newRangeQuery(field, InetAddress.getByAddress(lower), InetAddress.getByAddress(upper));
@@ -186,6 +235,12 @@ public class InetAddressPoint extends Field {
/**
* Create a range query for network addresses.
* <p>
* You can have half-open ranges (which are in fact &lt;/&le; or &gt;/&ge; queries)
* by setting {@code lowerValue = InetAddressPoint.MIN_VALUE} or
* {@code upperValue = InetAddressPoint.MAX_VALUE}.
* <p> Ranges are inclusive. For exclusive ranges, pass {@code InetAddressPoint#nextUp(lowerValue)}
or {@code InetAddressPoint#nextDown(upperValue)}.
*
* @param field field name. must not be {@code null}.
* @param lowerValue lower portion of the range (inclusive). must not be null.

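The LUCENE-7232 fix above hinges on bit order: InetAddress bytes are most-significant-bit first, so the mask for bit i is 1 << (7 - (i & 7)); the old 1 << (i & 7) walked each byte from the wrong end whenever the prefix length was not a multiple of 8. A standalone sketch reproducing the corrected lower/upper computation for 1.2.3.213/25, the same case the test below checks:

import java.net.InetAddress;

public class PrefixMaskSketch {
  public static void main(String[] args) throws Exception {
    byte[] lower = InetAddress.getByName("1.2.3.213").getAddress();
    byte[] upper = lower.clone();
    int prefixLength = 25;
    for (int i = prefixLength; i < 8 * lower.length; i++) {
      int m = 1 << (7 - (i & 7)); // MSB-first: bit 0 is the high bit of byte 0
      lower[i >> 3] &= ~m;        // clear host bits -> 1.2.3.128
      upper[i >> 3] |= m;         // set host bits   -> 1.2.3.255
    }
    System.out.println(InetAddress.getByAddress(lower).getHostAddress()
        + " - " + InetAddress.getByAddress(upper).getHostAddress());
  }
}
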
View File

@@ -119,4 +119,58 @@ public class TestInetAddressPoint extends LuceneTestCase {
assertEquals(q1.hashCode(), q2.hashCode());
assertFalse(q1.equals(InetAddressPoint.newSetQuery("a", InetAddress.getByName("1.2.3.3"), InetAddress.getByName("1.2.3.7"))));
}
public void testPrefixQuery() throws Exception {
assertEquals(
InetAddressPoint.newRangeQuery("a", InetAddress.getByName("1.2.3.0"), InetAddress.getByName("1.2.3.255")),
InetAddressPoint.newPrefixQuery("a", InetAddress.getByName("1.2.3.127"), 24));
assertEquals(
InetAddressPoint.newRangeQuery("a", InetAddress.getByName("1.2.3.128"), InetAddress.getByName("1.2.3.255")),
InetAddressPoint.newPrefixQuery("a", InetAddress.getByName("1.2.3.213"), 25));
assertEquals(
InetAddressPoint.newRangeQuery("a", InetAddress.getByName("2001::a000:0"), InetAddress.getByName("2001::afff:ffff")),
InetAddressPoint.newPrefixQuery("a", InetAddress.getByName("2001::a6bd:fc80"), 100));
}
public void testNextUp() throws Exception {
assertEquals(InetAddress.getByName("::1"),
InetAddressPoint.nextUp(InetAddress.getByName("::")));
assertEquals(InetAddress.getByName("::1:0"),
InetAddressPoint.nextUp(InetAddress.getByName("::ffff")));
assertEquals(InetAddress.getByName("1.2.4.0"),
InetAddressPoint.nextUp(InetAddress.getByName("1.2.3.255")));
assertEquals(InetAddress.getByName("0.0.0.0"),
InetAddressPoint.nextUp(InetAddress.getByName("::fffe:ffff:ffff")));
assertEquals(InetAddress.getByName("::1:0:0:0"),
InetAddressPoint.nextUp(InetAddress.getByName("255.255.255.255")));
ArithmeticException e = expectThrows(ArithmeticException.class,
() -> InetAddressPoint.nextUp(InetAddress.getByName("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")));
assertEquals("Overflow: there is no greater InetAddress than ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", e.getMessage());
}
public void testNextDown() throws Exception {
assertEquals(InetAddress.getByName("ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffe"),
InetAddressPoint.nextDown(InetAddress.getByName("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff")));
assertEquals(InetAddress.getByName("::ffff"),
InetAddressPoint.nextDown(InetAddress.getByName("::1:0")));
assertEquals(InetAddress.getByName("1.2.3.255"),
InetAddressPoint.nextDown(InetAddress.getByName("1.2.4.0")));
assertEquals(InetAddress.getByName("::fffe:ffff:ffff"),
InetAddressPoint.nextDown(InetAddress.getByName("0.0.0.0")));
assertEquals(InetAddress.getByName("255.255.255.255"),
InetAddressPoint.nextDown(InetAddress.getByName("::1:0:0:0")));
ArithmeticException e = expectThrows(ArithmeticException.class,
() -> InetAddressPoint.nextDown(InetAddress.getByName("::")));
assertEquals("Underflow: there is no smaller InetAddress than 0:0:0:0:0:0:0:0", e.getMessage());
}
}

View File

@@ -104,6 +104,13 @@ New Features
* SOLR-8918: Adds Streaming to the admin page under the collections section. Includes
ability to see graphically the expression explanation (Dennis Gove)
* SOLR-8913: When using a shared filesystem we should store data dir and tlog dir locations in
the cluster state. (Mark Miller)
* SOLR-8809: Implement Connection.prepareStatement (Kevin Risden)
* SOLR-9020: Implement StatementImpl/ResultSetImpl get/set fetch* methods and proper errors for traversal methods (Kevin Risden)
Bug Fixes
----------------------
@@ -158,6 +165,8 @@ Optimizations
* SOLR-8937: bin/post (SimplePostTool) now streams the standard input instead of buffering fully.
(David Smiley)
* SOLR-8973: Zookeeper frenzy when a core is first created. (Janmejay Singh, Scott Blum, shalin)
Other Changes
----------------------
* SOLR-7516: Improve javadocs for JavaBinCodec, ObjectResolver and enforce the single-usage policy.
@@ -188,6 +197,10 @@ Other Changes
* SOLR-8985: Added back support for 'includeDynamic' flag to /schema/fields endpoint (noble)
* SOLR-9015: Adds SelectStream as a default function in the StreamHandler (Dennis Gove)
* SOLR-8929: Add an idea module for solr/server to enable launching start.jar (Scott Blum, Steve Rowe)
================== 6.0.0 ==================
Consult the LUCENE_CHANGES.txt file for additional, low level, changes in this release

View File

@@ -1165,9 +1165,8 @@ public final class ZkController {
if (coreNodeName != null) {
props.put(ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
}
if (ClusterStateUtil.isAutoAddReplicas(getZkStateReader(), collection)) {
try (SolrCore core = cc.getCore(cd.getName())) {
if (core != null && core.getDirectoryFactory().isSharedStorage()) {
if (core != null && core.getDirectoryFactory().isSharedStorage()) {
props.put("dataDir", core.getDataDir());
UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
@@ -1478,11 +1477,13 @@
}
publish(cd, Replica.State.DOWN, false, true);
DocCollection collection = zkStateReader.getClusterState().getCollectionOrNull(cd.getCloudDescriptor().getCollectionName());
if (collection != null) {
log.info("Registering watch for collection {}", cd.getCloudDescriptor().getCollectionName());
zkStateReader.addCollectionWatch(cd.getCloudDescriptor().getCollectionName());
}
String collectionName = cd.getCloudDescriptor().getCollectionName();
DocCollection collection = zkStateReader.getClusterState().getCollectionOrNull(collectionName);
log.info(collection == null ?
"Collection {} not visible yet, but flagging it so a watch is registered when it becomes visible" :
"Registering watch for collection {}",
collectionName);
zkStateReader.addCollectionWatch(collectionName);
} catch (KeeperException e) {
log.error("", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR, "", e);

View File

@@ -125,6 +125,7 @@ public class StreamHandler extends RequestHandlerBase implements SolrCoreAware,
.withFunctionName("daemon", DaemonStream.class)
.withFunctionName("shortestPath", ShortestPathStream.class)
.withFunctionName("gatherNodes", GatherNodesStream.class)
.withFunctionName("select", SelectStream.class)
// metrics
.withFunctionName("min", MinMetric.class)

View File

@@ -32,6 +32,9 @@ import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.cloud.BasicDistributedZkTest;
import org.apache.solr.cloud.ChaosMonkey;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.params.CollectionParams.CollectionAction;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
@@ -46,6 +49,7 @@ import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
@@ -153,6 +157,17 @@ public class StressHdfsTest extends BasicDistributedZkTest {
createCollection(DELETE_DATA_DIR_COLLECTION, nShards, rep, maxReplicasPerNode);
waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false);
// data dirs should be in zk, SOLR-8913
ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
Slice slice = clusterState.getSlice(DELETE_DATA_DIR_COLLECTION, "shard1");
assertNotNull(clusterState.getSlices(DELETE_DATA_DIR_COLLECTION).toString(), slice);
Collection<Replica> replicas = slice.getReplicas();
for (Replica replica : replicas) {
assertNotNull(replica.getProperties().toString(), replica.get("dataDir"));
assertNotNull(replica.getProperties().toString(), replica.get("ulogDir"));
}
cloudClient.setDefaultCollection(DELETE_DATA_DIR_COLLECTION);
cloudClient.getZkStateReader().forceUpdateCollection(DELETE_DATA_DIR_COLLECTION);

View File

@@ -26,6 +26,7 @@ import org.apache.solr.cloud.Overseer;
import org.apache.solr.cloud.OverseerTest;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.cloud.ZkTestServer;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.Slice;
@@ -179,4 +180,63 @@ public class ZkStateReaderTest extends SolrTestCaseJ4 {
server.shutdown();
}
}
public void testWatchedCollectionCreation() throws Exception {
String zkDir = createTempDir("testWatchedCollectionCreation").toFile().getAbsolutePath();
ZkTestServer server = new ZkTestServer(zkDir);
SolrZkClient zkClient = null;
try {
server.run();
AbstractZkTestCase.tryCleanSolrZkNode(server.getZkHost());
AbstractZkTestCase.makeSolrZkNode(server.getZkHost());
zkClient = new SolrZkClient(server.getZkAddress(), OverseerTest.DEFAULT_CONNECTION_TIMEOUT);
ZkController.createClusterZkNodes(zkClient);
ZkStateReader reader = new ZkStateReader(zkClient);
reader.createClusterStateWatchersAndUpdate();
reader.addCollectionWatch("c1");
// Initially there should be no c1 collection.
assertNull(reader.getClusterState().getCollectionRef("c1"));
zkClient.makePath(ZkStateReader.COLLECTIONS_ZKNODE + "/c1", true);
reader.forceUpdateCollection("c1");
// Still no c1 collection, despite a collection path.
assertNull(reader.getClusterState().getCollectionRef("c1"));
ZkStateWriter writer = new ZkStateWriter(reader, new Overseer.Stats());
// create new collection with stateFormat = 2
DocCollection state = new DocCollection("c1", new HashMap<String, Slice>(), new HashMap<String, Object>(), DocRouter.DEFAULT, 0, ZkStateReader.CLUSTER_STATE + "/c1/state.json");
ZkWriteCommand wc = new ZkWriteCommand("c1", state);
writer.enqueueUpdate(reader.getClusterState(), wc, null);
writer.writePendingUpdates();
assertTrue(zkClient.exists(ZkStateReader.COLLECTIONS_ZKNODE + "/c1/state.json", true));
//reader.forceUpdateCollection("c1");
for (int i = 0; i < 100; ++i) {
Thread.sleep(50);
ClusterState.CollectionRef ref = reader.getClusterState().getCollectionRef("c1");
if (ref != null) {
break;
}
}
ClusterState.CollectionRef ref = reader.getClusterState().getCollectionRef("c1");
assertNotNull(ref);
assertFalse(ref.isLazilyLoaded());
assertEquals(2, ref.get().getStateFormat());
} finally {
IOUtils.close(zkClient);
server.shutdown();
}
}
}

View File

@@ -88,17 +88,17 @@ class ConnectionImpl implements Connection {
@Override
public PreparedStatement prepareStatement(String sql) throws SQLException {
return null;
return new PreparedStatementImpl(this, sql);
}
@Override
public CallableStatement prepareCall(String sql) throws SQLException {
return null;
throw new UnsupportedOperationException();
}
@Override
public String nativeSQL(String sql) throws SQLException {
return null;
throw new UnsupportedOperationException();
}
@Override

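With prepareStatement now returning a real object instead of null (SOLR-8809), the driver can be used roughly as below. A sketch only: it assumes the documented jdbc:solr URL form, and the ZooKeeper host, collection, and field names are illustrative. Note that the PreparedStatementImpl added in this commit (next file) leaves all parameter setters as no-ops, so the SQL must be complete as written:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

public class SolrPreparedStatementSketch {
  public static void main(String[] args) throws Exception {
    String url = "jdbc:solr://localhost:9983?collection=collection1";
    try (Connection con = DriverManager.getConnection(url);
         PreparedStatement stmt = con.prepareStatement(
             "select id, a_i from collection1 order by a_i desc limit 5");
         ResultSet rs = stmt.executeQuery()) {
      while (rs.next()) {
        System.out.println(rs.getString("id") + " " + rs.getObject("a_i"));
      }
    }
  }
}
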
View File

@@ -0,0 +1,394 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.client.solrj.io.sql;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.net.URL;
import java.sql.Array;
import java.sql.Blob;
import java.sql.Clob;
import java.sql.Date;
import java.sql.NClob;
import java.sql.ParameterMetaData;
import java.sql.PreparedStatement;
import java.sql.Ref;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLXML;
import java.sql.Time;
import java.sql.Timestamp;
import java.util.Calendar;
class PreparedStatementImpl extends StatementImpl implements PreparedStatement {
private final String sql;
PreparedStatementImpl(ConnectionImpl connection, String sql) {
super(connection);
this.sql = sql;
}
@Override
public ResultSet executeQuery() throws SQLException {
return super.executeQuery(this.sql);
}
@Override
public int executeUpdate() throws SQLException {
return super.executeUpdate(this.sql);
}
@Override
public boolean execute() throws SQLException {
return super.execute(this.sql);
}
@Override
public void clearParameters() throws SQLException {
}
@Override
public ResultSetMetaData getMetaData() throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public ParameterMetaData getParameterMetaData() throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public void addBatch() throws SQLException {
throw new UnsupportedOperationException();
}
@Override
public void setNull(int parameterIndex, int sqlType) throws SQLException {
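// No-op, as are all the parameter setters below: binding is not implemented yet,
// so the SQL string passed to the constructor is executed verbatim.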
}
@Override
public void setBoolean(int parameterIndex, boolean x) throws SQLException {
}
@Override
public void setByte(int parameterIndex, byte x) throws SQLException {
}
@Override
public void setShort(int parameterIndex, short x) throws SQLException {
}
@Override
public void setInt(int parameterIndex, int x) throws SQLException {
}
@Override
public void setLong(int parameterIndex, long x) throws SQLException {
}
@Override
public void setFloat(int parameterIndex, float x) throws SQLException {
}
@Override
public void setDouble(int parameterIndex, double x) throws SQLException {
}
@Override
public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException {
}
@Override
public void setString(int parameterIndex, String x) throws SQLException {
}
@Override
public void setBytes(int parameterIndex, byte[] x) throws SQLException {
}
@Override
public void setDate(int parameterIndex, Date x) throws SQLException {
}
@Override
public void setTime(int parameterIndex, Time x) throws SQLException {
}
@Override
public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException {
}
@Override
public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException {
}
@Override
public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException {
}
@Override
public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException {
}
@Override
public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
}
@Override
public void setObject(int parameterIndex, Object x) throws SQLException {
}
@Override
public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException {
}
@Override
public void setRef(int parameterIndex, Ref x) throws SQLException {
}
@Override
public void setBlob(int parameterIndex, Blob x) throws SQLException {
}
@Override
public void setClob(int parameterIndex, Clob x) throws SQLException {
}
@Override
public void setArray(int parameterIndex, Array x) throws SQLException {
}
@Override
public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException {
}
@Override
public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException {
}
@Override
public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException {
}
@Override
public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException {
}
@Override
public void setURL(int parameterIndex, URL x) throws SQLException {
}
@Override
public void setRowId(int parameterIndex, RowId x) throws SQLException {
}
@Override
public void setNString(int parameterIndex, String value) throws SQLException {
}
@Override
public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException {
}
@Override
public void setNClob(int parameterIndex, NClob value) throws SQLException {
}
@Override
public void setClob(int parameterIndex, Reader reader, long length) throws SQLException {
}
@Override
public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException {
}
@Override
public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException {
}
@Override
public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException {
}
@Override
public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException {
}
@Override
public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException {
}
@Override
public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException {
}
@Override
public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException {
}
@Override
public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException {
}
@Override
public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException {
}
@Override
public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException {
}
@Override
public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException {
}
@Override
public void setClob(int parameterIndex, Reader reader) throws SQLException {
}
@Override
public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException {
}
@Override
public void setNClob(int parameterIndex, Reader reader) throws SQLException {
}
// Methods below cannot be called from a PreparedStatement based on JDBC spec
@Override
public ResultSet executeQuery(String sql) throws SQLException {
throw new SQLException("Cannot be called from PreparedStatement");
}
@Override
public int executeUpdate(String sql) throws SQLException {
throw new SQLException("Cannot be called from PreparedStatement");
}
@Override
public boolean execute(String sql) throws SQLException {
throw new SQLException("Cannot be called from PreparedStatement");
}
@Override
public void addBatch(String sql) throws SQLException {
throw new SQLException("Cannot be called from PreparedStatement");
}
@Override
public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
throw new SQLException("Cannot be called from PreparedStatement");
}
@Override
public int executeUpdate(String sql, int columnIndexes[]) throws SQLException {
throw new SQLException("Cannot be called from PreparedStatement");
}
@Override
public int executeUpdate(String sql, String columnNames[]) throws SQLException {
throw new SQLException("Cannot be called from PreparedStatement");
}
@Override
public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
throw new SQLException("Cannot be called from PreparedStatement");
}
@Override
public boolean execute(String sql, int columnIndexes[]) throws SQLException {
throw new SQLException("Cannot be called from PreparedStatement");
}
@Override
public boolean execute(String sql, String columnNames[]) throws SQLException {
throw new SQLException("Cannot be called from PreparedStatement");
}
@Override
public long executeLargeUpdate(String sql) throws SQLException {
throw new SQLException("Cannot be called from PreparedStatement");
}
@Override
public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
throw new SQLException("Cannot be called from PreparedStatement");
}
@Override
public long executeLargeUpdate(String sql, int columnIndexes[]) throws SQLException {
throw new SQLException("Cannot be called from PreparedStatement");
}
@Override
public long executeLargeUpdate(String sql, String columnNames[]) throws SQLException {
throw new SQLException("Cannot be called from PreparedStatement");
}
}

View File

@@ -31,6 +31,7 @@ import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.RowId;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.sql.SQLWarning;
import java.sql.SQLXML;
import java.sql.Statement;
@@ -480,92 +481,133 @@ class ResultSetImpl implements ResultSet {
@Override
public boolean isBeforeFirst() throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
throw new SQLFeatureNotSupportedException();
}
@Override
public boolean isAfterLast() throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
throw new SQLFeatureNotSupportedException();
}
@Override
public boolean isFirst() throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
throw new SQLFeatureNotSupportedException();
}
@Override
public boolean isLast() throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
throw new SQLFeatureNotSupportedException();
}
@Override
public void beforeFirst() throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
throw new SQLException("beforeFirst() not supported on ResultSet with type TYPE_FORWARD_ONLY");
}
@Override
public void afterLast() throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
throw new SQLException("afterLast() not supported on ResultSet with type TYPE_FORWARD_ONLY");
}
@Override
public boolean first() throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
throw new SQLException("first() not supported on ResultSet with type TYPE_FORWARD_ONLY");
}
@Override
public boolean last() throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
throw new SQLException("last() not supported on ResultSet with type TYPE_FORWARD_ONLY");
}
@Override
public int getRow() throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
throw new SQLFeatureNotSupportedException();
}
@Override
public boolean absolute(int row) throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
throw new SQLException("absolute() not supported on ResultSet with type TYPE_FORWARD_ONLY");
}
@Override
public boolean relative(int rows) throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
throw new SQLException("relative() not supported on ResultSet with type TYPE_FORWARD_ONLY");
}
@Override
public boolean previous() throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
throw new SQLException("previous() not supported on ResultSet with type TYPE_FORWARD_ONLY");
}
@Override
public void setFetchDirection(int direction) throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
if(direction != ResultSet.FETCH_FORWARD) {
throw new SQLException("Direction must be FETCH_FORWARD since ResultSet " +
"type is TYPE_FORWARD_ONLY");
}
}
@Override
public int getFetchDirection() throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
return ResultSet.FETCH_FORWARD;
}
@Override
public void setFetchSize(int rows) throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
if(rows < 0) {
throw new SQLException("Rows must be >= 0");
}
}
@Override
public int getFetchSize() throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
return 0;
}
@Override
public int getType() throws SQLException {
checkClosed();
return ResultSet.TYPE_FORWARD_ONLY;
}
@Override
public int getConcurrency() throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
return ResultSet.CONCUR_READ_ONLY;
}
@Override

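A short sketch of the ResultSet behavior the replacements above establish: forward-only, read-only, fetch hints accepted but not honored, and traversal methods failing with SQLException rather than UnsupportedOperationException. The rs argument would come from the JDBC sketch earlier:

import java.sql.ResultSet;
import java.sql.SQLException;

public class ForwardOnlyResultSetSketch {
  static void demonstrate(ResultSet rs) throws SQLException {
    System.out.println(rs.getType() == ResultSet.TYPE_FORWARD_ONLY);       // true
    System.out.println(rs.getConcurrency() == ResultSet.CONCUR_READ_ONLY); // true
    rs.setFetchDirection(ResultSet.FETCH_FORWARD); // the only accepted direction
    rs.setFetchSize(10);     // accepted as a hint; getFetchSize() still returns 0
    try {
      rs.previous();         // backward traversal now fails with a clear message
    } catch (SQLException expected) {
      System.out.println(expected.getMessage());
    }
  }
}
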
View File

@@ -51,8 +51,13 @@ class StatementImpl implements Statement {
this.connection = connection;
}
@Override
public ResultSet executeQuery(String sql) throws SQLException {
private void checkClosed() throws SQLException {
if(isClosed()) {
throw new SQLException("Statement is closed.");
}
}
private ResultSet executeQueryImpl(String sql) throws SQLException {
try {
if(this.currentResultSet != null) {
this.currentResultSet.close();
@@ -107,6 +112,11 @@ class StatementImpl implements Statement {
}
}
@Override
public ResultSet executeQuery(String sql) throws SQLException {
return this.executeQueryImpl(sql);
}
@Override
public int executeUpdate(String sql) throws SQLException {
return 0;
@@ -167,18 +177,14 @@
@Override
public SQLWarning getWarnings() throws SQLException {
if(isClosed()) {
throw new SQLException("Statement is closed.");
}
checkClosed();
return this.currentWarning;
}
@Override
public void clearWarnings() throws SQLException {
if(isClosed()) {
throw new SQLException("Statement is closed.");
}
checkClosed();
this.currentWarning = null;
}
@@ -203,14 +209,12 @@
@Override
public ResultSet getResultSet() throws SQLException {
return this.executeQuery(this.currentSQL);
return this.executeQueryImpl(this.currentSQL);
}
@Override
public int getUpdateCount() throws SQLException {
if(isClosed()) {
throw new SQLException("Statement is closed");
}
checkClosed();
// TODO Add logic when update statements are added to JDBC.
return -1;
@@ -218,9 +222,7 @@
@Override
public boolean getMoreResults() throws SQLException {
if(isClosed()) {
throw new SQLException("Statement is closed");
}
checkClosed();
// Currently multiple result sets are not possible yet
this.currentResultSet.close();
@@ -229,32 +231,48 @@
@Override
public void setFetchDirection(int direction) throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
if(direction != ResultSet.FETCH_FORWARD) {
throw new SQLException("Direction must be ResultSet.FETCH_FORWARD currently");
}
}
@Override
public int getFetchDirection() throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
return ResultSet.FETCH_FORWARD;
}
@Override
public void setFetchSize(int rows) throws SQLException {
checkClosed();
if(rows < 0) {
throw new SQLException("Rows must be >= 0");
}
}
@Override
public int getFetchSize() throws SQLException {
checkClosed();
return 0;
}
@Override
public int getResultSetConcurrency() throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
return ResultSet.CONCUR_READ_ONLY;
}
@Override
public int getResultSetType() throws SQLException {
throw new UnsupportedOperationException();
checkClosed();
return ResultSet.TYPE_FORWARD_ONLY;
}
@Override

View File

@@ -443,13 +443,6 @@ public class ZkStateReader implements Closeable {
// To move a collection's state to format2, first create the new state2 format node, then remove legacy entry.
Map<String, ClusterState.CollectionRef> result = new LinkedHashMap<>(legacyCollectionStates);
// Are there any interesting collections that disappeared from the legacy cluster state?
for (String coll : interestingCollections) {
if (!result.containsKey(coll) && !watchedCollectionStates.containsKey(coll)) {
new StateWatcher(coll).refreshAndWatch(true);
}
}
// Add state format2 collections, but don't override legacy collection states.
for (Map.Entry<String, DocCollection> entry : watchedCollectionStates.entrySet()) {
result.putIfAbsent(entry.getKey(), new ClusterState.CollectionRef(entry.getValue()));
@@ -1048,6 +1041,7 @@ public class ZkStateReader implements Closeable {
private DocCollection fetchCollectionState(String coll, Watcher watcher) throws KeeperException, InterruptedException {
String collectionPath = getCollectionPath(coll);
while (true) {
try {
Stat stat = new Stat();
byte[] data = zkClient.getData(collectionPath, watcher, stat, true);
@@ -1056,9 +1050,19 @@ public class ZkStateReader implements Closeable {
ClusterState.CollectionRef collectionRef = state.getCollectionStates().get(coll);
return collectionRef == null ? null : collectionRef.get();
} catch (KeeperException.NoNodeException e) {
if (watcher != null) {
// Leave an exists watch in place in case a state.json is created later.
Stat exists = zkClient.exists(collectionPath, watcher, true);
if (exists != null) {
// Rare race condition: we tried to fetch the data and couldn't find it, then found that it exists.
// Loop and try again.
continue;
}
}
return null;
}
}
}
public static String getCollectionPath(String coll) {
return COLLECTIONS_ZKNODE+"/"+coll + "/state.json";

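The new NoNodeException branch above turns fetchCollectionState into a fetch-or-watch loop: read the node, and if it is missing, leave an exists-watch so a later creation triggers a refresh; if the node appears between the two calls, retry. The same pattern in generic form against the raw ZooKeeper client (a sketch, not the ZkStateReader code itself):

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class FetchOrWatchSketch {
  static byte[] fetchOrWatch(ZooKeeper zk, String path, Watcher watcher)
      throws KeeperException, InterruptedException {
    while (true) {
      try {
        return zk.getData(path, watcher, new Stat()); // data watch set on success
      } catch (KeeperException.NoNodeException e) {
        if (zk.exists(path, watcher) == null) {
          return null; // still absent; exists-watch armed for creation
        }
        // Rare race: node created between getData and exists; loop and re-read.
      }
    }
  }
}
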
View File

@@ -20,6 +20,7 @@ import java.io.File;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.Statement;
@@ -500,16 +501,9 @@ public class JdbcTest extends AbstractFullDistribZkTestBase {
con.clearWarnings();
assertNull(con.getWarnings());
try (Statement statement = con.createStatement()) {
assertEquals(con, statement.getConnection());
assertNull(statement.getWarnings());
statement.clearWarnings();
assertNull(statement.getWarnings());
assertEquals(0, statement.getFetchSize());
statement.setFetchSize(0);
assertEquals(0, statement.getFetchSize());
checkStatement(con, statement);
try (ResultSet rs = statement.executeQuery(sql)) {
assertEquals(statement, rs.getStatement());
@@ -530,7 +524,49 @@ public class JdbcTest extends AbstractFullDistribZkTestBase {
assertFalse(statement.getMoreResults());
}
try (PreparedStatement statement = con.prepareStatement(sql)) {
checkStatement(con, statement);
try (ResultSet rs = statement.executeQuery()) {
assertEquals(statement, rs.getStatement());
checkResultSetMetadata(rs);
checkResultSet(rs);
}
assertTrue(statement.execute());
assertEquals(-1, statement.getUpdateCount());
try (ResultSet rs = statement.getResultSet()) {
assertEquals(statement, rs.getStatement());
checkResultSetMetadata(rs);
checkResultSet(rs);
}
assertFalse(statement.getMoreResults());
}
}
}
private void checkStatement(Connection con, Statement statement) throws Exception {
assertEquals(con, statement.getConnection());
assertNull(statement.getWarnings());
statement.clearWarnings();
assertNull(statement.getWarnings());
assertEquals(ResultSet.TYPE_FORWARD_ONLY, statement.getResultSetType());
assertEquals(ResultSet.CONCUR_READ_ONLY, statement.getResultSetConcurrency());
assertEquals(ResultSet.FETCH_FORWARD, statement.getFetchDirection());
statement.setFetchDirection(ResultSet.FETCH_FORWARD);
assertEquals(ResultSet.FETCH_FORWARD, statement.getFetchDirection());
assertEquals(0, statement.getFetchSize());
statement.setFetchSize(0);
assertEquals(0, statement.getFetchSize());
}
private void checkResultSetMetadata(ResultSet rs) throws Exception {
@@ -572,12 +608,21 @@ public class JdbcTest extends AbstractFullDistribZkTestBase {
}
private void checkResultSet(ResultSet rs) throws Exception {
assertEquals(ResultSet.TYPE_FORWARD_ONLY, rs.getType());
assertNull(rs.getWarnings());
rs.clearWarnings();
assertNull(rs.getWarnings());
assertEquals(ResultSet.TYPE_FORWARD_ONLY, rs.getType());
assertEquals(ResultSet.CONCUR_READ_ONLY, rs.getConcurrency());
assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection());
rs.setFetchDirection(ResultSet.FETCH_FORWARD);
assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection());
assertEquals(0, rs.getFetchSize());
rs.setFetchSize(10);
assertEquals(0, rs.getFetchSize());
assertTrue(rs.next());
assertEquals(14L, rs.getObject("a_i"));

View File

@@ -184,20 +184,20 @@ limitations under the License.
top: 2px;
}
#content #stream #result #explanation #graph-content
#content #stream #result #explanation #explanation-content
{
min-height: 50px;
width: 100%
}
#content #stream #result #explanation #graph-content .node circle
#content #stream #result #explanation #explanation-content .node circle
{
color: #c48f00;
stroke: #c48f00;
fill: #c48f00;
}
#content #stream #result #explanation #graph-content .link
#content #stream #result #explanation #explanation-content .link
{
fill: none;
stroke: #e0e0e0;
@@ -205,28 +205,28 @@
}
#content #stream #result #explanation #legend .datastore circle,
#content #stream #result #explanation #graph-content .node.datastore circle
#content #stream #result #explanation #explanation-content .node.datastore circle
{
stroke: #3800c4;
fill: #3800c4;
}
#content #stream #result #explanation #legend .stream-source circle,
#content #stream #result #explanation #graph-content .node.stream-source circle
#content #stream #result #explanation #explanation-content .node.stream-source circle
{
stroke: #21a9ec;
fill: #21a9ec;
}
#content #stream #result #explanation #legend .stream-decorator circle,
#content #stream #result #explanation #graph-content .node.stream-decorator circle
#content #stream #result #explanation #explanation-content .node.stream-decorator circle
{
stroke: #cb21ec;
fill: #cb21ec;
}
#content #stream #result #explanation #legend .graph-source circle,
#content #stream #result #explanation #graph-content .node.graph-source circle
#content #stream #result #explanation #explanation-content .node.graph-source circle
{
stroke: #21eca9;
fill: #21eca9;

View File

@@ -29,8 +29,6 @@ solrAdminApp.controller('StreamController',
$scope.doStream = function() {
// alert("doing stream")
var params = {};
params.core = $routeParams.core;
params.handler = $scope.qt;
@@ -51,7 +49,7 @@
if (undefined != jsonData["explanation"]) {
$scope.showExplanation = true;
graphSubController($scope, jsonData["explanation"])
streamGraphSubController($scope, jsonData["explanation"])
delete jsonData["explanation"]
} else {
$scope.showExplanation = false;
@@ -76,18 +74,10 @@
}
);
var graphSubController = function($scope, explanation) {
var streamGraphSubController = function($scope, explanation) {
$scope.showGraph = true;
$scope.pos = 0;
$scope.rows = 8;
$scope.helperData = {
protocol: [],
host: [],
hostname: [],
port: [],
pathname: []
};
$scope.resetGraph = function() {
$scope.pos = 0;
@@ -134,7 +124,7 @@ var graphSubController = function($scope, explanation) {
$scope.showPaging = false;
$scope.isRadial = false;
$scope.graphData = recurse(data, 1);
$scope.explanationData = recurse(data, 1);
$scope.depth = maxDepth + 1;
$scope.leafCount = leafCount;
@@ -143,17 +133,16 @@
$scope.initGraph(explanation);
};
solrAdminApp.directive('foograph', function(Constants) {
solrAdminApp.directive('explanationGraph', function(Constants) {
return {
restrict: 'EA',
scope: {
data: "=",
leafCount: "=",
depth: "=",
helperData: "=",
isRadial: "="
depth: "="
},
link: function(scope, element, attrs) {
var helper_path_class = function(p) {
var classes = ['link'];

View File

@@ -38,7 +38,7 @@ limitations under the License.
<div ng-show="showExplanation" id="explanation" class="clearfix">
<div id="frame">
<div foograph id="graph-content" data="graphData" depth="depth" leaf-count="leafCount" helper-data="helperData" is-radial="false" class="content clearfix" ng-show="showGraph">
<div explanation-graph id="explanation-content" data="explanationData" depth="depth" leaf-count="leafCount" class="content clearfix" ng-show="showGraph">
<div id="legend">
<svg width="100%" height="15">
<g transform="translate(5,10)" class="stream-decorator"><circle r="4.5"></circle></g>