Karl Wright 2016-04-17 15:37:20 -04:00
commit aafdc372d9
13 changed files with 214 additions and 124 deletions

View File

@@ -48,13 +48,6 @@
<artifactId>lucene-test-framework</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>
<artifactId>lucene-spatial</artifactId>
<version>${project.version}</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
@lucene-sandbox.internal.dependencies@
@lucene-sandbox.external.dependencies@
@lucene-sandbox.internal.test.dependencies@

View File

@@ -23,39 +23,4 @@
<import file="../module-build.xml"/>
<target name="compile-test-spatial" depends="init" if="module.has.tests">
<ant dir="${common.dir}/spatial" target="compile-test" inheritAll="false"/>
</target>
<path id="classpath">
<path refid="base.classpath"/>
<pathelement path="${spatial.jar}"/>
</path>
<target name="compile-core" depends="jar-spatial,common.compile-core" />
<path id="test.classpath">
<pathelement location="${build.dir}/classes/java"/>
<pathelement location="${build.dir}/classes/test"/>
<pathelement location="${common.dir}/build/spatial/classes/test"/>
<path refid="test.base.classpath"/>
<pathelement path="${spatial.jar}"/>
<path refid="junit-path"/>
</path>
<path id="junit.classpath">
<path refid="test.classpath"/>
<pathelement path="${java.class.path}"/>
</path>
<target name="javadocs" depends="javadocs-spatial,compile-core,check-javadocs-uptodate"
unless="javadocs-uptodate-${name}">
<invoke-module-javadoc>
<links>
<link href="../spatial"/>
</links>
</invoke-module-javadoc>
</target>
<target name="compile-test" depends="jar-spatial,compile-test-spatial,common.compile-test" />
</project>

View File

@@ -18,7 +18,7 @@ package org.apache.lucene.search;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.spatial.util.BaseGeoPointTestCase;
import org.apache.lucene.geo.BaseGeoPointTestCase;
import org.apache.lucene.geo.Polygon;
import org.apache.lucene.geo.GeoEncodingUtils;

View File

@@ -19,11 +19,11 @@ package org.apache.lucene.spatial.geopoint.search;
import org.apache.lucene.document.Document;
import org.apache.lucene.search.Query;
import org.apache.lucene.spatial.util.GeoEncodingUtils;
import org.apache.lucene.geo.BaseGeoPointTestCase;
import org.apache.lucene.geo.Polygon;
import org.apache.lucene.geo.Rectangle;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.spatial.geopoint.document.GeoPointField.TermEncoding;
import org.apache.lucene.spatial.util.BaseGeoPointTestCase;
/**
* random testing for GeoPoint query logic

View File

@@ -19,11 +19,11 @@ package org.apache.lucene.spatial.geopoint.search;
import org.apache.lucene.document.Document;
import org.apache.lucene.search.Query;
import org.apache.lucene.spatial.util.GeoEncodingUtils;
import org.apache.lucene.geo.BaseGeoPointTestCase;
import org.apache.lucene.geo.Polygon;
import org.apache.lucene.geo.Rectangle;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.spatial.geopoint.document.GeoPointField.TermEncoding;
import org.apache.lucene.spatial.util.BaseGeoPointTestCase;
/**
* random testing for GeoPoint query logic (with deprecated numeric encoding)

View File

@@ -14,7 +14,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.spatial.util;
package org.apache.lucene.geo;
import java.io.IOException;
import java.text.DecimalFormat;
@@ -772,12 +772,15 @@ public abstract class BaseGeoPointTestCase extends LuceneTestCase {
static final boolean rectContainsPoint(Rectangle rect, double pointLat, double pointLon) {
assert Double.isNaN(pointLat) == false;
if (pointLat < rect.minLat || pointLat > rect.maxLat) {
return false;
}
if (rect.minLon <= rect.maxLon) {
return GeoRelationUtils.pointInRectPrecise(pointLat, pointLon, rect.minLat, rect.maxLat, rect.minLon, rect.maxLon);
return pointLon >= rect.minLon && pointLon <= rect.maxLon;
} else {
// Rect crosses dateline:
return GeoRelationUtils.pointInRectPrecise(pointLat, pointLon, rect.minLat, rect.maxLat, -180.0, rect.maxLon)
|| GeoRelationUtils.pointInRectPrecise(pointLat, pointLon, rect.minLat, rect.maxLat, rect.minLon, 180.0);
return pointLon <= rect.maxLon || pointLon >= rect.minLon;
}
}
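
The dateline branch is the subtle part of the rewrite: when minLon > maxLon the rectangle wraps across the ±180° seam, so a longitude is contained if it falls on either side of the seam. A minimal standalone sketch of the same predicate (class and method names here are illustrative, not part of the commit):

// Wrap-around longitude containment, mirroring the logic above.
class DatelineDemo {
  static boolean lonInRect(double lon, double minLon, double maxLon) {
    if (minLon <= maxLon) {
      // Ordinary rectangle: a simple interval test.
      return lon >= minLon && lon <= maxLon;
    }
    // Dateline-crossing rectangle, e.g. minLon=170, maxLon=-170
    // covers [170, 180] plus [-180, -170]:
    return lon <= maxLon || lon >= minLon;
  }

  public static void main(String[] args) {
    System.out.println(lonInRect(175.0, 170.0, -170.0));  // true: east side of the seam
    System.out.println(lonInRect(-175.0, 170.0, -170.0)); // true: west side of the seam
    System.out.println(lonInRect(0.0, 170.0, -170.0));    // false: outside the wrap
  }
}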

View File

@@ -121,6 +121,11 @@ Bug Fixes
* SOLR-8946: bin/post failed to detect stdin usage on Ubuntu; maybe other unixes. (David Smiley)
* SOLR-8662: SchemaManager waits correctly for replicas to be notified of a new change.
(sarowe, Noble Paul, Varun Thacker)
* SOLR-9004: Fix "name" field type definition in films example. (Alexandre Rafalovitch via Varun Thacker)
Optimizations
----------------------
* SOLR-8722: Don't force a full ZkStateReader refresh on every Overseer operation.

View File

@@ -80,17 +80,42 @@ public class ZkSolrResourceLoader extends SolrResourceLoader {
*/
@Override
public InputStream openResource(String resource) throws IOException {
InputStream is = null;
InputStream is;
String file = configSetZkPath + "/" + resource;
try {
if (zkController.pathExists(file)) {
Stat stat = new Stat();
byte[] bytes = zkController.getZkClient().getData(file, null, stat, true);
return new ZkByteArrayInputStream(bytes, stat);
int maxTries = 10;
Exception exception = null;
while (maxTries-- > 0) {
try {
if (zkController.pathExists(file)) {
Stat stat = new Stat();
byte[] bytes = zkController.getZkClient().getData(file, null, stat, true);
return new ZkByteArrayInputStream(bytes, stat);
} else {
//Path does not exist. We only retry on session expired exceptions.
break;
}
} catch (KeeperException.SessionExpiredException e) {
exception = e;
// Retry in case of session expiry
try {
log.debug("Sleeping for 1s before retrying fetch of resource=" + resource);
Thread.sleep(1000);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
throw new IOException("Could not load resource=" + resource, ie);
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new IOException("Error opening " + file, e);
} catch (KeeperException e) {
throw new IOException("Error opening " + file, e);
}
} catch (Exception e) {
throw new IOException("Error opening " + file, e);
}
if (exception != null) {
throw new IOException("We re-tried 10 times but was still unable to fetch resource=" + resource + " from ZK", exception);
}
try {
// delegate to the class loader (looking into $INSTANCE_DIR/lib jars)
is = classLoader.getResourceAsStream(resource.replace(File.separatorChar, '/'));
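
The new loop retries the ZooKeeper read up to 10 times with a 1s pause, but only for SessionExpiredException; interrupts and other KeeperExceptions fail fast, and a missing path breaks out so the classloader fallback still runs. A condensed sketch of that retry shape in isolation (generic names, assumed for illustration, not Solr API):

import java.io.IOException;
import java.util.concurrent.Callable;

// Retry only the recoverable failure; rethrow everything else immediately.
class RetryDemo {
  // Stand-in for KeeperException.SessionExpiredException.
  static class RecoverableStoreException extends Exception {}

  static <T> T withRetries(Callable<T> op, int maxTries, long sleepMs) throws IOException {
    Exception last = null;
    while (maxTries-- > 0) {
      try {
        return op.call();
      } catch (RecoverableStoreException e) {
        last = e;  // remember the cause, pause, and go around again
        try {
          Thread.sleep(sleepMs);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          throw new IOException("Interrupted while retrying", ie);
        }
      } catch (Exception e) {
        throw new IOException("Non-recoverable failure", e);
      }
    }
    throw new IOException("Exhausted retries", last);
  }
}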

View File

@ -25,6 +25,7 @@ import org.apache.solr.core.SolrResourceLoader;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.rest.BaseSolrResource;
import org.apache.solr.util.CommandOperation;
import org.apache.solr.util.TimeOut;
import org.apache.zookeeper.KeeperException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -86,20 +87,27 @@ public class SchemaManager {
if (!errs.isEmpty()) return errs;
IndexSchema schema = req.getCore().getLatestSchema();
if (!(schema instanceof ManagedIndexSchema)) {
if (schema instanceof ManagedIndexSchema && schema.isMutable()) {
synchronized (schema.getSchemaUpdateLock()) {
return doOperations(ops);
}
} else {
return singletonList(singletonMap(CommandOperation.ERR_MSGS, "schema is not editable"));
}
synchronized (schema.getSchemaUpdateLock()) {
return doOperations(ops);
}
}
private List doOperations(List<CommandOperation> operations) throws InterruptedException, IOException, KeeperException {
int timeout = req.getParams().getInt(BaseSolrResource.UPDATE_TIMEOUT_SECS, -1);
long startTime = System.nanoTime();
long endTime = timeout > 0 ? System.nanoTime() + (timeout * 1000 * 1000) : Long.MAX_VALUE;
//The default timeout is 10 minutes when BaseSolrResource.UPDATE_TIMEOUT_SECS is not specified
int timeout = req.getParams().getInt(BaseSolrResource.UPDATE_TIMEOUT_SECS, 600);
//If BaseSolrResource.UPDATE_TIMEOUT_SECS is 0 or negative, fall back to the 10 minute default
if (timeout < 1) {
timeout = 600;
}
TimeOut timeOut = new TimeOut(timeout, TimeUnit.SECONDS);
SolrCore core = req.getCore();
while (System.nanoTime() < endTime) {
String errorMsg = "Unable to persist managed schema. ";
while (!timeOut.hasTimedOut()) {
managedIndexSchema = getFreshManagedSchema();
for (CommandOperation op : operations) {
OpType opType = OpType.get(op.name);
@@ -118,25 +126,18 @@
try {
managedIndexSchema.persist(sw);
} catch (IOException e) {
log.info("race condition ");
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "unable to serialize schema");
//unlikely
}
try {
ZkController.persistConfigResourceToZooKeeper(zkLoader,
managedIndexSchema.getSchemaZkVersion(),
managedIndexSchema.getResourceName(),
sw.toString().getBytes(StandardCharsets.UTF_8),
true);
waitForOtherReplicasToUpdate(timeout, startTime);
ZkController.persistConfigResourceToZooKeeper(zkLoader, managedIndexSchema.getSchemaZkVersion(),
managedIndexSchema.getResourceName(), sw.toString().getBytes(StandardCharsets.UTF_8), true);
waitForOtherReplicasToUpdate(timeOut);
core.setLatestSchema(managedIndexSchema);
return Collections.emptyList();
} catch (ZkController.ResourceModifiedInZkException e) {
log.info("Race condition schema modified by another node");
} catch (Exception e) {
String s = "Exception persisting schema";
log.warn(s, e);
return singletonList(s + e.getMessage());
log.info("Schema was modified by another node. Retrying..");
}
} else {
try {
@@ -144,36 +145,30 @@
managedIndexSchema.persistManagedSchema(false);
core.setLatestSchema(managedIndexSchema);
return Collections.emptyList();
} catch (ManagedIndexSchema.SchemaChangedInZkException e) {
String s = "Failed to update schema because schema is modified";
log.warn(s, e);
} catch (Exception e) {
String s = "Exception persisting schema";
log.warn(s, e);
return singletonList(s + e.getMessage());
} catch (SolrException e) {
log.warn(errorMsg);
return singletonList(errorMsg + e.getMessage());
}
}
}
return singletonList("Unable to persist schema");
log.warn(errorMsg + "Timed out.");
return singletonList(errorMsg + "Timed out.");
}
private void waitForOtherReplicasToUpdate(int timeout, long startTime) {
if (timeout > 0 && managedIndexSchema.getResourceLoader() instanceof ZkSolrResourceLoader) {
CoreDescriptor cd = req.getCore().getCoreDescriptor();
String collection = cd.getCollectionName();
if (collection != null) {
ZkSolrResourceLoader zkLoader = (ZkSolrResourceLoader) managedIndexSchema.getResourceLoader();
long timeLeftSecs = timeout - TimeUnit.SECONDS.convert(System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
if (timeLeftSecs <= 0) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Not enough time left to update replicas. However, the schema is updated already.");
}
ManagedIndexSchema.waitForSchemaZkVersionAgreement(collection,
cd.getCloudDescriptor().getCoreNodeName(),
(managedIndexSchema).getSchemaZkVersion(),
zkLoader.getZkController(),
(int) timeLeftSecs);
private void waitForOtherReplicasToUpdate(TimeOut timeOut) {
CoreDescriptor cd = req.getCore().getCoreDescriptor();
String collection = cd.getCollectionName();
if (collection != null) {
ZkSolrResourceLoader zkLoader = (ZkSolrResourceLoader) managedIndexSchema.getResourceLoader();
if (timeOut.hasTimedOut()) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Not enough time left to update replicas. However, the schema is updated already.");
}
ManagedIndexSchema.waitForSchemaZkVersionAgreement(collection,
cd.getCloudDescriptor().getCoreNodeName(),
(managedIndexSchema).getSchemaZkVersion(),
zkLoader.getZkController(),
(int) timeOut.timeLeft(TimeUnit.SECONDS));
}
}
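
The (timeout, startTime) pair threaded through the old code is replaced by a single TimeOut object, created once in doOperations and consulted again in waitForOtherReplicasToUpdate. Judging only from the two calls used here, hasTimedOut() and timeLeft(TimeUnit), the utility amounts to roughly the following sketch (the real org.apache.solr.util.TimeOut may differ in detail):

import java.util.concurrent.TimeUnit;

// Minimal deadline holder matching the calls above.
class TimeOutSketch {
  private final long deadlineNanos;

  TimeOutSketch(long interval, TimeUnit unit) {
    // Fix the deadline once; every caller then measures against it.
    this.deadlineNanos = System.nanoTime() + unit.toNanos(interval);
  }

  boolean hasTimedOut() {
    return System.nanoTime() >= deadlineNanos;
  }

  long timeLeft(TimeUnit unit) {
    // May be negative once the deadline has passed.
    return unit.convert(deadlineNanos - System.nanoTime(), TimeUnit.NANOSECONDS);
  }
}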

View File

@@ -18,8 +18,14 @@
<schema name="minimal" version="1.1">
<types>
<fieldType name="string" class="solr.StrField"/>
<fieldType name="int" class="solr.TrieIntField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
<fieldType name="long" class="solr.TrieLongField" precisionStep="0" omitNorms="true" positionIncrementGap="0"/>
</types>
<fields>
<dynamicField name="*" type="string" indexed="true" stored="true" />
<!-- for versioning -->
<field name="_version_" type="long" indexed="true" stored="true"/>
<field name="_root_" type="int" indexed="true" stored="true" multiValued="false" required="false"/>
<field name="id" type="string" indexed="true" stored="true"/>
</fields>
<uniqueKey>id</uniqueKey>
</schema>

View File

@ -17,16 +17,7 @@
limitations under the License.
-->
<!-- This is a "kitchen sink" config file that tests can use.
When writing a new test, feel free to add *new* items (plugins,
config options, etc...) as long as they don't break any existing
tests. If you need to test something esoteric please add a new
"solrconfig-your-esoteric-purpose.xml" config file.
Note in particular that this test is used by MinimalSchemaTest, so
anything added to this file needs to work correctly even if there
is no uniqueKey or defaultSearchField.
-->
<!-- Minimal solrconfig.xml with /select, /admin and /update only -->
<config>
@@ -34,7 +25,11 @@
<directoryFactory name="DirectoryFactory"
class="${solr.directoryFactory:solr.NRTCachingDirectoryFactory}"/>
<schemaFactory class="ClassicIndexSchemaFactory"/>
<schemaFactory class="ManagedIndexSchemaFactory">
<bool name="mutable">${managed.schema.mutable}</bool>
<str name="managedSchemaResourceName">managed-schema</str>
</schemaFactory>
<luceneMatchVersion>${tests.luceneMatchVersion:LATEST}</luceneMatchVersion>
@@ -42,8 +37,9 @@
<commitWithin>
<softCommit>${solr.commitwithin.softcommit:true}</softCommit>
</commitWithin>
<updateLog></updateLog>
</updateHandler>
<requestHandler name="/select" class="solr.SearchHandler">
<lst name="defaults">
<str name="echoParams">explicit</str>
@@ -53,4 +49,3 @@
</requestHandler>
</config>

View File

@@ -0,0 +1,101 @@
package org.apache.solr.schema;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.request.schema.SchemaRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.client.solrj.response.schema.SchemaResponse;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.common.SolrInputDocument;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
public class TestManagedSchemaAPI extends SolrCloudTestCase {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@BeforeClass
public static void createCluster() throws Exception {
System.setProperty("managed.schema.mutable", "true");
configureCluster(2)
.addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-managed").resolve("conf"))
.configure();
}
@Test
public void test() throws Exception {
String collection = "testschemaapi";
cluster.createCollection(collection, 1, 2, "conf1", null);
testReloadAndAddSimple(collection);
testAddFieldAndDocument(collection);
}
private void testReloadAndAddSimple(String collection) throws IOException, SolrServerException {
CloudSolrClient cloudClient = cluster.getSolrClient();
String fieldName = "myNewField";
addStringField(fieldName, collection, cloudClient);
CollectionAdminRequest.Reload reloadRequest = CollectionAdminRequest.reloadCollection(collection);
CollectionAdminResponse response = reloadRequest.process(cloudClient);
assertEquals(0, response.getStatus());
assertTrue(response.isSuccess());
SolrInputDocument doc = new SolrInputDocument();
doc.addField("id", "1");
doc.addField(fieldName, "val");
UpdateRequest ureq = new UpdateRequest().add(doc);
cloudClient.request(ureq, collection);
}
private void testAddFieldAndDocument(String collection) throws IOException, SolrServerException {
CloudSolrClient cloudClient = cluster.getSolrClient();
String fieldName = "myNewField1";
addStringField(fieldName, collection, cloudClient);
SolrInputDocument doc = new SolrInputDocument();
doc.addField("id", "2");
doc.addField(fieldName, "val1");
UpdateRequest ureq = new UpdateRequest().add(doc);
cloudClient.request(ureq, collection);
}
private void addStringField(String fieldName, String collection, CloudSolrClient cloudClient) throws IOException, SolrServerException {
Map<String, Object> fieldAttributes = new LinkedHashMap<>();
fieldAttributes.put("name", fieldName);
fieldAttributes.put("type", "string");
SchemaRequest.AddField addFieldUpdateSchemaRequest = new SchemaRequest.AddField(fieldAttributes);
SchemaResponse.UpdateResponse addFieldResponse = addFieldUpdateSchemaRequest.process(cloudClient, collection);
assertEquals(0, addFieldResponse.getStatus());
assertNull(addFieldResponse.getResponse().get("errors"));
log.info("added new field="+fieldName);
}
}

View File

@@ -22,6 +22,7 @@ curl http://localhost:8983/solr/films/schema -X POST -H 'Content-type:applicatio
"add-field" : {
"name":"name",
"type":"text_general",
"multiValued":false,
"stored":true
},
"add-field" : {
@@ -103,6 +104,7 @@ curl http://localhost:8983/solr/films/schema -X POST -H 'Content-type:applicatio
"add-field" : {
"name":"name",
"type":"text_general",
"multiValued":false,
"stored":true
},
"add-field" : {