SOLR-6180: Callers of ManagedIndexSchema mutators should hold the schemaUpdateLock.

git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1608646 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Steven Rowe 2014-07-08 03:20:54 +00:00
parent 474d846bb5
commit 954f3dfdc5
8 changed files with 207 additions and 82 deletions

View File

@ -158,6 +158,9 @@ Bug Fixes
* SOLR-6223: SearchComponents may throw NPE when using shards.tolerant and there is a failure * SOLR-6223: SearchComponents may throw NPE when using shards.tolerant and there is a failure
in the 'GET_FIELDS/GET_HIGHLIGHTS/GET_DEBUG' phase. (Tomás Fernández Löbbe via shalin) in the 'GET_FIELDS/GET_HIGHLIGHTS/GET_DEBUG' phase. (Tomás Fernández Löbbe via shalin)
* SOLR-6180: Callers of ManagedIndexSchema mutators should hold the schemaUpdateLock.
(Gregory Chanan via Steve Rowe)
Optimizations Optimizations
--------------------- ---------------------

View File

@ -171,6 +171,7 @@ public class CopyFieldCollectionResource extends BaseFieldResource implements GE
boolean success = false; boolean success = false;
while (!success) { while (!success) {
try { try {
synchronized (oldSchema.getSchemaUpdateLock()) {
IndexSchema newSchema = oldSchema.addCopyFields(fieldsToCopy); IndexSchema newSchema = oldSchema.addCopyFields(fieldsToCopy);
if (null != newSchema) { if (null != newSchema) {
getSolrCore().setLatestSchema(newSchema); getSolrCore().setLatestSchema(newSchema);
@ -178,6 +179,7 @@ public class CopyFieldCollectionResource extends BaseFieldResource implements GE
} else { } else {
throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to add fields."); throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to add fields.");
} }
}
} catch (ManagedIndexSchema.SchemaChangedInZkException e) { } catch (ManagedIndexSchema.SchemaChangedInZkException e) {
log.debug("Schema changed while processing request, retrying"); log.debug("Schema changed while processing request, retrying");
oldSchema = (ManagedIndexSchema)getSolrCore().getLatestSchema(); oldSchema = (ManagedIndexSchema)getSolrCore().getLatestSchema();

View File

@ -195,6 +195,7 @@ public class FieldCollectionResource extends BaseFieldResource implements GETabl
} }
} }
firstAttempt = false; firstAttempt = false;
synchronized (oldSchema.getSchemaUpdateLock()) {
IndexSchema newSchema = oldSchema.addFields(newFields, copyFields); IndexSchema newSchema = oldSchema.addFields(newFields, copyFields);
if (null != newSchema) { if (null != newSchema) {
getSolrCore().setLatestSchema(newSchema); getSolrCore().setLatestSchema(newSchema);
@ -202,6 +203,7 @@ public class FieldCollectionResource extends BaseFieldResource implements GETabl
} else { } else {
throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to add fields."); throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to add fields.");
} }
}
} catch (ManagedIndexSchema.SchemaChangedInZkException e) { } catch (ManagedIndexSchema.SchemaChangedInZkException e) {
log.debug("Schema changed while processing request, retrying"); log.debug("Schema changed while processing request, retrying");
oldSchema = getSolrCore().getLatestSchema(); oldSchema = getSolrCore().getLatestSchema();

View File

@ -166,6 +166,7 @@ public class FieldResource extends BaseFieldResource implements GETable, PUTable
while (!success) { while (!success) {
try { try {
SchemaField newField = oldSchema.newField(fieldName, fieldType, map); SchemaField newField = oldSchema.newField(fieldName, fieldType, map);
synchronized (oldSchema.getSchemaUpdateLock()) {
IndexSchema newSchema = oldSchema.addField(newField, copyFieldNames); IndexSchema newSchema = oldSchema.addField(newField, copyFieldNames);
if (null != newSchema) { if (null != newSchema) {
getSolrCore().setLatestSchema(newSchema); getSolrCore().setLatestSchema(newSchema);
@ -173,6 +174,7 @@ public class FieldResource extends BaseFieldResource implements GETable, PUTable
} else { } else {
throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to add field."); throw new SolrException(ErrorCode.SERVER_ERROR, "Failed to add field.");
} }
}
} catch (ManagedIndexSchema.SchemaChangedInZkException e) { } catch (ManagedIndexSchema.SchemaChangedInZkException e) {
log.debug("Schema changed while processing request, retrying"); log.debug("Schema changed while processing request, retrying");
oldSchema = (ManagedIndexSchema)getSolrCore().getLatestSchema(); oldSchema = (ManagedIndexSchema)getSolrCore().getLatestSchema();

View File

@ -1473,7 +1473,9 @@ public class IndexSchema {
} }
/** /**
* Copies this schema, adds the given field to the copy, then persists the new schema. * Copies this schema, adds the given field to the copy, then persists the
* new schema. Requires synchronizing on the object returned by
* {@link #getSchemaUpdateLock()}.
* *
* @param newField the SchemaField to add * @param newField the SchemaField to add
* @return a new IndexSchema based on this schema with newField added * @return a new IndexSchema based on this schema with newField added
@ -1486,7 +1488,9 @@ public class IndexSchema {
} }
/** /**
* Copies this schema, adds the given field to the copy, then persists the new schema. * Copies this schema, adds the given field to the copy, then persists the
* new schema. Requires synchronizing on the object returned by
* {@link #getSchemaUpdateLock()}.
* *
* @param newField the SchemaField to add * @param newField the SchemaField to add
* @param copyFieldNames 0 or more names of targets to copy this field to. The targets must already exist. * @param copyFieldNames 0 or more names of targets to copy this field to. The targets must already exist.
@ -1500,7 +1504,9 @@ public class IndexSchema {
} }
/** /**
* Copies this schema, adds the given fields to the copy, then persists the new schema. * Copies this schema, adds the given fields to the copy, then persists the
* new schema. Requires synchronizing on the object returned by
* {@link #getSchemaUpdateLock()}.
* *
* @param newFields the SchemaFields to add * @param newFields the SchemaFields to add
* @return a new IndexSchema based on this schema with newFields added * @return a new IndexSchema based on this schema with newFields added
@ -1513,7 +1519,9 @@ public class IndexSchema {
} }
/** /**
* Copies this schema, adds the given fields to the copy, then persists the new schema. * Copies this schema, adds the given fields to the copy, then persists the
* new schema. Requires synchronizing on the object returned by
* {@link #getSchemaUpdateLock()}.
* *
* @param newFields the SchemaFields to add * @param newFields the SchemaFields to add
* @param copyFieldNames 0 or more names of targets to copy this field to. The target fields must already exist. * @param copyFieldNames 0 or more names of targets to copy this field to. The target fields must already exist.
@ -1527,7 +1535,10 @@ public class IndexSchema {
} }
/** /**
* Copies this schema and adds the new copy fields to the copy, then persists the new schema * Copies this schema and adds the new copy fields to the copy, then
* persists the new schema. Requires synchronizing on the object returned by
* {@link #getSchemaUpdateLock()}.
*
* @param copyFields Key is the name of the source field name, value is a collection of target field names. Fields must exist. * @param copyFields Key is the name of the source field name, value is a collection of target field names. Fields must exist.
* @return The new Schema with the copy fields added * @return The new Schema with the copy fields added
*/ */
@ -1554,4 +1565,16 @@ public class IndexSchema {
log.error(msg); log.error(msg);
throw new SolrException(ErrorCode.SERVER_ERROR, msg); throw new SolrException(ErrorCode.SERVER_ERROR, msg);
} }
/**
 * Returns the schema update lock that should be synchronized on
 * to update the schema. Only applicable to mutable schemas.
 *
 * @return the schema update lock object to synchronize on
 * @throws SolrException (SERVER_ERROR) always in this base implementation,
 *         since a plain IndexSchema is not mutable; mutable subclasses
 *         (e.g. ManagedIndexSchema) override this to return a real lock
 */
public Object getSchemaUpdateLock() {
String msg = "This IndexSchema is not mutable.";
log.error(msg);
throw new SolrException(ErrorCode.SERVER_ERROR, msg);
}
} }

View File

@ -209,9 +209,6 @@ public final class ManagedIndexSchema extends IndexSchema {
if (copyFieldNames == null){ if (copyFieldNames == null){
copyFieldNames = Collections.emptyMap(); copyFieldNames = Collections.emptyMap();
} }
// even though fields is volatile, we need to synchronize to avoid two addFields
// happening concurrently (and ending up missing one of them)
synchronized (getSchemaUpdateLock()) {
newSchema = shallowCopy(true); newSchema = shallowCopy(true);
for (SchemaField newField : newFields) { for (SchemaField newField : newFields) {
@ -235,7 +232,7 @@ public final class ManagedIndexSchema extends IndexSchema {
newSchema.registerCopyField(newField.getName(), copyField); newSchema.registerCopyField(newField.getName(), copyField);
} }
} }
}
// Run the callbacks on SchemaAware now that everything else is done // Run the callbacks on SchemaAware now that everything else is done
for (SchemaAware aware : newSchema.schemaAware) { for (SchemaAware aware : newSchema.schemaAware) {
aware.inform(newSchema); aware.inform(newSchema);
@ -261,9 +258,6 @@ public final class ManagedIndexSchema extends IndexSchema {
ManagedIndexSchema newSchema = null; ManagedIndexSchema newSchema = null;
if (isMutable) { if (isMutable) {
boolean success = false; boolean success = false;
// even though fields is volatile, we need to synchronize to avoid two addCopyFields
// happening concurrently (and ending up missing one of them)
synchronized (getSchemaUpdateLock()) {
newSchema = shallowCopy(true); newSchema = shallowCopy(true);
for (Map.Entry<String, Collection<String>> entry : copyFields.entrySet()) { for (Map.Entry<String, Collection<String>> entry : copyFields.entrySet()) {
//Key is the name of the field, values are the destinations //Key is the name of the field, values are the destinations
@ -285,7 +279,6 @@ public final class ManagedIndexSchema extends IndexSchema {
log.error("Failed to add copy fields for {} sources", copyFields.size()); log.error("Failed to add copy fields for {} sources", copyFields.size());
} }
} }
}
return newSchema; return newSchema;
} }
@ -432,6 +425,7 @@ public final class ManagedIndexSchema extends IndexSchema {
return newSchema; return newSchema;
} }
@Override
public Object getSchemaUpdateLock() { public Object getSchemaUpdateLock() {
return schemaUpdateLock; return schemaUpdateLock;
} }

View File

@ -315,6 +315,7 @@ public class AddSchemaFieldsUpdateProcessorFactory extends UpdateRequestProcesso
log.debug(builder.toString()); log.debug(builder.toString());
} }
try { try {
synchronized (oldSchema.getSchemaUpdateLock()) {
IndexSchema newSchema = oldSchema.addFields(newFields); IndexSchema newSchema = oldSchema.addFields(newFields);
if (null != newSchema) { if (null != newSchema) {
cmd.getReq().getCore().setLatestSchema(newSchema); cmd.getReq().getCore().setLatestSchema(newSchema);
@ -324,6 +325,7 @@ public class AddSchemaFieldsUpdateProcessorFactory extends UpdateRequestProcesso
} else { } else {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Failed to add fields."); throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Failed to add fields.");
} }
}
} catch(ManagedIndexSchema.FieldExistsException e) { } catch(ManagedIndexSchema.FieldExistsException e) {
log.debug("At least one field to be added already exists in the schema - retrying."); log.debug("At least one field to be added already exists in the schema - retrying.");
// No action: at least one field to be added already exists in the schema, so retry // No action: at least one field to be added already exists in the schema, so retry

View File

@ -111,18 +111,19 @@ public class TestCloudManagedSchemaConcurrent extends AbstractFullDistribZkTestB
verifySuccess(request, response); verifySuccess(request, response);
} }
private String[] getExpectedFieldResponses(int numAddFieldPuts, int numAddFieldPosts) { private String[] getExpectedFieldResponses(int numAddFieldPuts, String putFieldName,
int numAddFieldPosts, String postFieldName) {
String[] expectedAddFields = new String[1 + numAddFieldPuts + numAddFieldPosts]; String[] expectedAddFields = new String[1 + numAddFieldPuts + numAddFieldPosts];
expectedAddFields[0] = SUCCESS_XPATH; expectedAddFields[0] = SUCCESS_XPATH;
for (int i = 0; i < numAddFieldPuts; ++i) { for (int i = 0; i < numAddFieldPuts; ++i) {
String newFieldName = "newfieldPut" + i; String newFieldName = putFieldName + i;
expectedAddFields[1 + i] expectedAddFields[1 + i]
= "/response/arr[@name='fields']/lst/str[@name='name'][.='" + newFieldName + "']"; = "/response/arr[@name='fields']/lst/str[@name='name'][.='" + newFieldName + "']";
} }
for (int i = 0; i < numAddFieldPosts; ++i) { for (int i = 0; i < numAddFieldPosts; ++i) {
String newFieldName = "newfieldPost" + i; String newFieldName = postFieldName + i;
expectedAddFields[1 + numAddFieldPuts + i] expectedAddFields[1 + numAddFieldPuts + i]
= "/response/arr[@name='fields']/lst/str[@name='name'][.='" + newFieldName + "']"; = "/response/arr[@name='fields']/lst/str[@name='name'][.='" + newFieldName + "']";
} }
@ -148,6 +149,11 @@ public class TestCloudManagedSchemaConcurrent extends AbstractFullDistribZkTestB
@Override @Override
public void doTest() throws Exception { public void doTest() throws Exception {
setupHarnesses(); setupHarnesses();
concurrentOperationsTest();
schemaLockTest();
}
private void concurrentOperationsTest() throws Exception {
// First, add a bunch of fields via PUT and POST, as well as copyFields, // First, add a bunch of fields via PUT and POST, as well as copyFields,
// but do it fast enough and verify shards' schemas after all of them are added // but do it fast enough and verify shards' schemas after all of them are added
@ -156,15 +162,18 @@ public class TestCloudManagedSchemaConcurrent extends AbstractFullDistribZkTestB
int numAddFieldPosts = 0; int numAddFieldPosts = 0;
List<CopyFieldInfo> copyFields = new ArrayList<>(); List<CopyFieldInfo> copyFields = new ArrayList<>();
final String putFieldName = "newfieldPut";
final String postFieldName = "newfieldPost";
for (int i = 0; i <= numFields ; ++i) { for (int i = 0; i <= numFields ; ++i) {
RestTestHarness publisher = restTestHarnesses.get(r.nextInt(restTestHarnesses.size())); RestTestHarness publisher = restTestHarnesses.get(r.nextInt(restTestHarnesses.size()));
int type = random().nextInt(3); int type = random().nextInt(3);
if (type == 0) { // send an add field via PUT if (type == 0) { // send an add field via PUT
addFieldPut(publisher, "newfieldPut" + numAddFieldPuts++); addFieldPut(publisher, putFieldName + numAddFieldPuts++);
} }
else if (type == 1) { // send an add field via POST else if (type == 1) { // send an add field via POST
addFieldPost(publisher, "newfieldPost" + numAddFieldPosts++); addFieldPost(publisher, postFieldName + numAddFieldPosts++);
} }
else if (type == 2) { // send a copy field else if (type == 2) { // send a copy field
String sourceField = null; String sourceField = null;
@ -196,7 +205,8 @@ public class TestCloudManagedSchemaConcurrent extends AbstractFullDistribZkTestB
} }
} }
String[] expectedAddFields = getExpectedFieldResponses(numAddFieldPuts, numAddFieldPosts); String[] expectedAddFields = getExpectedFieldResponses(numAddFieldPuts, putFieldName,
numAddFieldPosts, postFieldName);
String[] expectedCopyFields = getExpectedCopyFieldResponses(copyFields); String[] expectedCopyFields = getExpectedCopyFieldResponses(copyFields);
boolean success = false; boolean success = false;
@ -236,6 +246,93 @@ public class TestCloudManagedSchemaConcurrent extends AbstractFullDistribZkTestB
} }
} }
/**
 * Worker thread that adds a single field to the schema via the REST API,
 * using either a PUT (add single field) or a POST (add fields) request,
 * so two schema mutations can be issued concurrently from the test.
 */
private class PutPostThread extends Thread {
final RestTestHarness harness;
final String fieldName;
final boolean isPut;
public PutPostThread(RestTestHarness harness, String fieldName, boolean isPut) {
this.harness = harness;
this.fieldName = fieldName;
this.isPut = isPut;
}
@Override
public void run() {
try {
if (isPut) {
addFieldPut(harness, fieldName);
} else {
addFieldPost(harness, fieldName);
}
} catch (Exception e) {
// Rethrow as unchecked so the failure is not silently swallowed
// by Thread.run()'s void signature.
throw new RuntimeException(e);
}
}
}
/**
 * Stress test for the schema update lock: on each iteration, fire one POST
 * (add-fields) and one PUT (add-field) schema request concurrently from two
 * threads, then poll every test harness until all fields added so far are
 * visible in each node's schema, failing the test after a timeout.
 */
private void schemaLockTest() throws Exception {
// On each iteration add one field via POST and one via PUT concurrently,
// then verify all nodes' schemas contain every field added so far.
int numFields = 25;
int numAddFieldPuts = 0;
int numAddFieldPosts = 0;
final String putFieldName = "newfieldPutThread";
final String postFieldName = "newfieldPostThread";
for (int i = 0; i <= numFields ; ++i) {
// System.err.println("###ITERATION: " + i);
// Pick a (possibly different) random harness for each of the two
// concurrent requests.
int postHarness = r.nextInt(restTestHarnesses.size());
RestTestHarness publisher = restTestHarnesses.get(postHarness);
PutPostThread postThread = new PutPostThread(publisher, postFieldName + numAddFieldPosts++, false);
postThread.start();
int putHarness = r.nextInt(restTestHarnesses.size());
publisher = restTestHarnesses.get(putHarness);
PutPostThread putThread = new PutPostThread(publisher, putFieldName + numAddFieldPuts++, true);
putThread.start();
// Wait for both concurrent schema updates to complete before verifying.
postThread.join();
putThread.join();
String[] expectedAddFields = getExpectedFieldResponses(numAddFieldPuts, putFieldName,
numAddFieldPosts, postFieldName);
boolean success = false;
long maxTimeoutMillis = 100000;
long startTime = System.nanoTime();
String request = null;
String response = null;
String result = null;
// Poll all harnesses until the expected fields show up everywhere —
// presumably schema changes propagate to other nodes asynchronously,
// hence the retry loop (TODO confirm against ZK schema-watch behavior).
while ( ! success
&& TimeUnit.MILLISECONDS.convert(System.nanoTime() - startTime, TimeUnit.NANOSECONDS) < maxTimeoutMillis) {
Thread.sleep(10);
// int j = 0;
for (RestTestHarness client : restTestHarnesses) {
// System.err.println("###CHECKING HARNESS: " + j++ + " for iteration: " + i);
// verify addFieldPuts and addFieldPosts
request = "/schema/fields?wt=xml";
response = client.query(request);
//System.err.println("###RESPONSE: " + response);
// validateXPath returns the first failing xpath, or null if all matched.
result = BaseTestHarness.validateXPath(response, expectedAddFields);
if (result != null) {
// System.err.println("###FAILURE!");
break;
}
}
success = (result == null);
}
if ( ! success) {
String msg = "QUERY FAILED: xpath=" + result + " request=" + request + " response=" + response;
log.error(msg);
fail(msg);
}
}
}
private static class CopyFieldInfo { private static class CopyFieldInfo {
private String sourceField; private String sourceField;
private String destField; private String destField;