SOLR-7766: support creation of a coreless collection via createNodeSet=EMPTY
git-svn-id: https://svn.apache.org/repos/asf/lucene/dev/trunk@1694181 13f79535-47bb-0310-9956-ffa450edef68
parent 1930323dbc
commit 2de2e0a16f

@@ -181,6 +181,8 @@ New Features
 
 * SOLR-7769: Add bin/post -p alias for -port parameter. (ehatcher)
 
+* SOLR-7766: support creation of a coreless collection via createNodeSet=EMPTY (Christine Poerschke)
+
 Bug Fixes
 ----------------------
 
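For illustration, a coreless collection can then be created through an ordinary Collections API CREATE call by passing the new sentinel value. A minimal sketch in the style of MiniSolrCloudCluster.createCollection below; the collection name, config name, and host/port are hypothetical:

    // equivalent HTTP form (host/port hypothetical):
    //   http://localhost:8983/solr/admin/collections?action=CREATE&name=myCollection
    //     &numShards=2&replicationFactor=1&collection.configName=myConfig&createNodeSet=EMPTY
    final ModifiableSolrParams params = new ModifiableSolrParams();
    params.set(CoreAdminParams.ACTION, CollectionAction.CREATE.name());
    params.set(CoreAdminParams.NAME, "myCollection");
    params.set("numShards", 2);
    params.set("replicationFactor", 1);
    params.set("collection.configName", "myConfig");
    params.set("createNodeSet", "EMPTY"); // the new sentinel: create the collection with no cores
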
@@ -133,6 +133,7 @@ public class OverseerCollectionProcessor implements Runnable, Closeable {
 
   static final boolean CREATE_NODE_SET_SHUFFLE_DEFAULT = true;
   public static final String CREATE_NODE_SET_SHUFFLE = "createNodeSet.shuffle";
+  public static final String CREATE_NODE_SET_EMPTY = "EMPTY";
   public static final String CREATE_NODE_SET = "createNodeSet";
 
   public static final String ROUTER = "router";
@@ -2231,7 +2232,7 @@ public class OverseerCollectionProcessor implements Runnable, Closeable {
     List<String> nodeList;
 
     final String createNodeSetStr = message.getStr(CREATE_NODE_SET);
-    final List<String> createNodeList = (createNodeSetStr == null)?null:StrUtils.splitSmart(createNodeSetStr, ",", true);
+    final List<String> createNodeList = (createNodeSetStr == null)?null:StrUtils.splitSmart((CREATE_NODE_SET_EMPTY.equals(createNodeSetStr)?"":createNodeSetStr), ",", true);
 
     if (createNodeList != null) {
       nodeList = new ArrayList<>(createNodeList);
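The change above maps the EMPTY sentinel to the empty string before splitting, so createNodeList comes back as an empty list rather than null, and the explicit-node-set branch is taken with zero nodes instead of falling back to all live nodes. A minimal standalone sketch of that behavior (not part of the commit), assuming StrUtils.splitSmart on an empty string yields no tokens:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.solr.common.util.StrUtils;

    public class CreateNodeSetEmptySketch {
      public static void main(String[] args) {
        final String createNodeSetStr = "EMPTY"; // as passed via createNodeSet=EMPTY
        // mirror of the ternary in the hunk above
        final List<String> createNodeList =
            StrUtils.splitSmart("EMPTY".equals(createNodeSetStr) ? "" : createNodeSetStr, ",", true);
        final List<String> nodeList = new ArrayList<>(createNodeList);
        System.out.println(nodeList.isEmpty()); // expected: true -> no replicas get placed
      }
    }
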
@@ -2248,7 +2249,7 @@ public class OverseerCollectionProcessor implements Runnable, Closeable {
   }
 
   private void createCollection(ClusterState clusterState, ZkNodeProps message, NamedList results) throws KeeperException, InterruptedException {
-    String collectionName = message.getStr(NAME);
+    final String collectionName = message.getStr(NAME);
     if (clusterState.hasCollection(collectionName)) {
       throw new SolrException(ErrorCode.BAD_REQUEST, "collection already exists: " + collectionName);
     }
@@ -2297,50 +2298,61 @@ public class OverseerCollectionProcessor implements Runnable, Closeable {
     // but (for now) require that each core goes on a distinct node.
 
     final List<String> nodeList = getLiveOrLiveAndCreateNodeSetList(clusterState.getLiveNodes(), message, RANDOM);
+    Map<Position, String> positionVsNodes;
+    if (nodeList.isEmpty()) {
+      log.warn("It is unusual to create a collection ("+collectionName+") without cores.");
 
-    if (repFactor > nodeList.size()) {
-      log.warn("Specified "
-          + REPLICATION_FACTOR
-          + " of "
-          + repFactor
-          + " on collection "
-          + collectionName
-          + " is higher than or equal to the number of Solr instances currently live or live and part of your " + CREATE_NODE_SET + "("
-          + nodeList.size()
-          + "). It's unusual to run two replica of the same slice on the same Solr-instance.");
-    }
+      positionVsNodes = new HashMap<>();
+    } else {
+      if (repFactor > nodeList.size()) {
+        log.warn("Specified "
+            + REPLICATION_FACTOR
+            + " of "
+            + repFactor
+            + " on collection "
+            + collectionName
+            + " is higher than or equal to the number of Solr instances currently live or live and part of your " + CREATE_NODE_SET + "("
+            + nodeList.size()
+            + "). It's unusual to run two replica of the same slice on the same Solr-instance.");
+      }
 
-    int maxShardsAllowedToCreate = maxShardsPerNode * nodeList.size();
-    int requestedShardsToCreate = numSlices * repFactor;
-    if (maxShardsAllowedToCreate < requestedShardsToCreate) {
-      throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot create collection " + collectionName + ". Value of "
-          + MAX_SHARDS_PER_NODE + " is " + maxShardsPerNode
-          + ", and the number of nodes currently live or live and part of your "+CREATE_NODE_SET+" is " + nodeList.size()
-          + ". This allows a maximum of " + maxShardsAllowedToCreate
-          + " to be created. Value of " + NUM_SLICES + " is " + numSlices
-          + " and value of " + REPLICATION_FACTOR + " is " + repFactor
-          + ". This requires " + requestedShardsToCreate
-          + " shards to be created (higher than the allowed number)");
-    }
+      int maxShardsAllowedToCreate = maxShardsPerNode * nodeList.size();
+      int requestedShardsToCreate = numSlices * repFactor;
+      if (maxShardsAllowedToCreate < requestedShardsToCreate) {
+        throw new SolrException(ErrorCode.BAD_REQUEST, "Cannot create collection " + collectionName + ". Value of "
+            + MAX_SHARDS_PER_NODE + " is " + maxShardsPerNode
+            + ", and the number of nodes currently live or live and part of your "+CREATE_NODE_SET+" is " + nodeList.size()
+            + ". This allows a maximum of " + maxShardsAllowedToCreate
+            + " to be created. Value of " + NUM_SLICES + " is " + numSlices
+            + " and value of " + REPLICATION_FACTOR + " is " + repFactor
+            + ". This requires " + requestedShardsToCreate
+            + " shards to be created (higher than the allowed number)");
+      }
 
-    Map<Position, String> positionVsNodes = identifyNodes(clusterState, nodeList, message, shardNames, repFactor);
+      positionVsNodes = identifyNodes(clusterState, nodeList, message, shardNames, repFactor);
+    }
 
     boolean isLegacyCloud = Overseer.isLegacy(zkStateReader.getClusterProps());
 
     createConfNode(configName, collectionName, isLegacyCloud);
 
     Overseer.getInQueue(zkStateReader.getZkClient()).offer(Utils.toJSON(message));
 
-    // wait for a while until we don't see the collection
+    // wait for a while until we do see the collection
     long waitUntil = System.nanoTime() + TimeUnit.NANOSECONDS.convert(30, TimeUnit.SECONDS);
     boolean created = false;
     while (System.nanoTime() < waitUntil) {
       Thread.sleep(100);
-      created = zkStateReader.getClusterState().getCollections().contains(message.getStr(NAME));
+      created = zkStateReader.getClusterState().getCollections().contains(collectionName);
       if(created) break;
     }
     if (!created)
-      throw new SolrException(ErrorCode.SERVER_ERROR, "Could not fully create collection: " + message.getStr(NAME));
+      throw new SolrException(ErrorCode.SERVER_ERROR, "Could not fully create collection: " + collectionName);
 
+    if (nodeList.isEmpty()) {
+      log.info("Finished create command for collection: {}", collectionName);
+      return;
+    }
+
     // For tracking async calls.
     HashMap<String, String> requestMap = new HashMap<String, String>();
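Net effect of this hunk: with an empty node list the Overseer logs a warning, uses an empty position map, still publishes the collection and waits for it to appear in cluster state, and then returns before any per-core create requests are issued. A coreless collection created this way can presumably be populated later; a hedged sketch using the Collections API ADDREPLICA action (collection, shard, and node address are hypothetical):

    final ModifiableSolrParams params = new ModifiableSolrParams();
    params.set(CoreAdminParams.ACTION, CollectionAction.ADDREPLICA.name());
    params.set("collection", "myCollection");
    params.set("shard", "shard1");
    params.set("node", "127.0.0.1:8983_solr"); // optional: pin the new replica to a specific node
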
@@ -85,15 +85,17 @@ public class TestAuthenticationFramework extends TestMiniSolrCloudCluster {
     requestUsername = MockAuthenticationPlugin.expectedUsername;
     requestPassword = MockAuthenticationPlugin.expectedPassword;
 
+    final String collectionName = "testAuthenticationFrameworkCollection";
+
     // Should pass
-    testCollectionCreateSearchDelete();
+    testCollectionCreateSearchDelete(collectionName);
 
     requestUsername = MockAuthenticationPlugin.expectedUsername;
     requestPassword = "junkpassword";
 
     // Should fail with 401
     try {
-      testCollectionCreateSearchDelete();
+      testCollectionCreateSearchDelete(collectionName);
       fail("Should've returned a 401 error");
     } catch (Exception ex) {
       if (!ex.getMessage().contains("Error 401")) {
@@ -83,17 +83,41 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase {
 
   @Test
   public void testBasics() throws Exception {
-    testCollectionCreateSearchDelete();
+    final String collectionName = "testSolrCloudCollection";
+    testCollectionCreateSearchDelete(collectionName);
     // sometimes run a second test e.g. to test collection create-delete-create scenario
-    if (random().nextBoolean()) testCollectionCreateSearchDelete();
+    if (random().nextBoolean()) testCollectionCreateSearchDelete(collectionName);
   }
 
-  protected void testCollectionCreateSearchDelete() throws Exception {
+  private MiniSolrCloudCluster createMiniSolrCloudCluster() throws Exception {
 
     File solrXml = new File(SolrTestCaseJ4.TEST_HOME(), "solr-no-core.xml");
     Builder jettyConfig = JettyConfig.builder();
     jettyConfig.waitForLoadingCoresToFinish(null);
     MiniSolrCloudCluster miniCluster = new MiniSolrCloudCluster(NUM_SERVERS, createTempDir().toFile(), solrXml, jettyConfig.build());
+    return miniCluster;
+  }
+
+  private void createCollection(MiniSolrCloudCluster miniCluster, String collectionName, String createNodeSet, String asyncId) throws Exception {
+    String configName = "solrCloudCollectionConfig";
+    File configDir = new File(SolrTestCaseJ4.TEST_HOME() + File.separator + "collection1" + File.separator + "conf");
+    miniCluster.uploadConfigDir(configDir, configName);
+
+    Map<String, String> collectionProperties = new HashMap<>();
+    collectionProperties.put(CoreDescriptor.CORE_CONFIG, "solrconfig-tlog.xml");
+    collectionProperties.put("solr.tests.maxBufferedDocs", "100000");
+    collectionProperties.put("solr.tests.ramBufferSizeMB", "100");
+    // use non-test classes so RandomizedRunner isn't necessary
+    collectionProperties.put("solr.tests.mergePolicy", "org.apache.lucene.index.TieredMergePolicy");
+    collectionProperties.put("solr.tests.mergeScheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
+    collectionProperties.put("solr.directoryFactory", "solr.RAMDirectoryFactory");
+
+    miniCluster.createCollection(collectionName, NUM_SHARDS, REPLICATION_FACTOR, configName, createNodeSet, asyncId, collectionProperties);
+  }
+
+  protected void testCollectionCreateSearchDelete(String collectionName) throws Exception {
+
+    MiniSolrCloudCluster miniCluster = createMiniSolrCloudCluster();
 
     final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
 
@@ -116,21 +140,8 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase {
     assertEquals(NUM_SERVERS, miniCluster.getJettySolrRunners().size());
 
     // create collection
-    String collectionName = "testSolrCloudCollection";
-    String configName = "solrCloudCollectionConfig";
-    File configDir = new File(SolrTestCaseJ4.TEST_HOME() + File.separator + "collection1" + File.separator + "conf");
-    miniCluster.uploadConfigDir(configDir, configName);
-
-    Map<String, String> collectionProperties = new HashMap<>();
-    collectionProperties.put(CoreDescriptor.CORE_CONFIG, "solrconfig-tlog.xml");
-    collectionProperties.put("solr.tests.maxBufferedDocs", "100000");
-    collectionProperties.put("solr.tests.ramBufferSizeMB", "100");
-    // use non-test classes so RandomizedRunner isn't necessary
-    collectionProperties.put("solr.tests.mergePolicy", "org.apache.lucene.index.TieredMergePolicy");
-    collectionProperties.put("solr.tests.mergeScheduler", "org.apache.lucene.index.ConcurrentMergeScheduler");
-    collectionProperties.put("solr.directoryFactory", "solr.RAMDirectoryFactory");
     final String asyncId = (random().nextBoolean() ? null : "asyncId("+collectionName+".create)="+random().nextInt());
-    miniCluster.createCollection(collectionName, NUM_SHARDS, REPLICATION_FACTOR, configName, asyncId, collectionProperties);
+    createCollection(miniCluster, collectionName, null, asyncId);
     if (asyncId != null) {
       assertEquals("did not see async createCollection completion", "completed", AbstractFullDistribZkTestBase.getRequestStateAfterCompletion(asyncId, 330, cloudSolrClient));
     }
@@ -271,4 +282,48 @@ public class TestMiniSolrCloudCluster extends LuceneTestCase {
       cluster.shutdown();
   }
 
+  @Test
+  public void testCollectionCreateWithoutCoresThenDelete() throws Exception {
+
+    final String collectionName = "testSolrCloudCollectionWithoutCores";
+    final MiniSolrCloudCluster miniCluster = createMiniSolrCloudCluster();
+    final CloudSolrClient cloudSolrClient = miniCluster.getSolrClient();
+
+    try {
+      assertNotNull(miniCluster.getZkServer());
+      assertFalse(miniCluster.getJettySolrRunners().isEmpty());
+
+      // create collection
+      final String asyncId = (random().nextBoolean() ? null : "asyncId("+collectionName+".create)="+random().nextInt());
+      createCollection(miniCluster, collectionName, OverseerCollectionProcessor.CREATE_NODE_SET_EMPTY, asyncId);
+      if (asyncId != null) {
+        assertEquals("did not see async createCollection completion", "completed", AbstractFullDistribZkTestBase.getRequestStateAfterCompletion(asyncId, 330, cloudSolrClient));
+      }
+
+      try (SolrZkClient zkClient = new SolrZkClient
+          (miniCluster.getZkServer().getZkAddress(), AbstractZkTestCase.TIMEOUT, 45000, null);
+          ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
+
+        // wait for collection to appear
+        AbstractDistribZkTestBase.waitForRecoveriesToFinish(collectionName, zkStateReader, true, true, 330);
+
+        // check the collection's corelessness
+        {
+          int coreCount = 0;
+          for (Map.Entry<String,Slice> entry : zkStateReader.getClusterState().getSlicesMap(collectionName).entrySet()) {
+            coreCount += entry.getValue().getReplicasMap().entrySet().size();
+          }
+          assertEquals(0, coreCount);
+        }
+
+        // delete the collection we created earlier
+        miniCluster.deleteCollection(collectionName);
+        AbstractDistribZkTestBase.waitForCollectionToDisappear(collectionName, zkStateReader, true, true, 330);
+      }
+    }
+    finally {
+      miniCluster.shutdown();
+    }
+  }
+
 }
@@ -132,9 +132,10 @@ public class TestMiniSolrCloudClusterKerberos extends TestMiniSolrCloudCluster {
   @Test
   @Override
   public void testBasics() throws Exception {
-    testCollectionCreateSearchDelete();
+    final String collectionName = "testSolrCloudCollectionKerberos";
+    testCollectionCreateSearchDelete(collectionName);
     // sometimes run a second test e.g. to test collection create-delete-create scenario
-    if (random().nextBoolean()) testCollectionCreateSearchDelete();
+    if (random().nextBoolean()) testCollectionCreateSearchDelete(collectionName);
   }
 
   @AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/HADOOP-9893")
@@ -307,17 +307,20 @@ public class MiniSolrCloudCluster {
 
   public NamedList<Object> createCollection(String name, int numShards, int replicationFactor,
       String configName, Map<String, String> collectionProperties) throws SolrServerException, IOException {
-    return createCollection(name, numShards, replicationFactor, configName, null, collectionProperties);
+    return createCollection(name, numShards, replicationFactor, configName, null, null, collectionProperties);
   }
 
   public NamedList<Object> createCollection(String name, int numShards, int replicationFactor,
-      String configName, String asyncId, Map<String, String> collectionProperties) throws SolrServerException, IOException {
+      String configName, String createNodeSet, String asyncId, Map<String, String> collectionProperties) throws SolrServerException, IOException {
     final ModifiableSolrParams params = new ModifiableSolrParams();
     params.set(CoreAdminParams.ACTION, CollectionAction.CREATE.name());
     params.set(CoreAdminParams.NAME, name);
     params.set("numShards", numShards);
     params.set("replicationFactor", replicationFactor);
     params.set("collection.configName", configName);
+    if (null != createNodeSet) {
+      params.set(OverseerCollectionProcessor.CREATE_NODE_SET, createNodeSet);
+    }
    if (null != asyncId) {
       params.set(CommonAdminParams.ASYNC, asyncId);
     }
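A usage sketch of the widened test helper, with hypothetical collection and config names (NUM_SHARDS and REPLICATION_FACTOR as in the tests above):

    // create a coreless collection from a test, synchronously (no asyncId)
    miniCluster.createCollection("testCollection", NUM_SHARDS, REPLICATION_FACTOR, "testConfig",
        OverseerCollectionProcessor.CREATE_NODE_SET_EMPTY, null, new HashMap<String, String>());

Passing null for createNodeSet preserves the previous behavior, since the parameter is only added to the request when non-null.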