SOLR-8913: When using a shared filesystem we should store data dir and tlog dir locations in the cluster state.
commit b111bfda42 (parent 6276fe5ec7)
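With this change, a replica's index and transaction-log locations become discoverable straight from the cluster state when shared storage is in use. A rough SolrJ-style consumer-side sketch of reading them back (the zkHost, collection, and shard names are placeholders, not part of this commit):

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;

// Illustrative sketch (not from this commit): read the locations that
// ZkController now publishes for shared-storage cores.
try (CloudSolrClient client = new CloudSolrClient("zkhost:2181")) {
  client.connect();
  ClusterState clusterState = client.getZkStateReader().getClusterState();
  Slice slice = clusterState.getSlice("myCollection", "shard1");
  for (Replica replica : slice.getReplicas()) {
    String dataDir = replica.getStr("dataDir");   // e.g. an hdfs:// path on HDFS
    String ulogDir = replica.getStr("ulogDir");   // transaction log location
    System.out.println(replica.getName() + " -> " + dataDir + ", " + ulogDir);
  }
}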
@@ -70,9 +70,11 @@ New Features
 
 * SOLR-8349: Allow sharing of large in memory data structures across cores (Gus Heck, noble)
 
 * SOLR-8809: Implement Connection.prepareStatement (Kevin Risden)
 
+* SOLR-8913: When using a shared filesystem we should store data dir and tlog dir locations in
+  the cluster state. (Mark Miller)
 
 Bug Fixes
 ----------------------
 
@@ -1165,9 +1165,8 @@ public final class ZkController {
     if (coreNodeName != null) {
       props.put(ZkStateReader.CORE_NODE_NAME_PROP, coreNodeName);
     }
 
-    if (ClusterStateUtil.isAutoAddReplicas(getZkStateReader(), collection)) {
-      try (SolrCore core = cc.getCore(cd.getName())) {
-        if (core != null && core.getDirectoryFactory().isSharedStorage()) {
+    try (SolrCore core = cc.getCore(cd.getName())) {
+      if (core != null && core.getDirectoryFactory().isSharedStorage()) {
         props.put("dataDir", core.getDataDir());
         UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
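The hunk is cut off here. Judging from the test below, which asserts both "dataDir" and "ulogDir" on every replica, the block presumably continues by publishing the update-log directory as well. A hedged sketch of that tail (not verbatim from this commit; 'props', 'core', and 'ulog' come from the hunk above):

// Assumed continuation: publish the tlog location too, guarding against
// cores that have no update log configured.
if (ulog != null) {
  props.put("ulogDir", ulog.getLogDir());
}

The net effect of the hunk is that the data dir and tlog dir are now published for any core on shared storage, rather than only when autoAddReplicas is enabled for the collection.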
@@ -32,6 +32,9 @@ import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.cloud.BasicDistributedZkTest;
 import org.apache.solr.cloud.ChaosMonkey;
+import org.apache.solr.common.cloud.ClusterState;
+import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.params.CollectionParams.CollectionAction;
 import org.apache.solr.common.params.ModifiableSolrParams;
 import org.apache.solr.common.util.NamedList;
@@ -46,6 +49,7 @@ import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 import java.util.Timer;
 import java.util.TimerTask;
@@ -153,6 +157,17 @@ public class StressHdfsTest extends BasicDistributedZkTest {
     createCollection(DELETE_DATA_DIR_COLLECTION, nShards, rep, maxReplicasPerNode);
 
     waitForRecoveriesToFinish(DELETE_DATA_DIR_COLLECTION, false);
 
+    // data dirs should be in zk, SOLR-8913
+    ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
+    Slice slice = clusterState.getSlice(DELETE_DATA_DIR_COLLECTION, "shard1");
+    assertNotNull(clusterState.getSlices(DELETE_DATA_DIR_COLLECTION).toString(), slice);
+    Collection<Replica> replicas = slice.getReplicas();
+    for (Replica replica : replicas) {
+      assertNotNull(replica.getProperties().toString(), replica.get("dataDir"));
+      assertNotNull(replica.getProperties().toString(), replica.get("ulogDir"));
+    }
+
     cloudClient.setDefaultCollection(DELETE_DATA_DIR_COLLECTION);
+    cloudClient.getZkStateReader().forceUpdateCollection(DELETE_DATA_DIR_COLLECTION);
 