SOLR-13248: Autoscaling based replica placement is broken out of the box.

Solr 7.5 enabled autoscaling-based replica placement by default, but in the absence of default cluster policies, autoscaling can place more than one replica of the same shard on the same node. In addition, maxShardsPerNode and createNodeSet were not respected. For these reasons, this issue reverts the default replica placement policy to the 'legacy' assignment policy that was the default until Solr 7.4.
shalinmangar 2019-02-18 13:46:05 -08:00
parent 6a0f7b251d
commit 7e2d40197c
9 changed files with 55 additions and 11 deletions
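
To get the Solr 7.5 behaviour back after this change, the nested 'defaults' cluster property has to be set so that Assign.usePolicyFramework() returns true again. The following is a minimal sketch using the ClusterProperties and CollectionAdminParams APIs touched by this commit; the class name, ZooKeeper address and timeout are placeholders and are not part of the commit.

import org.apache.solr.common.cloud.ClusterProperties;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.params.CollectionAdminParams;
import org.apache.solr.common.util.Utils;

public class EnablePolicyBasedPlacement {
  public static void main(String[] args) throws Exception {
    // Placeholder ZooKeeper address and timeout; point this at the real ensemble.
    SolrZkClient zkClient = new SolrZkClient("localhost:9983", 30000);
    try {
      ClusterProperties props = new ClusterProperties(zkClient);
      // Writes {"defaults": {"cluster": {"useLegacyReplicaAssignment": false}}}
      // into clusterprops.json, the structure that Assign.usePolicyFramework() reads.
      props.setClusterProperty(CollectionAdminParams.DEFAULTS,
          Utils.makeMap(CollectionAdminParams.CLUSTER,
              Utils.makeMap(CollectionAdminParams.USE_LEGACY_REPLICA_ASSIGNMENT, false)));
    } finally {
      zkClient.close();
    }
  }
}

The auth tests changed in this commit achieve the same thing through the new withDefaultClusterProperty builder method shown further down.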


@@ -221,6 +221,12 @@ Bug Fixes
* SOLR-13126: Query boosts were not being combined correctly for documents where not all boost queries
matched (Alan Woodward, Mikhail Khludnev)
* SOLR-13248: Autoscaling based replica placement is broken out of the box. Solr 7.5 enabled autoscaling based replica
placement by default but in the absence of default cluster policies, autoscaling can place more than 1 replica of the
same shard on the same node. Also, the maxShardsPerNode and createNodeSet were not respected. Due to these reasons,
this issue reverts the default replica placement policy to the 'legacy' assignment policy that was the default until
Solr 7.4. (Gus Heck, Andrzej Bialecki, Bram Van Dam, shalin)
Improvements
----------------------


@@ -278,12 +278,12 @@ public class Assign {
}
private static boolean usePolicyFramework(Optional<DocCollection> collection, SolrCloudManager cloudManager) throws IOException, InterruptedException {
boolean useLegacyAssignment = false;
boolean useLegacyAssignment = true;
Map<String, Object> clusterProperties = cloudManager.getClusterStateProvider().getClusterProperties();
if (clusterProperties.containsKey(CollectionAdminParams.DEFAULTS)) {
Map<String, Object> defaults = (Map<String, Object>) clusterProperties.get(CollectionAdminParams.DEFAULTS);
Map<String, Object> collectionDefaults = (Map<String, Object>) defaults.getOrDefault(CollectionAdminParams.CLUSTER, Collections.emptyMap());
useLegacyAssignment = (boolean) collectionDefaults.getOrDefault(CollectionAdminParams.USE_LEGACY_REPLICA_ASSIGNMENT, false);
useLegacyAssignment = Boolean.parseBoolean(collectionDefaults.getOrDefault(CollectionAdminParams.USE_LEGACY_REPLICA_ASSIGNMENT, "true").toString());
}
if (!useLegacyAssignment) {


@@ -158,22 +158,35 @@ public class AssignTest extends SolrTestCaseJ4 {
}
@Test
public void testUsePolicyByDefault() throws Exception {
public void testUseLegacyByDefault() throws Exception {
assumeWorkingMockito();
SolrCloudManager solrCloudManager = mock(SolrCloudManager.class);
ClusterStateProvider clusterStateProvider = mock(ClusterStateProvider.class);
when(solrCloudManager.getClusterStateProvider()).thenReturn(clusterStateProvider);
DistribStateManager distribStateManager = mock(DistribStateManager.class);
when(solrCloudManager.getDistribStateManager()).thenReturn(distribStateManager);
when(distribStateManager.getAutoScalingConfig()).thenReturn(new AutoScalingConfig(Collections.emptyMap()));
// first we don't set any cluster property and assert that legacy assignment is used
when(clusterStateProvider.getClusterProperties()).thenReturn(Collections.emptyMap());
// verify
boolean usePolicyFramework = Assign.usePolicyFramework(solrCloudManager);
assertFalse(usePolicyFramework);
// another sanity check
when(clusterStateProvider.getClusterProperties()).thenReturn(Utils.makeMap("defaults", Collections.emptyMap()));
// verify
usePolicyFramework = Assign.usePolicyFramework(solrCloudManager);
assertFalse(usePolicyFramework);
// first we set useLegacyReplicaAssignment=false, so autoscaling should always be used
when(clusterStateProvider.getClusterProperties()).thenReturn(Utils.makeMap("defaults", Utils.makeMap("cluster", Utils.makeMap("useLegacyReplicaAssignment", false))));
// verify
boolean usePolicyFramework = Assign.usePolicyFramework(solrCloudManager);
usePolicyFramework = Assign.usePolicyFramework(solrCloudManager);
assertTrue(usePolicyFramework);
// now we set useLegacyReplicaAssignment=true, so autoscaling can only be used if an explicit policy or preference exists
when(clusterStateProvider.getClusterProperties()).thenReturn(Utils.makeMap("defaults", Utils.makeMap("cluster", Utils.makeMap("useLegacyReplicaAssignment", true))));
DistribStateManager distribStateManager = mock(DistribStateManager.class);
when(solrCloudManager.getDistribStateManager()).thenReturn(distribStateManager);
when(distribStateManager.getAutoScalingConfig()).thenReturn(new AutoScalingConfig(Collections.emptyMap()));
assertFalse(Assign.usePolicyFramework(solrCloudManager));


@@ -78,6 +78,7 @@ public class JWTAuthPluginIntegrationTest extends SolrCloudAuthTestCase {
configureCluster(NUM_SERVERS)// nodes
.withSecurityJson(TEST_PATH().resolve("security").resolve("jwt_plugin_jwk_security.json"))
.addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
.withDefaultClusterProperty("useLegacyReplicaAssignment", "false")
.configure();
baseUrl = cluster.getRandomJetty(random()).getBaseUrl().toString();


@@ -48,6 +48,7 @@ public class TestSolrCloudWithHadoopAuthPlugin extends SolrCloudAuthTestCase {
configureCluster(NUM_SERVERS)// nodes
.withSecurityJson(TEST_PATH().resolve("security").resolve("hadoop_kerberos_config.json"))
.addConfig("conf1", TEST_PATH().resolve("configsets").resolve("cloud-minimal").resolve("conf"))
.withDefaultClusterProperty("useLegacyReplicaAssignment", "false")
.configure();
}


@@ -149,11 +149,11 @@ public class ClusterProperties {
* This method sets a cluster property.
*
* @param propertyName The property name to be set.
* @param propertyValue The value of the property.
* @param propertyValue The value of the property, could also be a nested structure.
* @throws IOException if there is an error writing data to the cluster
*/
@SuppressWarnings("unchecked")
public void setClusterProperty(String propertyName, String propertyValue) throws IOException {
public void setClusterProperty(String propertyName, Object propertyValue) throws IOException {
validatePropertyName(propertyName);
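
Since setClusterProperty now takes an Object, nested structures can be stored alongside plain string values. A short illustrative sketch, not part of the commit, assuming an existing ClusterProperties instance and reusing property names that appear elsewhere in this diff:

import java.io.IOException;
import java.util.Collections;
import org.apache.solr.common.cloud.ClusterProperties;

class ClusterPropertyExamples {
  static void setExamples(ClusterProperties props) throws IOException {
    // Flat string value, unchanged behaviour ("urlScheme" is one of the known cluster properties).
    props.setClusterProperty("urlScheme", "https");
    // Nested value, newly possible with the Object-typed parameter; this is the shape
    // consumed by the "defaults" handling added elsewhere in this commit.
    props.setClusterProperty("defaults",
        Collections.singletonMap("cluster",
            Collections.singletonMap("useLegacyReplicaAssignment", false)));
  }
}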


@@ -50,6 +50,7 @@ import org.apache.solr.common.Callable;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.params.AutoScalingParams;
import org.apache.solr.common.params.CollectionAdminParams;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.util.ExecutorUtil;
import org.apache.solr.common.util.ObjectReleaseTracker;
@@ -239,7 +240,8 @@ public class ZkStateReader implements Closeable {
URL_SCHEME,
AUTO_ADD_REPLICAS,
CoreAdminParams.BACKUP_LOCATION,
MAX_CORES_PER_NODE)));
MAX_CORES_PER_NODE,
CollectionAdminParams.DEFAULTS)));
/**
* Returns config set name for collection.


@@ -58,6 +58,7 @@ import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.ReplicaPosition;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.cloud.rule.ImplicitSnitch;
import org.apache.solr.common.params.CollectionAdminParams;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.CollectionParams.CollectionAction;
import org.apache.solr.common.params.SolrParams;
@@ -2609,6 +2610,11 @@ public class TestPolicy extends SolrTestCaseJ4 {
public ClusterState getClusterState() throws IOException {
return ClusterState.load(0, clusterState, getLiveNodes(), ZkStateReader.getCollectionPath("c1"));
}
@Override
public Map<String, Object> getClusterProperties() {
return Collections.singletonMap("defaults", Collections.singletonMap("cluster", Collections.singletonMap(CollectionAdminParams.USE_LEGACY_REPLICA_ASSIGNMENT, false)));
}
};
}


@@ -52,6 +52,7 @@ import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CollectionAdminParams;
import org.apache.solr.common.util.NamedList;
import org.junit.AfterClass;
import org.junit.Before;
@@ -104,7 +105,7 @@ public class SolrCloudTestCase extends SolrTestCaseJ4 {
private Optional<String> securityJson = Optional.empty();
private List<Config> configs = new ArrayList<>();
private Map<String, String> clusterProperties = new HashMap<>();
private Map<String, Object> clusterProperties = new HashMap<>();
/**
* Create a builder
@@ -211,13 +212,27 @@ public class SolrCloudTestCase extends SolrTestCaseJ4 {
if (clusterProperties.size() > 0) {
ClusterProperties props = new ClusterProperties(cluster.getSolrClient().getZkStateReader().getZkClient());
for (Map.Entry<String, String> entry : clusterProperties.entrySet()) {
for (Map.Entry<String, Object> entry : clusterProperties.entrySet()) {
props.setClusterProperty(entry.getKey(), entry.getValue());
}
}
return cluster;
}
public Builder withDefaultClusterProperty(String key, String value) {
HashMap<String,Object> defaults = (HashMap<String, Object>) this.clusterProperties.get(CollectionAdminParams.DEFAULTS);
if (defaults == null) {
defaults = new HashMap<>();
this.clusterProperties.put(CollectionAdminParams.DEFAULTS, defaults);
}
HashMap<String,Object> cluster = (HashMap<String, Object>) defaults.get(CollectionAdminParams.CLUSTER);
if (cluster == null) {
cluster = new HashMap<>();
defaults.put(CollectionAdminParams.CLUSTER, cluster);
}
cluster.put(key, value);
return this;
}
}
/** The cluster */