Deprecate setting 'cluster.no_master_block' and introduce the alternative setting 'cluster.no_cluster_manager_block' (#2453)

* Add a new setting `cluster.no_cluster_manager_block`, which aims to replace the existing `cluster.no_master_block`
* Set the value of the old setting `cluster.no_master_block` as the fallback for the new setting (see the sketch below)
* Deprecate the existing setting `cluster.no_master_block`
* Add unit tests

Signed-off-by: Tianli Feng <ftianli@amazon.com>
Tianli Feng authored on 2022-03-18 14:57:08 -07:00, committed by GitHub
parent ec47a937f3
commit 1193ec101b
8 changed files with 119 additions and 21 deletions
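To illustrate the fallback behavior described in the commit message, here is a minimal, hedged sketch of how the two settings resolve, mirroring the new unit tests in this commit. It assumes the OpenSearch core classes touched here are on the classpath; the class name is purely illustrative, and the identity comparisons work because the parser returns the shared block constants (as the tests assert with sameInstance).

import org.opensearch.cluster.coordination.NoMasterBlockService;
import org.opensearch.common.settings.Settings;

public class NoClusterManagerBlockFallbackSketch {
    public static void main(String[] args) {
        // Only the deprecated key is configured: the new setting falls back to its value.
        Settings oldOnly = Settings.builder()
            .put("cluster.no_master_block", "metadata_write")
            .build();
        System.out.println(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.get(oldOnly)
            == NoMasterBlockService.NO_MASTER_BLOCK_METADATA_WRITES); // true

        // Both keys are configured: the new key wins for the new setting, while reading the
        // deprecated setting directly still returns its own value (and logs a deprecation warning).
        Settings both = Settings.builder()
            .put("cluster.no_cluster_manager_block", "all")
            .put("cluster.no_master_block", "metadata_write")
            .build();
        System.out.println(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.get(both)
            == NoMasterBlockService.NO_MASTER_BLOCK_ALL); // true
        System.out.println(NoMasterBlockService.NO_MASTER_BLOCK_SETTING.get(both)
            == NoMasterBlockService.NO_MASTER_BLOCK_METADATA_WRITES); // true
    }
}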


@@ -90,7 +90,7 @@ public class NoMasterNodeIT extends OpenSearchIntegTestCase {
public void testNoMasterActions() throws Exception {
Settings settings = Settings.builder()
.put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), true)
- .put(NoMasterBlockService.NO_MASTER_BLOCK_SETTING.getKey(), "all")
+ .put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all")
.build();
final TimeValue timeout = TimeValue.timeValueMillis(10);
@@ -242,7 +242,7 @@ public class NoMasterNodeIT extends OpenSearchIntegTestCase {
public void testNoMasterActionsWriteMasterBlock() throws Exception {
Settings settings = Settings.builder()
.put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), false)
- .put(NoMasterBlockService.NO_MASTER_BLOCK_SETTING.getKey(), "write")
+ .put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "write")
.build();
final List<String> nodes = internalCluster().startNodes(3, settings);
@@ -323,7 +323,7 @@ public class NoMasterNodeIT extends OpenSearchIntegTestCase {
public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception {
Settings settings = Settings.builder()
- .put(NoMasterBlockService.NO_MASTER_BLOCK_SETTING.getKey(), "metadata_write")
+ .put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "metadata_write")
.put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), "100ms")
.build();


@@ -189,7 +189,7 @@ public class MasterDisruptionIT extends AbstractDisruptionTestCase {
* Verify that the proper block is applied when nodes lose their master
*/
public void testVerifyApiBlocksDuringPartition() throws Exception {
- internalCluster().startNodes(3, Settings.builder().putNull(NoMasterBlockService.NO_MASTER_BLOCK_SETTING.getKey()).build());
+ internalCluster().startNodes(3, Settings.builder().putNull(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey()).build());
// Makes sure that the get request can be executed on each node locally:
assertAcked(
@@ -247,11 +247,11 @@ public class MasterDisruptionIT extends AbstractDisruptionTestCase {
// Wait until the master node sees all 3 nodes again.
ensureStableCluster(3, new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + networkDisruption.expectedTimeToHeal().millis()));
- logger.info("Verify no master block with {} set to {}", NoMasterBlockService.NO_MASTER_BLOCK_SETTING.getKey(), "all");
+ logger.info("Verify no master block with {} set to {}", NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all");
client().admin()
.cluster()
.prepareUpdateSettings()
- .setTransientSettings(Settings.builder().put(NoMasterBlockService.NO_MASTER_BLOCK_SETTING.getKey(), "all"))
+ .setTransientSettings(Settings.builder().put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all"))
.get();
networkDisruption.startDisrupting();


@@ -76,14 +76,24 @@ public class NoMasterBlockService {
"write",
NoMasterBlockService::parseNoMasterBlock,
Property.Dynamic,
- Property.NodeScope
+ Property.NodeScope,
+ Property.Deprecated
);
+ // The setting below is going to replace the above.
+ // To keep backwards compatibility, the old setting is retained and also used as the fallback for the new one.
+ public static final Setting<ClusterBlock> NO_CLUSTER_MANAGER_BLOCK_SETTING = new Setting<>(
+ "cluster.no_cluster_manager_block",
+ NO_MASTER_BLOCK_SETTING,
+ NoMasterBlockService::parseNoMasterBlock,
+ Property.Dynamic,
+ Property.NodeScope
+ );
private volatile ClusterBlock noMasterBlock;
public NoMasterBlockService(Settings settings, ClusterSettings clusterSettings) {
- this.noMasterBlock = NO_MASTER_BLOCK_SETTING.get(settings);
- clusterSettings.addSettingsUpdateConsumer(NO_MASTER_BLOCK_SETTING, this::setNoMasterBlock);
+ this.noMasterBlock = NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings);
+ clusterSettings.addSettingsUpdateConsumer(NO_CLUSTER_MANAGER_BLOCK_SETTING, this::setNoMasterBlock);
}
private static ClusterBlock parseNoMasterBlock(String value) {

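As a hedged runtime-level sketch (not part of the diff; the class name is illustrative and it assumes the OpenSearch core library on the classpath), a caller can construct NoMasterBlockService against the new setting and rely on its dynamic-update consumer, mirroring the patterns used in NoMasterBlockServiceTests further down. The design point is that the service now reads and subscribes only to the new setting, while the old setting participates solely as its fallback.

import org.opensearch.cluster.coordination.NoMasterBlockService;
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Settings;

public class NoClusterManagerBlockWiringSketch {
    public static void main(String[] args) {
        // Start with the new setting configured to block everything while no cluster manager is elected.
        Settings initial = Settings.builder()
            .put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all")
            .build();
        ClusterSettings clusterSettings = new ClusterSettings(initial, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        NoMasterBlockService service = new NoMasterBlockService(initial, clusterSettings);
        System.out.println(service.getNoMasterBlock() == NoMasterBlockService.NO_MASTER_BLOCK_ALL); // true

        // Because the setting is dynamic, applying updated cluster settings with the new key changes the block in place.
        clusterSettings.applySettings(
            Settings.builder().put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "metadata_write").build()
        );
        System.out.println(service.getNoMasterBlock() == NoMasterBlockService.NO_MASTER_BLOCK_METADATA_WRITES); // true
    }
}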

@@ -272,7 +272,8 @@ public final class ClusterSettings extends AbstractScopedSettings {
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING,
InternalSnapshotsInfoService.INTERNAL_SNAPSHOT_INFO_MAX_CONCURRENT_FETCHES_SETTING,
DestructiveOperations.REQUIRES_NAME_SETTING,
- NoMasterBlockService.NO_MASTER_BLOCK_SETTING,
+ NoMasterBlockService.NO_MASTER_BLOCK_SETTING, // deprecated
+ NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING,
GatewayService.EXPECTED_DATA_NODES_SETTING,
GatewayService.EXPECTED_MASTER_NODES_SETTING,
GatewayService.EXPECTED_NODES_SETTING,


@@ -84,7 +84,7 @@ import static org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_RET
import static org.opensearch.cluster.coordination.LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING;
import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_ALL;
import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_METADATA_WRITES;
- import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_SETTING;
+ import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING;
import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_WRITES;
import static org.opensearch.cluster.coordination.Reconfigurator.CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION;
import static org.opensearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING;
@@ -1143,9 +1143,9 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase {
cluster.stabilise();
final ClusterNode leader = cluster.getAnyLeader();
- leader.submitUpdateTask("update NO_MASTER_BLOCK_SETTING", cs -> {
+ leader.submitUpdateTask("update NO_CLUSTER_MANAGER_BLOCK_SETTING", cs -> {
final Builder settingsBuilder = Settings.builder().put(cs.metadata().persistentSettings());
- settingsBuilder.put(NO_MASTER_BLOCK_SETTING.getKey(), noMasterBlockSetting);
+ settingsBuilder.put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), noMasterBlockSetting);
return ClusterState.builder(cs)
.metadata(Metadata.builder(cs.metadata()).persistentSettings(settingsBuilder.build()))
.build();


@@ -0,0 +1,87 @@
/*
* SPDX-License-Identifier: Apache-2.0
*
* The OpenSearch Contributors require contributions made to
* this file be licensed under the Apache-2.0 license or a
* compatible open source license.
*/
package org.opensearch.cluster.coordination;
import org.opensearch.common.settings.ClusterSettings;
import org.opensearch.common.settings.Setting;
import org.opensearch.common.settings.Settings;
import org.opensearch.test.OpenSearchTestCase;
import java.util.Arrays;
import java.util.Set;
/**
* A unit test to validate that the former name of the setting 'cluster.no_cluster_manager_block' still takes effect,
* after it is deprecated, so that backwards compatibility is maintained.
* The test can be removed when support for the deprecated setting is removed.
*/
public class NoMasterBlockServiceRenamedSettingTests extends OpenSearchTestCase {
/**
* Validate that both settings are known and supported.
*/
public void testReindexSettingsExist() {
Set<Setting<?>> settings = ClusterSettings.BUILT_IN_CLUSTER_SETTINGS;
assertTrue(
"Both 'cluster.no_cluster_manager_block' and its predecessor should be supported built-in settings.",
settings.containsAll(
Arrays.asList(NoMasterBlockService.NO_MASTER_BLOCK_SETTING, NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING)
)
);
}
/**
* Validate that the default value of both settings is the same.
*/
public void testSettingFallback() {
assertEquals(
NoMasterBlockService.NO_MASTER_BLOCK_SETTING.get(Settings.EMPTY),
NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.get(Settings.EMPTY)
);
}
/**
* Validate that the new setting can be configured correctly, and that it doesn't impact the old setting.
*/
public void testSettingGetValue() {
Settings settings = Settings.builder().put("cluster.no_cluster_manager_block", "all").build();
assertEquals(NoMasterBlockService.NO_MASTER_BLOCK_ALL, NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings));
assertEquals(
NoMasterBlockService.NO_MASTER_BLOCK_SETTING.getDefault(Settings.EMPTY),
NoMasterBlockService.NO_MASTER_BLOCK_SETTING.get(settings)
);
}
/**
* Validate that the value of the old setting is applied to the new setting if the new setting is not configured.
*/
public void testSettingGetValueWithFallback() {
Settings settings = Settings.builder().put("cluster.no_master_block", "metadata_write").build();
assertEquals(
NoMasterBlockService.NO_MASTER_BLOCK_METADATA_WRITES,
NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings)
);
assertSettingDeprecationsAndWarnings(new Setting<?>[] { NoMasterBlockService.NO_MASTER_BLOCK_SETTING });
}
/**
* Validate that the value of the old setting is ignored if the new setting is configured.
*/
public void testSettingGetValueWhenBothAreConfigured() {
Settings settings = Settings.builder()
.put("cluster.no_cluster_manager_block", "all")
.put("cluster.no_master_block", "metadata_write")
.build();
assertEquals(NoMasterBlockService.NO_MASTER_BLOCK_ALL, NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.get(settings));
assertEquals(NoMasterBlockService.NO_MASTER_BLOCK_METADATA_WRITES, NoMasterBlockService.NO_MASTER_BLOCK_SETTING.get(settings));
assertSettingDeprecationsAndWarnings(new Setting<?>[] { NoMasterBlockService.NO_MASTER_BLOCK_SETTING });
}
}


@@ -37,7 +37,7 @@ import org.opensearch.test.OpenSearchTestCase;
import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_ALL;
import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_METADATA_WRITES;
- import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_SETTING;
+ import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING;
import static org.opensearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_WRITES;
import static org.opensearch.common.settings.ClusterSettings.BUILT_IN_CLUSTER_SETTINGS;
import static org.hamcrest.Matchers.sameInstance;
@@ -65,35 +65,35 @@ public class NoMasterBlockServiceTests extends OpenSearchTestCase {
}
public void testBlocksWritesIfConfiguredBySetting() {
- createService(Settings.builder().put(NO_MASTER_BLOCK_SETTING.getKey(), "write").build());
+ createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "write").build());
assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES));
}
public void testBlocksAllIfConfiguredBySetting() {
- createService(Settings.builder().put(NO_MASTER_BLOCK_SETTING.getKey(), "all").build());
+ createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all").build());
assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_ALL));
}
public void testBlocksMetadataWritesIfConfiguredBySetting() {
- createService(Settings.builder().put(NO_MASTER_BLOCK_SETTING.getKey(), "metadata_write").build());
+ createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "metadata_write").build());
assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_METADATA_WRITES));
}
public void testRejectsInvalidSetting() {
expectThrows(
IllegalArgumentException.class,
- () -> createService(Settings.builder().put(NO_MASTER_BLOCK_SETTING.getKey(), "unknown").build())
+ () -> createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "unknown").build())
);
}
public void testSettingCanBeUpdated() {
- createService(Settings.builder().put(NO_MASTER_BLOCK_SETTING.getKey(), "all").build());
+ createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all").build());
assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_ALL));
- clusterSettings.applySettings(Settings.builder().put(NO_MASTER_BLOCK_SETTING.getKey(), "write").build());
+ clusterSettings.applySettings(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "write").build());
assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES));
- clusterSettings.applySettings(Settings.builder().put(NO_MASTER_BLOCK_SETTING.getKey(), "metadata_write").build());
+ clusterSettings.applySettings(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "metadata_write").build());
assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_METADATA_WRITES));
}
}


@@ -435,7 +435,7 @@ public final class InternalTestCluster extends TestCluster {
);
// TODO: currently we only randomize "cluster.no_master_block" between "write" and "metadata_write", as "all" is fragile
// and fails shards when a master abdicates, which breaks many tests.
- builder.put(NoMasterBlockService.NO_MASTER_BLOCK_SETTING.getKey(), randomFrom(random, "write", "metadata_write"));
+ builder.put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), randomFrom(random, "write", "metadata_write"));
defaultSettings = builder.build();
executor = OpenSearchExecutors.newScaling(
"internal_test_cluster_executor",